author		Sean Paul <seanpaul@chromium.org>	2018-03-21 09:40:55 -0400
committer	Sean Paul <seanpaul@chromium.org>	2018-03-21 09:40:55 -0400
commit		1c7095d2836baafd84e596dd34ba1a1293a4faa9 (patch)
tree		498f529809b9c0a3c75c8b8bb1098ed4f71233db /drivers/gpu/drm/amd
parent		2793c1d77aa8876e5674e901d051c79570e99db2 (diff)
parent		78230c46ec0a91dd4256c9e54934b3c7095a7ee3 (diff)
Merge airlied/drm-next into drm-misc-next
Refresh -misc-next

Signed-off-by: Sean Paul <seanpaul@chromium.org>
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h151
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c1506
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c431
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c289
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c412
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c566
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c290
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c162
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c297
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c326
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c306
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_dpm.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c88
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/emu_soc.c (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h)15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c309
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c137
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c187
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c215
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c182
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c200
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c2
-rwxr-xr-xdrivers/gpu/drm/amd/amdgpu/vce_v4_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c159
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c356
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c301
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c56
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c93
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c357
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.h78
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c44
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h62
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c362
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h6
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/Makefile5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c456
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h36
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c274
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c126
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c14
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c82
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c28
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile14
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c190
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c247
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c159
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c179
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h128
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c451
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c122
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c834
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c127
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c192
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c600
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c6085
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h598
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c1772
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_logger.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h)17
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h8
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h34
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c1396
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h)58
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c155
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h9
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_stats.h65
-rw-r--r--drivers/gpu/drm/amd/display/modules/stats/stats.c334
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h31150
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h1658
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h3
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h200
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h99
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h62
-rw-r--r--drivers/gpu/drm/amd/include/soc15_hw_ip.h98
-rw-r--r--drivers/gpu/drm/amd/include/soc15_ih_clientid.h70
-rw-r--r--drivers/gpu/drm/amd/include/vega10_ip_offset.h (renamed from drivers/gpu/drm/amd/include/soc15ip.h)370
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c845
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c209
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h36
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c54
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c646
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c114
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c14
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c)548
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h)130
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h)4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c874
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h34
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c)1071
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h)82
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c536
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h180
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c520
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h11
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c57
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h20
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h210
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h412
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/power_state.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_feature.h67
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_instance.h)26
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7.h19
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu9.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h44
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c228
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c871
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h98
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c255
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c33
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c266
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c406
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c344
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h)26
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c74
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c891
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h99
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c78
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c197
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c316
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h7
372 files changed, 51692 insertions, 22481 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index d6e5b7273853..2ca2b5154d52 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -30,7 +30,6 @@ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
 ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
 	-I$(FULL_AMD_PATH)/include \
 	-I$(FULL_AMD_PATH)/amdgpu \
-	-I$(FULL_AMD_PATH)/scheduler \
 	-I$(FULL_AMD_PATH)/powerplay/inc \
 	-I$(FULL_AMD_PATH)/acp/include \
 	-I$(FULL_AMD_DISPLAY_PATH) \
@@ -63,7 +62,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
 
 amdgpu-y += \
-	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
+	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
 
 # add GMC block
 amdgpu-y += \
@@ -88,8 +87,7 @@ amdgpu-y += \
 
 # add SMC block
 amdgpu-y += \
-	amdgpu_dpm.o \
-	amdgpu_powerplay.o
+	amdgpu_dpm.o
 
 # add DCE block
 amdgpu-y += \
@@ -130,6 +128,8 @@ amdgpu-y += \
 # add amdkfd interfaces
 amdgpu-y += \
 	amdgpu_amdkfd.o \
+	amdgpu_amdkfd_fence.o \
+	amdgpu_amdkfd_gpuvm.o \
 	amdgpu_amdkfd_gfx_v8.o
 
 # add cgs
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 31126df06c8c..f44a83ab2bf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -68,6 +68,7 @@
 #include "amdgpu_vce.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_mn.h"
+#include "amdgpu_gmc.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_gart.h"
@@ -127,6 +128,7 @@ extern int amdgpu_job_hang_limit;
 extern int amdgpu_lbpw;
 extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
+extern int amdgpu_emu_mode;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
@@ -179,10 +181,6 @@ extern int amdgpu_cik_support;
 #define CIK_CURSOR_WIDTH 128
 #define CIK_CURSOR_HEIGHT 128
 
-/* GPU RESET flags */
-#define AMDGPU_RESET_INFO_VRAM_LOST (1 << 0)
-#define AMDGPU_RESET_INFO_FULLRESET (1 << 1)
-
 struct amdgpu_device;
 struct amdgpu_ib;
 struct amdgpu_cs_parser;
@@ -318,13 +316,6 @@ struct amdgpu_vm_pte_funcs {
 	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
 			  uint64_t value, unsigned count,
 			  uint32_t incr);
-
-	/* maximum nums of PTEs/PDEs in a single operation */
-	uint32_t set_max_nums_pte_pde;
-
-	/* number of dw to reserve per operation */
-	unsigned set_pte_pde_num_dw;
-
 	/* for linear pte/pde updates without addr mapping */
 	void (*set_pte_pde)(struct amdgpu_ib *ib,
 			    uint64_t pe,
@@ -332,28 +323,6 @@ struct amdgpu_vm_pte_funcs {
 			    uint32_t incr, uint64_t flags);
 };
 
-/* provided by the gmc block */
-struct amdgpu_gart_funcs {
-	/* flush the vm tlb via mmio */
-	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-			      uint32_t vmid);
-	/* write pte/pde updates using the cpu */
-	int (*set_pte_pde)(struct amdgpu_device *adev,
-			   void *cpu_pt_addr, /* cpu addr of page table */
-			   uint32_t gpu_page_idx, /* pte/pde to update */
-			   uint64_t addr, /* addr to write into pte/pde */
-			   uint64_t flags); /* access flags */
-	/* enable/disable PRT support */
-	void (*set_prt)(struct amdgpu_device *adev, bool enable);
-	/* set pte flags based per asic */
-	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
-				     uint32_t flags);
-	/* get the pde for a given mc addr */
-	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
-			   u64 *dst, u64 *flags);
-	uint32_t (*get_invalidate_req)(unsigned int vmid);
-};
-
 /* provided by the ih block */
 struct amdgpu_ih_funcs {
 	/* ring read/write ptr handling, called from interrupt context */
@@ -371,14 +340,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev);
 bool amdgpu_read_bios(struct amdgpu_device *adev);
 
 /*
- * Dummy page
- */
-struct amdgpu_dummy_page {
-	struct page *page;
-	dma_addr_t addr;
-};
-
-/*
  * Clocks
  */
 
@@ -418,8 +379,8 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 					struct drm_gem_object *gobj,
 					int flags);
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf);
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
@@ -480,7 +441,7 @@ struct amdgpu_sa_bo {
 void amdgpu_gem_force_release(struct amdgpu_device *adev);
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
-			     u64 flags, bool kernel,
+			     u64 flags, enum ttm_bo_type type,
 			     struct reservation_object *resv,
 			     struct drm_gem_object **obj);
 
@@ -494,56 +455,6 @@ int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
 /*
- * VMHUB structures, functions & helpers
- */
-struct amdgpu_vmhub {
-	uint32_t ctx0_ptb_addr_lo32;
-	uint32_t ctx0_ptb_addr_hi32;
-	uint32_t vm_inv_eng0_req;
-	uint32_t vm_inv_eng0_ack;
-	uint32_t vm_context0_cntl;
-	uint32_t vm_l2_pro_fault_status;
-	uint32_t vm_l2_pro_fault_cntl;
-};
-
-/*
- * GPU MC structures, functions & helpers
- */
-struct amdgpu_mc {
-	resource_size_t aper_size;
-	resource_size_t aper_base;
-	resource_size_t agp_base;
-	/* for some chips with <= 32MB we need to lie
-	 * about vram size near mc fb location */
-	u64 mc_vram_size;
-	u64 visible_vram_size;
-	u64 gart_size;
-	u64 gart_start;
-	u64 gart_end;
-	u64 vram_start;
-	u64 vram_end;
-	unsigned vram_width;
-	u64 real_vram_size;
-	int vram_mtrr;
-	u64 mc_mask;
-	const struct firmware *fw; /* MC firmware */
-	uint32_t fw_version;
-	struct amdgpu_irq_src vm_fault;
-	uint32_t vram_type;
-	uint32_t srbm_soft_reset;
-	bool prt_warning;
-	uint64_t stolen_size;
-	/* apertures */
-	u64 shared_aperture_start;
-	u64 shared_aperture_end;
-	u64 private_aperture_start;
-	u64 private_aperture_end;
-	/* protects concurrent invalidation */
-	spinlock_t invalidate_lock;
-	bool translate_further;
-};
-
-/*
  * GPU doorbell structures, functions & helpers
  */
 typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
@@ -1125,8 +1036,9 @@ struct amdgpu_job {
 	void *owner;
 	uint64_t fence_ctx; /* the fence_context this job uses */
 	bool vm_needs_flush;
-	unsigned vmid;
 	uint64_t vm_pd_addr;
+	unsigned vmid;
+	unsigned pasid;
 	uint32_t gds_base, gds_size;
 	uint32_t gws_base, gws_size;
 	uint32_t oa_base, oa_size;
@@ -1156,7 +1068,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
 /*
  * Writeback
  */
-#define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */
 
 struct amdgpu_wb {
 	struct amdgpu_bo *wb_obj;
@@ -1169,8 +1081,6 @@ struct amdgpu_wb {
 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
 
-void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-
 /*
  * SDMA
  */
@@ -1288,6 +1198,11 @@ struct amdgpu_asic_funcs {
 	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
 	/* get config memsize register */
 	u32 (*get_config_memsize)(struct amdgpu_device *adev);
+	/* flush hdp write queue */
+	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+	/* invalidate hdp read cache */
+	void (*invalidate_hdp)(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring);
 };
 
 /*
@@ -1431,7 +1346,7 @@ struct amdgpu_nbio_funcs {
 	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
 	u32 (*get_rev_id)(struct amdgpu_device *adev);
 	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
-	void (*hdp_flush)(struct amdgpu_device *adev);
+	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 	u32 (*get_memsize)(struct amdgpu_device *adev);
 	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
 			bool use_doorbell, int doorbell_index);
@@ -1478,9 +1393,7 @@ enum amd_hw_ip_block_type {
 #define HWIP_MAX_INSTANCE 6
 
 struct amd_powerplay {
-	struct cgs_device *cgs_device;
 	void *pp_handle;
-	const struct amd_ip_funcs *ip_funcs;
 	const struct amd_pm_funcs *pp_funcs;
 };
 
@@ -1574,9 +1487,9 @@ struct amdgpu_device {
 	struct amdgpu_clock clock;
 
 	/* MC */
-	struct amdgpu_mc mc;
+	struct amdgpu_gmc gmc;
 	struct amdgpu_gart gart;
-	struct amdgpu_dummy_page dummy_page;
+	dma_addr_t dummy_page_addr;
 	struct amdgpu_vm_manager vm_manager;
 	struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
 
@@ -1715,6 +1628,9 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 			uint32_t acc_flags);
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		    uint32_t acc_flags);
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
+
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
 
@@ -1726,6 +1642,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
+int emu_soc_asic_init(struct amdgpu_device *adev);
+
 /*
  * Registers read & write functions.
  */
@@ -1736,6 +1654,9 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 #define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 #define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
+#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
+#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
+
 #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
 #define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
@@ -1838,13 +1759,17 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, level, dst, flags) (adev)->gart.gart_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
+#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
+#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
@@ -1857,11 +1782,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
-#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
 #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
 #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
@@ -1871,7 +1796,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
 #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
-#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
 #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
 #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
 #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
@@ -1894,20 +1818,17 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job* job, bool force);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
-void amdgpu_update_display_priority(struct amdgpu_device *adev);
+void amdgpu_display_update_priority(struct amdgpu_device *adev);
 
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 				  u64 num_vis_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
-				 struct amdgpu_mc *mc, u64 base);
+				 struct amdgpu_gmc *mc, u64 base);
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
-				 struct amdgpu_mc *mc);
+				 struct amdgpu_gmc *mc);
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
-void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
-int amdgpu_ttm_init(struct amdgpu_device *adev);
-void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 					     const u32 *registers,
 					     const u32 array_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 57afad79f55d..8fa850a070e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 	size_t size;
 	u32 retry = 3;
 
+	if (amdgpu_acpi_pcie_notify_device_ready(adev))
+		return -EINVAL;
+
 	/* Get the device handle */
 	handle = ACPI_HANDLE(&adev->pdev->dev);
 	if (!handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1d605e1c1d66..4d36203ffb11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -30,6 +30,8 @@
 const struct kgd2kfd_calls *kgd2kfd;
 bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
 
+static const unsigned int compute_vmid_bitmap = 0xFF00;
+
 int amdgpu_amdkfd_init(void)
 {
 	int ret;
@@ -56,6 +58,7 @@ int amdgpu_amdkfd_init(void)
 #else
 	ret = -ENOENT;
 #endif
+	amdgpu_amdkfd_gpuvm_init_mem_limits();
 
 	return ret;
 }
@@ -78,10 +81,15 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_KAVERI:
+	case CHIP_HAWAII:
 		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
 		break;
 #endif
 	case CHIP_CARRIZO:
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
 		break;
 	default:
@@ -132,9 +140,13 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 	int last_valid_bit;
 	if (adev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
-			.compute_vmid_bitmap = 0xFF00,
+			.compute_vmid_bitmap = compute_vmid_bitmap,
 			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
-			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
+			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
+			.gpuvm_size = min(adev->vm_manager.max_pfn
+					  << AMDGPU_GPU_PAGE_SHIFT,
+					  AMDGPU_VA_HOLE_START),
+			.drm_render_minor = adev->ddev->render->index
 		};
 
 		/* this is going to have a few of the MSBs set that we need to
@@ -204,20 +216,14 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 			void **cpu_ptr)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
+	struct amdgpu_bo *bo = NULL;
 	int r;
+	uint64_t gpu_addr_tmp = 0;
+	void *cpu_ptr_tmp = NULL;
 
-	BUG_ON(kgd == NULL);
-	BUG_ON(gpu_addr == NULL);
-	BUG_ON(cpu_ptr == NULL);
-
-	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
-	if ((*mem) == NULL)
-		return -ENOMEM;
-
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
-			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0,
-			     &(*mem)->bo);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
+			     NULL, &bo);
 	if (r) {
 		dev_err(adev->dev,
 			"failed to allocate BO for amdkfd (%d)\n", r);
@@ -225,54 +231,53 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	}
 
 	/* map the buffer */
-	r = amdgpu_bo_reserve((*mem)->bo, true);
+	r = amdgpu_bo_reserve(bo, true);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
 		goto allocate_mem_reserve_bo_failed;
 	}
 
-	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
-			&(*mem)->gpu_addr);
+	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
+			&gpu_addr_tmp);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
 		goto allocate_mem_pin_bo_failed;
 	}
-	*gpu_addr = (*mem)->gpu_addr;
 
-	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
 	if (r) {
 		dev_err(adev->dev,
 			"(%d) failed to map bo to kernel for amdkfd\n", r);
 		goto allocate_mem_kmap_bo_failed;
 	}
-	*cpu_ptr = (*mem)->cpu_ptr;
 
-	amdgpu_bo_unreserve((*mem)->bo);
+	*mem_obj = bo;
+	*gpu_addr = gpu_addr_tmp;
+	*cpu_ptr = cpu_ptr_tmp;
+
+	amdgpu_bo_unreserve(bo);
 
 	return 0;
 
 allocate_mem_kmap_bo_failed:
-	amdgpu_bo_unpin((*mem)->bo);
+	amdgpu_bo_unpin(bo);
 allocate_mem_pin_bo_failed:
-	amdgpu_bo_unreserve((*mem)->bo);
+	amdgpu_bo_unreserve(bo);
 allocate_mem_reserve_bo_failed:
-	amdgpu_bo_unref(&(*mem)->bo);
+	amdgpu_bo_unref(&bo);
 
 	return r;
 }
 
 void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 {
-	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
-
-	BUG_ON(mem == NULL);
+	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
 
-	amdgpu_bo_reserve(mem->bo, true);
-	amdgpu_bo_kunmap(mem->bo);
-	amdgpu_bo_unpin(mem->bo);
-	amdgpu_bo_unreserve(mem->bo);
-	amdgpu_bo_unref(&(mem->bo));
-	kfree(mem);
+	amdgpu_bo_reserve(bo, true);
+	amdgpu_bo_kunmap(bo);
+	amdgpu_bo_unpin(bo);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&(bo));
 }
 
 void get_local_mem_info(struct kgd_dev *kgd,
@@ -281,24 +286,29 @@ void get_local_mem_info(struct kgd_dev *kgd,
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
 		~((1ULL << 32) - 1);
-	resource_size_t aper_limit = adev->mc.aper_base + adev->mc.aper_size;
+	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;
 
 	memset(mem_info, 0, sizeof(*mem_info));
-	if (!(adev->mc.aper_base & address_mask || aper_limit & address_mask)) {
-		mem_info->local_mem_size_public = adev->mc.visible_vram_size;
-		mem_info->local_mem_size_private = adev->mc.real_vram_size -
-			adev->mc.visible_vram_size;
+	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
+		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
+		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
+			adev->gmc.visible_vram_size;
 	} else {
 		mem_info->local_mem_size_public = 0;
-		mem_info->local_mem_size_private = adev->mc.real_vram_size;
+		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
 	}
-	mem_info->vram_width = adev->mc.vram_width;
+	mem_info->vram_width = adev->gmc.vram_width;
 
 	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
-		&adev->mc.aper_base, &aper_limit,
+		&adev->gmc.aper_base, &aper_limit,
 		mem_info->local_mem_size_public,
 		mem_info->local_mem_size_private);
 
+	if (amdgpu_emu_mode == 1) {
+		mem_info->mem_clk_max = 100;
+		return;
+	}
+
 	if (amdgpu_sriov_vf(adev))
 		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
 	else
@@ -319,6 +329,9 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
 	/* the sclk is in quantas of 10kHz */
+	if (amdgpu_emu_mode == 1)
+		return 100;
+
 	if (amdgpu_sriov_vf(adev))
 		return adev->clock.default_sclk / 100;
 
@@ -354,3 +367,68 @@ uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
354 367
355 return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 368 return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
356} 369}
370
371int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
372 uint32_t vmid, uint64_t gpu_addr,
373 uint32_t *ib_cmd, uint32_t ib_len)
374{
375 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
376 struct amdgpu_job *job;
377 struct amdgpu_ib *ib;
378 struct amdgpu_ring *ring;
379 struct dma_fence *f = NULL;
380 int ret;
381
382 switch (engine) {
383 case KGD_ENGINE_MEC1:
384 ring = &adev->gfx.compute_ring[0];
385 break;
386 case KGD_ENGINE_SDMA1:
387 ring = &adev->sdma.instance[0].ring;
388 break;
389 case KGD_ENGINE_SDMA2:
390 ring = &adev->sdma.instance[1].ring;
391 break;
392 default:
393 pr_err("Invalid engine in IB submission: %d\n", engine);
394 ret = -EINVAL;
395 goto err;
396 }
397
398 ret = amdgpu_job_alloc(adev, 1, &job, NULL);
399 if (ret)
400 goto err;
401
402 ib = &job->ibs[0];
403 memset(ib, 0, sizeof(struct amdgpu_ib));
404
405 ib->gpu_addr = gpu_addr;
406 ib->ptr = ib_cmd;
407 ib->length_dw = ib_len;
408 /* This works for NO_HWS. TODO: need to handle without knowing VMID */
409 job->vmid = vmid;
410
411 ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
412 if (ret) {
413 DRM_ERROR("amdgpu: failed to schedule IB.\n");
414 goto err_ib_sched;
415 }
416
417 ret = dma_fence_wait(f, false);
418
419err_ib_sched:
420 dma_fence_put(f);
421 amdgpu_job_free(job);
422err:
423 return ret;
424}
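/*
 * Editor's sketch (illustrative, not part of this patch): ib_cmd is the CPU
 * view and gpu_addr the GPU address of the same GTT buffer (e.g. one obtained
 * from alloc_gtt_mem(), which provides both); ib_len is counted in dwords
 * (it lands in ib->length_dw). A hypothetical no-HWS caller that owns a
 * static VMID might do:
 *
 *	r = amdgpu_amdkfd_submit_ib(kgd, KGD_ENGINE_MEC1, vmid,
 *				    gpu_addr, cpu_ptr, num_dw);
 *
 * where vmid, gpu_addr, cpu_ptr and num_dw are placeholders supplied by the
 * caller.
 */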
425
426bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
427{
428 if (adev->kfd) {
429 if ((1 << vmid) & compute_vmid_bitmap)
430 return true;
431 }
432
433 return false;
434}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 2a519f9062ee..d7509b706b26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -28,13 +28,89 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/mmu_context.h> 29#include <linux/mmu_context.h>
30#include <kgd_kfd_interface.h> 30#include <kgd_kfd_interface.h>
31#include <drm/ttm/ttm_execbuf_util.h>
32#include "amdgpu_sync.h"
33#include "amdgpu_vm.h"
34
35extern const struct kgd2kfd_calls *kgd2kfd;
31 36
32struct amdgpu_device; 37struct amdgpu_device;
33 38
39struct kfd_bo_va_list {
40 struct list_head bo_list;
41 struct amdgpu_bo_va *bo_va;
42 void *kgd_dev;
43 bool is_mapped;
44 uint64_t va;
45 uint64_t pte_flags;
46};
47
34struct kgd_mem { 48struct kgd_mem {
49 struct mutex lock;
35 struct amdgpu_bo *bo; 50 struct amdgpu_bo *bo;
36 uint64_t gpu_addr; 51 struct list_head bo_va_list;
37 void *cpu_ptr; 52 /* protected by amdkfd_process_info.lock */
53 struct ttm_validate_buffer validate_list;
54 struct ttm_validate_buffer resv_list;
55 uint32_t domain;
56 unsigned int mapped_to_gpu_memory;
57 uint64_t va;
58
59 uint32_t mapping_flags;
60
61 struct amdkfd_process_info *process_info;
62
63 struct amdgpu_sync sync;
64
65 bool aql_queue;
66};
67
68/* KFD Memory Eviction */
69struct amdgpu_amdkfd_fence {
70 struct dma_fence base;
71 struct mm_struct *mm;
72 spinlock_t lock;
73 char timeline_name[TASK_COMM_LEN];
74};
75
76struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
77 struct mm_struct *mm);
78bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
79struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
80
81struct amdkfd_process_info {
82 /* List head of all VMs that belong to a KFD process */
83 struct list_head vm_list_head;
84 /* List head for all KFD BOs that belong to a KFD process. */
85 struct list_head kfd_bo_list;
86 /* Lock to protect kfd_bo_list */
87 struct mutex lock;
88
89 /* Number of VMs */
90 unsigned int n_vms;
91 /* Eviction Fence */
92 struct amdgpu_amdkfd_fence *eviction_fence;
93};
94
95/* struct amdkfd_vm -
 96 * For Memory Eviction, KGD requires a mechanism to keep track of all KFD BOs
97 * belonging to a KFD process. All the VMs belonging to the same process point
98 * to the same amdkfd_process_info.
99 */
100struct amdkfd_vm {
101 /* Keep base as the first member for pointer compatibility between
102 * amdkfd_vm and amdgpu_vm.
103 */
104 struct amdgpu_vm base;
105
106 /* List node in amdkfd_process_info.vm_list_head*/
107 struct list_head vm_list_node;
108
109 struct amdgpu_device *adev;
110 /* Points to the KFD process VM info*/
111 struct amdkfd_process_info *process_info;
112
113 uint64_t pd_phys_addr;
38}; 114};
39 115
40int amdgpu_amdkfd_init(void); 116int amdgpu_amdkfd_init(void);
@@ -48,9 +124,15 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
48void amdgpu_amdkfd_device_init(struct amdgpu_device *adev); 124void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
49void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev); 125void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
50 126
127int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
128 uint32_t vmid, uint64_t gpu_addr,
129 uint32_t *ib_cmd, uint32_t ib_len);
130
51struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void); 131struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
52struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void); 132struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
53 133
134bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
135
54/* Shared API */ 136/* Shared API */
55int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, 137int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
56 void **mem_obj, uint64_t *gpu_addr, 138 void **mem_obj, uint64_t *gpu_addr,
@@ -79,4 +161,30 @@ uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
79 valid; \ 161 valid; \
80 }) 162 })
81 163
164/* GPUVM API */
165int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
166 void **process_info,
167 struct dma_fence **ef);
168void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
169uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
170int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
171 struct kgd_dev *kgd, uint64_t va, uint64_t size,
172 void *vm, struct kgd_mem **mem,
173 uint64_t *offset, uint32_t flags);
174int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
175 struct kgd_dev *kgd, struct kgd_mem *mem);
176int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
177 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
178int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
179 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
180int amdgpu_amdkfd_gpuvm_sync_memory(
181 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
182int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
183 struct kgd_mem *mem, void **kptr, uint64_t *size);
184int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
185 struct dma_fence **ef);
186
187void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
188void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
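/* Editor's note (assumed call order, inferred from the declarations above):
 * KFD is expected to drive this GPUVM API roughly as
 *
 *	create_process_vm -> alloc_memory_of_gpu -> map_memory_to_gpu ->
 *	sync_memory -> unmap_memory_from_gpu -> free_memory_of_gpu ->
 *	destroy_process_vm
 *
 * with restore_process_bos invoked from the eviction/restore worker described
 * in amdgpu_amdkfd_fence.c.
 */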
189
82#endif /* AMDGPU_AMDKFD_H_INCLUDED */ 190#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
new file mode 100644
index 000000000000..2c14025e5e76
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright 2016-2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/dma-fence.h>
24#include <linux/spinlock.h>
25#include <linux/atomic.h>
26#include <linux/stacktrace.h>
27#include <linux/sched.h>
28#include <linux/slab.h>
29#include <linux/sched/mm.h>
30#include "amdgpu_amdkfd.h"
31
32static const struct dma_fence_ops amdkfd_fence_ops;
33static atomic_t fence_seq = ATOMIC_INIT(0);
34
35/* Eviction Fence
36 * Fence helper functions to deal with KFD memory eviction.
37 * Big Idea - Since KFD submissions are done by user queues, a BO cannot be
38 * evicted unless all the user queues for that process are evicted.
39 *
40 * All the BOs in a process share an eviction fence. When process X wants
41 * to map VRAM memory but TTM can't find enough space, TTM will attempt to
42 * evict BOs from its LRU list. TTM checks if the BO is valuable to evict
43 * by calling ttm_bo_driver->eviction_valuable().
44 *
45 * ttm_bo_driver->eviction_valuable() - will return false if the BO belongs
46 * to process X. Otherwise, it will return true to indicate BO can be
47 * evicted by TTM.
48 *
49 * If ttm_bo_driver->eviction_valuable returns true, then TTM will continue
 50 * the eviction process for that BO by calling ttm_bo_evict --> amdgpu_bo_move
 51 * --> amdgpu_copy_buffer(). This sets up a job in the GPU scheduler.
52 *
53 * GPU Scheduler (amd_sched_main) - sets up a cb (fence_add_callback) to
 54 * notify when the BO is free to move. fence_add_callback --> enable_signaling
55 * --> amdgpu_amdkfd_fence.enable_signaling
56 *
57 * amdgpu_amdkfd_fence.enable_signaling - Start a work item that will quiesce
58 * user queues and signal fence. The work item will also start another delayed
59 * work item to restore BOs
60 */
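/*
 * Editor's sketch (illustrative; mirrors how amdgpu_amdkfd_gpuvm.c uses this
 * API later in this patch): one eviction fence is created per process and
 * attached as a shared fence to each KFD BO / page directory reservation:
 *
 *	ef = amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
 *					current->mm);
 *	amdgpu_bo_fence(bo, &ef->base, true);	// add as shared fence
 */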
61
62struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
63 struct mm_struct *mm)
64{
65 struct amdgpu_amdkfd_fence *fence;
66
67 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
68 if (fence == NULL)
69 return NULL;
70
71 /* This reference gets released in amdkfd_fence_release */
72 mmgrab(mm);
73 fence->mm = mm;
74 get_task_comm(fence->timeline_name, current);
75 spin_lock_init(&fence->lock);
76
77 dma_fence_init(&fence->base, &amdkfd_fence_ops, &fence->lock,
78 context, atomic_inc_return(&fence_seq));
79
80 return fence;
81}
82
83struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
84{
85 struct amdgpu_amdkfd_fence *fence;
86
87 if (!f)
88 return NULL;
89
90 fence = container_of(f, struct amdgpu_amdkfd_fence, base);
91 if (fence && f->ops == &amdkfd_fence_ops)
92 return fence;
93
94 return NULL;
95}
96
97static const char *amdkfd_fence_get_driver_name(struct dma_fence *f)
98{
99 return "amdgpu_amdkfd_fence";
100}
101
102static const char *amdkfd_fence_get_timeline_name(struct dma_fence *f)
103{
104 struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
105
106 return fence->timeline_name;
107}
108
109/**
110 * amdkfd_fence_enable_signaling - This gets called when TTM wants to evict
111 * a KFD BO and schedules a job to move the BO.
 112 * If the fence is already signaled, return true.
 113 * If the fence is not signaled, schedule a work item to evict the KFD process.
114 */
115static bool amdkfd_fence_enable_signaling(struct dma_fence *f)
116{
117 struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
118
119 if (!fence)
120 return false;
121
122 if (dma_fence_is_signaled(f))
123 return true;
124
125 if (!kgd2kfd->schedule_evict_and_restore_process(fence->mm, f))
126 return true;
127
128 return false;
129}
130
131/**
 132 * amdkfd_fence_release - callback run when the fence can be freed
 133 *
 134 * @f: [IN] fence
135 *
136 * This function is called when the reference count becomes zero.
137 * Drops the mm_struct reference and RCU schedules freeing up the fence.
138 */
139static void amdkfd_fence_release(struct dma_fence *f)
140{
141 struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
142
143 /* Unconditionally signal the fence. The process is getting
144 * terminated.
145 */
146 if (WARN_ON(!fence))
147 return; /* Not an amdgpu_amdkfd_fence */
148
149 mmdrop(fence->mm);
150 kfree_rcu(f, rcu);
151}
152
153/**
 154 * amdkfd_fence_check_mm - Check whether @mm is the same as the mm of fence @f;
 155 * return true if they match, false otherwise.
156 *
157 * @f: [IN] fence
158 * @mm: [IN] mm that needs to be verified
159 */
160bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
161{
162 struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
163
164 if (!fence)
165 return false;
166 else if (fence->mm == mm)
167 return true;
168
169 return false;
170}
171
172static const struct dma_fence_ops amdkfd_fence_ops = {
173 .get_driver_name = amdkfd_fence_get_driver_name,
174 .get_timeline_name = amdkfd_fence_get_timeline_name,
175 .enable_signaling = amdkfd_fence_enable_signaling,
176 .signaled = NULL,
177 .wait = dma_fence_default_wait,
178 .release = amdkfd_fence_release,
179};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index a9e6aea0e5f8..7485c376b90e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -139,11 +139,14 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
139static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid); 139static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
140static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, 140static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
141 uint8_t vmid); 141 uint8_t vmid);
142static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
143 142
144static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); 143static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
145static void set_scratch_backing_va(struct kgd_dev *kgd, 144static void set_scratch_backing_va(struct kgd_dev *kgd,
146 uint64_t va, uint32_t vmid); 145 uint64_t va, uint32_t vmid);
146static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
147 uint32_t page_table_base);
148static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
149static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
147 150
148/* Because of REG_GET_FIELD() being used, we put this function in the 151/* Because of REG_GET_FIELD() being used, we put this function in the
149 * asic specific file. 152 * asic specific file.
@@ -196,12 +199,25 @@ static const struct kfd2kgd_calls kfd2kgd = {
196 .address_watch_get_offset = kgd_address_watch_get_offset, 199 .address_watch_get_offset = kgd_address_watch_get_offset,
197 .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid, 200 .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
198 .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, 201 .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
199 .write_vmid_invalidate_request = write_vmid_invalidate_request,
200 .get_fw_version = get_fw_version, 202 .get_fw_version = get_fw_version,
201 .set_scratch_backing_va = set_scratch_backing_va, 203 .set_scratch_backing_va = set_scratch_backing_va,
202 .get_tile_config = get_tile_config, 204 .get_tile_config = get_tile_config,
203 .get_cu_info = get_cu_info, 205 .get_cu_info = get_cu_info,
204 .get_vram_usage = amdgpu_amdkfd_get_vram_usage 206 .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
207 .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
208 .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
209 .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
210 .set_vm_context_page_table_base = set_vm_context_page_table_base,
211 .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
212 .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
213 .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
214 .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
215 .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
216 .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
217 .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
218 .invalidate_tlbs = invalidate_tlbs,
219 .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
220 .submit_ib = amdgpu_amdkfd_submit_ib,
205}; 221};
206 222
207struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) 223struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -787,14 +803,7 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
787 struct amdgpu_device *adev = (struct amdgpu_device *) kgd; 803 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
788 804
789 reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); 805 reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
790 return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; 806 return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
791}
792
793static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
794{
795 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
796
797 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
798} 807}
799 808
800static void set_scratch_backing_va(struct kgd_dev *kgd, 809static void set_scratch_backing_va(struct kgd_dev *kgd,
@@ -812,8 +821,6 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
812 struct amdgpu_device *adev = (struct amdgpu_device *) kgd; 821 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
813 const union amdgpu_firmware_header *hdr; 822 const union amdgpu_firmware_header *hdr;
814 823
815 BUG_ON(kgd == NULL);
816
817 switch (type) { 824 switch (type) {
818 case KGD_ENGINE_PFP: 825 case KGD_ENGINE_PFP:
819 hdr = (const union amdgpu_firmware_header *) 826 hdr = (const union amdgpu_firmware_header *)
@@ -866,3 +873,50 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
866 return hdr->common.ucode_version; 873 return hdr->common.ucode_version;
867} 874}
868 875
876static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
877 uint32_t page_table_base)
878{
879 struct amdgpu_device *adev = get_amdgpu_device(kgd);
880
881 if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
882 pr_err("trying to set page table base for wrong VMID\n");
883 return;
884 }
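	/* KFD VMIDs are assumed to start at 8; VM_CONTEXTs 8..15 have
	 * consecutive base-address registers, hence the (vmid - 8) offset.
	 */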
885 WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
886}
887
888static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
889{
890 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
891 int vmid;
892 unsigned int tmp;
893
894 for (vmid = 0; vmid < 16; vmid++) {
895 if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
896 continue;
897
898 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
899 if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
900 (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
901 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
902 RREG32(mmVM_INVALIDATE_RESPONSE);
903 break;
904 }
905 }
906
907 return 0;
908}
909
910static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
911{
912 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
913
914 if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
915 pr_err("non kfd vmid\n");
916 return 0;
917 }
918
919 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
920 RREG32(mmVM_INVALIDATE_RESPONSE);
921 return 0;
922}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index b127259d7d85..7be453494423 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -81,7 +81,6 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
81 uint32_t queue_id); 81 uint32_t queue_id);
82static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, 82static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
83 unsigned int utimeout); 83 unsigned int utimeout);
84static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
85static int kgd_address_watch_disable(struct kgd_dev *kgd); 84static int kgd_address_watch_disable(struct kgd_dev *kgd);
86static int kgd_address_watch_execute(struct kgd_dev *kgd, 85static int kgd_address_watch_execute(struct kgd_dev *kgd,
87 unsigned int watch_point_id, 86 unsigned int watch_point_id,
@@ -99,10 +98,13 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
99 uint8_t vmid); 98 uint8_t vmid);
100static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, 99static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
101 uint8_t vmid); 100 uint8_t vmid);
102static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
103static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); 101static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
104static void set_scratch_backing_va(struct kgd_dev *kgd, 102static void set_scratch_backing_va(struct kgd_dev *kgd,
105 uint64_t va, uint32_t vmid); 103 uint64_t va, uint32_t vmid);
104static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
105 uint32_t page_table_base);
106static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
107static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
106 108
107/* Because of REG_GET_FIELD() being used, we put this function in the 109/* Because of REG_GET_FIELD() being used, we put this function in the
108 * asic specific file. 110 * asic specific file.
@@ -157,12 +159,25 @@ static const struct kfd2kgd_calls kfd2kgd = {
157 get_atc_vmid_pasid_mapping_pasid, 159 get_atc_vmid_pasid_mapping_pasid,
158 .get_atc_vmid_pasid_mapping_valid = 160 .get_atc_vmid_pasid_mapping_valid =
159 get_atc_vmid_pasid_mapping_valid, 161 get_atc_vmid_pasid_mapping_valid,
160 .write_vmid_invalidate_request = write_vmid_invalidate_request,
161 .get_fw_version = get_fw_version, 162 .get_fw_version = get_fw_version,
162 .set_scratch_backing_va = set_scratch_backing_va, 163 .set_scratch_backing_va = set_scratch_backing_va,
163 .get_tile_config = get_tile_config, 164 .get_tile_config = get_tile_config,
164 .get_cu_info = get_cu_info, 165 .get_cu_info = get_cu_info,
165 .get_vram_usage = amdgpu_amdkfd_get_vram_usage 166 .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
167 .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
168 .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
169 .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
170 .set_vm_context_page_table_base = set_vm_context_page_table_base,
171 .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
172 .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
173 .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
174 .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
175 .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
176 .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
177 .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
178 .invalidate_tlbs = invalidate_tlbs,
179 .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
180 .submit_ib = amdgpu_amdkfd_submit_ib,
166}; 181};
167 182
168struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) 183struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -704,14 +719,7 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
704 struct amdgpu_device *adev = (struct amdgpu_device *) kgd; 719 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
705 720
706 reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); 721 reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
707 return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; 722 return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
708}
709
710static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
711{
712 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
713
714 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
715} 723}
716 724
717static int kgd_address_watch_disable(struct kgd_dev *kgd) 725static int kgd_address_watch_disable(struct kgd_dev *kgd)
@@ -775,8 +783,6 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
775 struct amdgpu_device *adev = (struct amdgpu_device *) kgd; 783 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
776 const union amdgpu_firmware_header *hdr; 784 const union amdgpu_firmware_header *hdr;
777 785
778 BUG_ON(kgd == NULL);
779
780 switch (type) { 786 switch (type) {
781 case KGD_ENGINE_PFP: 787 case KGD_ENGINE_PFP:
782 hdr = (const union amdgpu_firmware_header *) 788 hdr = (const union amdgpu_firmware_header *)
@@ -828,3 +834,51 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
828 /* Only 12 bit in use*/ 834 /* Only 12 bit in use*/
829 return hdr->common.ucode_version; 835 return hdr->common.ucode_version;
830} 836}
837
838static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
839 uint32_t page_table_base)
840{
841 struct amdgpu_device *adev = get_amdgpu_device(kgd);
842
843 if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
844 pr_err("trying to set page table base for wrong VMID\n");
845 return;
846 }
847 WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
848}
849
850static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
851{
852 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
853 int vmid;
854 unsigned int tmp;
855
856 for (vmid = 0; vmid < 16; vmid++) {
857 if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
858 continue;
859
860 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
861 if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
862 (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
863 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
864 RREG32(mmVM_INVALIDATE_RESPONSE);
865 break;
866 }
867 }
868
869 return 0;
870}
871
872static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
873{
874 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
875
876 if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
877 pr_err("non kfd vmid %d\n", vmid);
878 return -EINVAL;
879 }
880
881 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
882 RREG32(mmVM_INVALIDATE_RESPONSE);
883 return 0;
884}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
new file mode 100644
index 000000000000..a12a1654e124
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -0,0 +1,1506 @@
1/*
2 * Copyright 2014-2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#define pr_fmt(fmt) "kfd2kgd: " fmt
24
25#include <linux/list.h>
26#include <drm/drmP.h>
27#include "amdgpu_object.h"
28#include "amdgpu_vm.h"
29#include "amdgpu_amdkfd.h"
30
31/* Special VM and GART address alignment needed for VI pre-Fiji due to
32 * a HW bug.
33 */
34#define VI_BO_SIZE_ALIGN (0x8000)
35
36/* Impose limit on how much memory KFD can use */
37static struct {
38 uint64_t max_system_mem_limit;
39 int64_t system_mem_used;
40 spinlock_t mem_limit_lock;
41} kfd_mem_limit;
42
43/* Struct used for amdgpu_amdkfd_bo_validate */
44struct amdgpu_vm_parser {
45 uint32_t domain;
46 bool wait;
47};
48
49static const char * const domain_bit_to_string[] = {
50 "CPU",
51 "GTT",
52 "VRAM",
53 "GDS",
54 "GWS",
55 "OA"
56};
57
58#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
59
60
61
62static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
63{
64 return (struct amdgpu_device *)kgd;
65}
66
67static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
68 struct kgd_mem *mem)
69{
70 struct kfd_bo_va_list *entry;
71
72 list_for_each_entry(entry, &mem->bo_va_list, bo_list)
73 if (entry->bo_va->base.vm == avm)
74 return false;
75
76 return true;
77}
78
 79/* Set memory usage limits. Currently, the limits are:
 80 * System (kernel) memory - 3/8th of system RAM
81 */
82void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
83{
84 struct sysinfo si;
85 uint64_t mem;
86
87 si_meminfo(&si);
88 mem = si.totalram - si.totalhigh;
89 mem *= si.mem_unit;
90
91 spin_lock_init(&kfd_mem_limit.mem_limit_lock);
92 kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
93 pr_debug("Kernel memory limit %lluM\n",
94 (kfd_mem_limit.max_system_mem_limit >> 20));
95}
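/*
 * Editor's note: (mem >> 1) - (mem >> 3) = 1/2 - 1/8 = 3/8 of low system RAM.
 * For example, with 16 GiB of usable RAM the KFD system memory limit works
 * out to 8 GiB - 2 GiB = 6 GiB.
 */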
96
97static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
98 uint64_t size, u32 domain)
99{
100 size_t acc_size;
101 int ret = 0;
102
103 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
104 sizeof(struct amdgpu_bo));
105
106 spin_lock(&kfd_mem_limit.mem_limit_lock);
107 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
108 if (kfd_mem_limit.system_mem_used + (acc_size + size) >
109 kfd_mem_limit.max_system_mem_limit) {
110 ret = -ENOMEM;
111 goto err_no_mem;
112 }
113 kfd_mem_limit.system_mem_used += (acc_size + size);
114 }
115err_no_mem:
116 spin_unlock(&kfd_mem_limit.mem_limit_lock);
117 return ret;
118}
119
120static void unreserve_system_mem_limit(struct amdgpu_device *adev,
121 uint64_t size, u32 domain)
122{
123 size_t acc_size;
124
125 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
126 sizeof(struct amdgpu_bo));
127
128 spin_lock(&kfd_mem_limit.mem_limit_lock);
129 if (domain == AMDGPU_GEM_DOMAIN_GTT)
130 kfd_mem_limit.system_mem_used -= (acc_size + size);
131 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
132 "kfd system memory accounting unbalanced");
133
134 spin_unlock(&kfd_mem_limit.mem_limit_lock);
135}
136
137void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
138{
139 spin_lock(&kfd_mem_limit.mem_limit_lock);
140
141 if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
142 kfd_mem_limit.system_mem_used -=
143 (bo->tbo.acc_size + amdgpu_bo_size(bo));
144 }
145 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
146 "kfd system memory accounting unbalanced");
147
148 spin_unlock(&kfd_mem_limit.mem_limit_lock);
149}
150
151
152/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
153 * reservation object.
154 *
155 * @bo: [IN] Remove eviction fence(s) from this BO
156 * @ef: [IN] If ef is specified, then this eviction fence is removed if it
157 * is present in the shared list.
158 * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
159 * from BO's reservation object shared list.
160 * @ef_count: [OUT] Number of fences in ef_list.
161 *
162 * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
 163 * called to restore the eviction fences and to avoid a memory leak. This is
164 * useful for shared BOs.
165 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
166 */
167static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
168 struct amdgpu_amdkfd_fence *ef,
169 struct amdgpu_amdkfd_fence ***ef_list,
170 unsigned int *ef_count)
171{
172 struct reservation_object_list *fobj;
173 struct reservation_object *resv;
174 unsigned int i = 0, j = 0, k = 0, shared_count;
175 unsigned int count = 0;
176 struct amdgpu_amdkfd_fence **fence_list;
177
178 if (!ef && !ef_list)
179 return -EINVAL;
180
181 if (ef_list) {
182 *ef_list = NULL;
183 *ef_count = 0;
184 }
185
186 resv = bo->tbo.resv;
187 fobj = reservation_object_get_list(resv);
188
189 if (!fobj)
190 return 0;
191
192 preempt_disable();
193 write_seqcount_begin(&resv->seq);
194
 195 /* Go through all the shared fences in the reservation object. If
196 * ef is specified and it exists in the list, remove it and reduce the
197 * count. If ef is not specified, then get the count of eviction fences
198 * present.
199 */
200 shared_count = fobj->shared_count;
201 for (i = 0; i < shared_count; ++i) {
202 struct dma_fence *f;
203
204 f = rcu_dereference_protected(fobj->shared[i],
205 reservation_object_held(resv));
206
207 if (ef) {
208 if (f->context == ef->base.context) {
209 dma_fence_put(f);
210 fobj->shared_count--;
211 } else {
212 RCU_INIT_POINTER(fobj->shared[j++], f);
213 }
214 } else if (to_amdgpu_amdkfd_fence(f))
215 count++;
216 }
217 write_seqcount_end(&resv->seq);
218 preempt_enable();
219
220 if (ef || !count)
221 return 0;
222
223 /* Alloc memory for count number of eviction fence pointers. Fill the
224 * ef_list array and ef_count
225 */
226 fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
227 GFP_KERNEL);
228 if (!fence_list)
229 return -ENOMEM;
230
231 preempt_disable();
232 write_seqcount_begin(&resv->seq);
233
234 j = 0;
235 for (i = 0; i < shared_count; ++i) {
236 struct dma_fence *f;
237 struct amdgpu_amdkfd_fence *efence;
238
239 f = rcu_dereference_protected(fobj->shared[i],
240 reservation_object_held(resv));
241
242 efence = to_amdgpu_amdkfd_fence(f);
243 if (efence) {
244 fence_list[k++] = efence;
245 fobj->shared_count--;
246 } else {
247 RCU_INIT_POINTER(fobj->shared[j++], f);
248 }
249 }
250
251 write_seqcount_end(&resv->seq);
252 preempt_enable();
253
254 *ef_list = fence_list;
255 *ef_count = k;
256
257 return 0;
258}
259
260/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
261 * reservation object.
262 *
263 * @bo: [IN] Add eviction fences to this BO
264 * @ef_list: [IN] List of eviction fences to be added
265 * @ef_count: [IN] Number of fences in ef_list.
266 *
267 * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
268 * function.
269 */
270static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
271 struct amdgpu_amdkfd_fence **ef_list,
272 unsigned int ef_count)
273{
274 int i;
275
276 if (!ef_list || !ef_count)
277 return;
278
279 for (i = 0; i < ef_count; i++) {
280 amdgpu_bo_fence(bo, &ef_list[i]->base, true);
281 /* Re-adding the fence takes an additional reference. Drop that
282 * reference.
283 */
284 dma_fence_put(&ef_list[i]->base);
285 }
286
287 kfree(ef_list);
288}
289
290static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
291 bool wait)
292{
293 struct ttm_operation_ctx ctx = { false, false };
294 int ret;
295
296 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
297 "Called with userptr BO"))
298 return -EINVAL;
299
300 amdgpu_ttm_placement_from_domain(bo, domain);
301
302 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
303 if (ret)
304 goto validate_fail;
305 if (wait) {
306 struct amdgpu_amdkfd_fence **ef_list;
307 unsigned int ef_count;
308
309 ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
310 &ef_count);
311 if (ret)
312 goto validate_fail;
313
314 ttm_bo_wait(&bo->tbo, false, false);
315 amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
316 }
317
318validate_fail:
319 return ret;
320}
321
322static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
323{
324 struct amdgpu_vm_parser *p = param;
325
326 return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
327}
328
329/* vm_validate_pt_pd_bos - Validate page table and directory BOs
330 *
331 * Page directories are not updated here because huge page handling
332 * during page table updates can invalidate page directory entries
333 * again. Page directories are only updated after updating page
334 * tables.
335 */
336static int vm_validate_pt_pd_bos(struct amdkfd_vm *vm)
337{
338 struct amdgpu_bo *pd = vm->base.root.base.bo;
339 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
340 struct amdgpu_vm_parser param;
341 uint64_t addr, flags = AMDGPU_PTE_VALID;
342 int ret;
343
344 param.domain = AMDGPU_GEM_DOMAIN_VRAM;
345 param.wait = false;
346
347 ret = amdgpu_vm_validate_pt_bos(adev, &vm->base, amdgpu_amdkfd_validate,
348 &param);
349 if (ret) {
350 pr_err("amdgpu: failed to validate PT BOs\n");
351 return ret;
352 }
353
354 ret = amdgpu_amdkfd_validate(&param, pd);
355 if (ret) {
356 pr_err("amdgpu: failed to validate PD\n");
357 return ret;
358 }
359
360 addr = amdgpu_bo_gpu_offset(vm->base.root.base.bo);
361 amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
362 vm->pd_phys_addr = addr;
363
364 if (vm->base.use_cpu_for_update) {
365 ret = amdgpu_bo_kmap(pd, NULL);
366 if (ret) {
367 pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
368 return ret;
369 }
370 }
371
372 return 0;
373}
374
375static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
376 struct dma_fence *f)
377{
378 int ret = amdgpu_sync_fence(adev, sync, f, false);
379
380 /* Sync objects can't handle multiple GPUs (contexts) updating
381 * sync->last_vm_update. Fortunately we don't need it for
382 * KFD's purposes, so we can just drop that fence.
383 */
384 if (sync->last_vm_update) {
385 dma_fence_put(sync->last_vm_update);
386 sync->last_vm_update = NULL;
387 }
388
389 return ret;
390}
391
392static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
393{
394 struct amdgpu_bo *pd = vm->root.base.bo;
395 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
396 int ret;
397
398 ret = amdgpu_vm_update_directories(adev, vm);
399 if (ret)
400 return ret;
401
402 return sync_vm_fence(adev, sync, vm->last_update);
403}
404
405/* add_bo_to_vm - Add a BO to a VM
406 *
 407 * Everything that needs to be done only once when a BO is first added
408 * to a VM. It can later be mapped and unmapped many times without
409 * repeating these steps.
410 *
411 * 1. Allocate and initialize BO VA entry data structure
412 * 2. Add BO to the VM
413 * 3. Determine ASIC-specific PTE flags
414 * 4. Alloc page tables and directories if needed
415 * 4a. Validate new page tables and directories
416 */
417static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
418 struct amdgpu_vm *avm, bool is_aql,
419 struct kfd_bo_va_list **p_bo_va_entry)
420{
421 int ret;
422 struct kfd_bo_va_list *bo_va_entry;
423 struct amdkfd_vm *kvm = container_of(avm,
424 struct amdkfd_vm, base);
425 struct amdgpu_bo *pd = avm->root.base.bo;
426 struct amdgpu_bo *bo = mem->bo;
427 uint64_t va = mem->va;
428 struct list_head *list_bo_va = &mem->bo_va_list;
429 unsigned long bo_size = bo->tbo.mem.size;
430
431 if (!va) {
432 pr_err("Invalid VA when adding BO to VM\n");
433 return -EINVAL;
434 }
435
436 if (is_aql)
437 va += bo_size;
438
439 bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
440 if (!bo_va_entry)
441 return -ENOMEM;
442
443 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
444 va + bo_size, avm);
445
446 /* Add BO to VM internal data structures*/
447 bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, avm, bo);
448 if (!bo_va_entry->bo_va) {
449 ret = -EINVAL;
450 pr_err("Failed to add BO object to VM. ret == %d\n",
451 ret);
452 goto err_vmadd;
453 }
454
455 bo_va_entry->va = va;
456 bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
457 mem->mapping_flags);
458 bo_va_entry->kgd_dev = (void *)adev;
459 list_add(&bo_va_entry->bo_list, list_bo_va);
460
461 if (p_bo_va_entry)
462 *p_bo_va_entry = bo_va_entry;
463
464 /* Allocate new page tables if needed and validate
 465 * them. Clearing and validation of new page tables need to wait
466 * on move fences. We don't want that to trigger the eviction
467 * fence, so remove it temporarily.
468 */
469 amdgpu_amdkfd_remove_eviction_fence(pd,
470 kvm->process_info->eviction_fence,
471 NULL, NULL);
472
473 ret = amdgpu_vm_alloc_pts(adev, avm, va, amdgpu_bo_size(bo));
474 if (ret) {
475 pr_err("Failed to allocate pts, err=%d\n", ret);
476 goto err_alloc_pts;
477 }
478
479 ret = vm_validate_pt_pd_bos(kvm);
480 if (ret) {
481 pr_err("validate_pt_pd_bos() failed\n");
482 goto err_alloc_pts;
483 }
484
485 /* Add the eviction fence back */
486 amdgpu_bo_fence(pd, &kvm->process_info->eviction_fence->base, true);
487
488 return 0;
489
490err_alloc_pts:
491 amdgpu_bo_fence(pd, &kvm->process_info->eviction_fence->base, true);
492 amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
493 list_del(&bo_va_entry->bo_list);
494err_vmadd:
495 kfree(bo_va_entry);
496 return ret;
497}
498
499static void remove_bo_from_vm(struct amdgpu_device *adev,
500 struct kfd_bo_va_list *entry, unsigned long size)
501{
502 pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
503 entry->va,
504 entry->va + size, entry);
505 amdgpu_vm_bo_rmv(adev, entry->bo_va);
506 list_del(&entry->bo_list);
507 kfree(entry);
508}
509
510static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
511 struct amdkfd_process_info *process_info)
512{
513 struct ttm_validate_buffer *entry = &mem->validate_list;
514 struct amdgpu_bo *bo = mem->bo;
515
516 INIT_LIST_HEAD(&entry->head);
517 entry->shared = true;
518 entry->bo = &bo->tbo;
519 mutex_lock(&process_info->lock);
520 list_add_tail(&entry->head, &process_info->kfd_bo_list);
521 mutex_unlock(&process_info->lock);
522}
523
524/* Reserving a BO and its page table BOs must happen atomically to
525 * avoid deadlocks. Some operations update multiple VMs at once. Track
526 * all the reservation info in a context structure. Optionally a sync
527 * object can track VM updates.
528 */
529struct bo_vm_reservation_context {
530 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
531 unsigned int n_vms; /* Number of VMs reserved */
532 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
533 struct ww_acquire_ctx ticket; /* Reservation ticket */
534 struct list_head list, duplicates; /* BO lists */
535 struct amdgpu_sync *sync; /* Pointer to sync object */
536 bool reserved; /* Whether BOs are reserved */
537};
538
539enum bo_vm_match {
540 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
541 BO_VM_MAPPED, /* Match VMs where a BO is mapped */
542 BO_VM_ALL, /* Match all VMs a BO was added to */
543};
544
545/**
546 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
547 * @mem: KFD BO structure.
548 * @vm: the VM to reserve.
549 * @ctx: the struct that will be used in unreserve_bo_and_vms().
550 */
551static int reserve_bo_and_vm(struct kgd_mem *mem,
552 struct amdgpu_vm *vm,
553 struct bo_vm_reservation_context *ctx)
554{
555 struct amdgpu_bo *bo = mem->bo;
556 int ret;
557
558 WARN_ON(!vm);
559
560 ctx->reserved = false;
561 ctx->n_vms = 1;
562 ctx->sync = &mem->sync;
563
564 INIT_LIST_HEAD(&ctx->list);
565 INIT_LIST_HEAD(&ctx->duplicates);
566
567 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
568 if (!ctx->vm_pd)
569 return -ENOMEM;
570
571 ctx->kfd_bo.robj = bo;
572 ctx->kfd_bo.priority = 0;
573 ctx->kfd_bo.tv.bo = &bo->tbo;
574 ctx->kfd_bo.tv.shared = true;
575 ctx->kfd_bo.user_pages = NULL;
576 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
577
578 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
579
580 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
581 false, &ctx->duplicates);
582 if (!ret)
583 ctx->reserved = true;
584 else {
585 pr_err("Failed to reserve buffers in ttm\n");
586 kfree(ctx->vm_pd);
587 ctx->vm_pd = NULL;
588 }
589
590 return ret;
591}
592
593/**
594 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
595 * @mem: KFD BO structure.
 596 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 597 * are used. Otherwise, only the single given VM is used.
598 * @map_type: the mapping status that will be used to filter the VMs.
599 * @ctx: the struct that will be used in unreserve_bo_and_vms().
600 *
601 * Returns 0 for success, negative for failure.
602 */
603static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
604 struct amdgpu_vm *vm, enum bo_vm_match map_type,
605 struct bo_vm_reservation_context *ctx)
606{
607 struct amdgpu_bo *bo = mem->bo;
608 struct kfd_bo_va_list *entry;
609 unsigned int i;
610 int ret;
611
612 ctx->reserved = false;
613 ctx->n_vms = 0;
614 ctx->vm_pd = NULL;
615 ctx->sync = &mem->sync;
616
617 INIT_LIST_HEAD(&ctx->list);
618 INIT_LIST_HEAD(&ctx->duplicates);
619
620 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
621 if ((vm && vm != entry->bo_va->base.vm) ||
622 (entry->is_mapped != map_type
623 && map_type != BO_VM_ALL))
624 continue;
625
626 ctx->n_vms++;
627 }
628
629 if (ctx->n_vms != 0) {
630 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
631 GFP_KERNEL);
632 if (!ctx->vm_pd)
633 return -ENOMEM;
634 }
635
636 ctx->kfd_bo.robj = bo;
637 ctx->kfd_bo.priority = 0;
638 ctx->kfd_bo.tv.bo = &bo->tbo;
639 ctx->kfd_bo.tv.shared = true;
640 ctx->kfd_bo.user_pages = NULL;
641 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
642
643 i = 0;
644 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
645 if ((vm && vm != entry->bo_va->base.vm) ||
646 (entry->is_mapped != map_type
647 && map_type != BO_VM_ALL))
648 continue;
649
650 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
651 &ctx->vm_pd[i]);
652 i++;
653 }
654
655 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
656 false, &ctx->duplicates);
657 if (!ret)
658 ctx->reserved = true;
659 else
660 pr_err("Failed to reserve buffers in ttm.\n");
661
662 if (ret) {
663 kfree(ctx->vm_pd);
664 ctx->vm_pd = NULL;
665 }
666
667 return ret;
668}
669
670/**
671 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
672 * @ctx: Reservation context to unreserve
673 * @wait: Optionally wait for a sync object representing pending VM updates
674 * @intr: Whether the wait is interruptible
675 *
676 * Also frees any resources allocated in
677 * reserve_bo_and_(cond_)vm(s). Returns the status from
678 * amdgpu_sync_wait.
679 */
680static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
681 bool wait, bool intr)
682{
683 int ret = 0;
684
685 if (wait)
686 ret = amdgpu_sync_wait(ctx->sync, intr);
687
688 if (ctx->reserved)
689 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
690 kfree(ctx->vm_pd);
691
692 ctx->sync = NULL;
693
694 ctx->reserved = false;
695 ctx->vm_pd = NULL;
696
697 return ret;
698}
699
700static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
701 struct kfd_bo_va_list *entry,
702 struct amdgpu_sync *sync)
703{
704 struct amdgpu_bo_va *bo_va = entry->bo_va;
705 struct amdgpu_vm *vm = bo_va->base.vm;
706 struct amdkfd_vm *kvm = container_of(vm, struct amdkfd_vm, base);
707 struct amdgpu_bo *pd = vm->root.base.bo;
708
709 /* Remove eviction fence from PD (and thereby from PTs too as
710 * they share the resv. object). Otherwise during PT update
711 * job (see amdgpu_vm_bo_update_mapping), eviction fence would
712 * get added to job->sync object and job execution would
713 * trigger the eviction fence.
714 */
715 amdgpu_amdkfd_remove_eviction_fence(pd,
716 kvm->process_info->eviction_fence,
717 NULL, NULL);
718 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
719
720 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
721
722 /* Add the eviction fence back */
723 amdgpu_bo_fence(pd, &kvm->process_info->eviction_fence->base, true);
724
725 sync_vm_fence(adev, sync, bo_va->last_pt_update);
726
727 return 0;
728}
729
730static int update_gpuvm_pte(struct amdgpu_device *adev,
731 struct kfd_bo_va_list *entry,
732 struct amdgpu_sync *sync)
733{
734 int ret;
735 struct amdgpu_vm *vm;
736 struct amdgpu_bo_va *bo_va;
737 struct amdgpu_bo *bo;
738
739 bo_va = entry->bo_va;
740 vm = bo_va->base.vm;
741 bo = bo_va->base.bo;
742
743 /* Update the page tables */
744 ret = amdgpu_vm_bo_update(adev, bo_va, false);
745 if (ret) {
746 pr_err("amdgpu_vm_bo_update failed\n");
747 return ret;
748 }
749
750 return sync_vm_fence(adev, sync, bo_va->last_pt_update);
751}
752
753static int map_bo_to_gpuvm(struct amdgpu_device *adev,
754 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync)
755{
756 int ret;
757
758 /* Set virtual address for the allocation */
759 ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
760 amdgpu_bo_size(entry->bo_va->base.bo),
761 entry->pte_flags);
762 if (ret) {
763 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
764 entry->va, ret);
765 return ret;
766 }
767
768 ret = update_gpuvm_pte(adev, entry, sync);
769 if (ret) {
770 pr_err("update_gpuvm_pte() failed\n");
771 goto update_gpuvm_pte_failed;
772 }
773
774 return 0;
775
776update_gpuvm_pte_failed:
777 unmap_bo_from_gpuvm(adev, entry, sync);
778 return ret;
779}
780
781static int process_validate_vms(struct amdkfd_process_info *process_info)
782{
783 struct amdkfd_vm *peer_vm;
784 int ret;
785
786 list_for_each_entry(peer_vm, &process_info->vm_list_head,
787 vm_list_node) {
788 ret = vm_validate_pt_pd_bos(peer_vm);
789 if (ret)
790 return ret;
791 }
792
793 return 0;
794}
795
796static int process_update_pds(struct amdkfd_process_info *process_info,
797 struct amdgpu_sync *sync)
798{
799 struct amdkfd_vm *peer_vm;
800 int ret;
801
802 list_for_each_entry(peer_vm, &process_info->vm_list_head,
803 vm_list_node) {
804 ret = vm_update_pds(&peer_vm->base, sync);
805 if (ret)
806 return ret;
807 }
808
809 return 0;
810}
811
812int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
813 void **process_info,
814 struct dma_fence **ef)
815{
816 int ret;
817 struct amdkfd_vm *new_vm;
818 struct amdkfd_process_info *info;
819 struct amdgpu_device *adev = get_amdgpu_device(kgd);
820
821 new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
822 if (!new_vm)
823 return -ENOMEM;
824
825 /* Initialize the VM context, allocate the page directory and zero it */
826 ret = amdgpu_vm_init(adev, &new_vm->base, AMDGPU_VM_CONTEXT_COMPUTE, 0);
827 if (ret) {
828 pr_err("Failed init vm ret %d\n", ret);
829 goto vm_init_fail;
830 }
831 new_vm->adev = adev;
832
833 if (!*process_info) {
834 info = kzalloc(sizeof(*info), GFP_KERNEL);
835 if (!info) {
836 ret = -ENOMEM;
837 goto alloc_process_info_fail;
838 }
839
840 mutex_init(&info->lock);
841 INIT_LIST_HEAD(&info->vm_list_head);
842 INIT_LIST_HEAD(&info->kfd_bo_list);
843
844 info->eviction_fence =
845 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
846 current->mm);
847 if (!info->eviction_fence) {
848 pr_err("Failed to create eviction fence\n");
849 goto create_evict_fence_fail;
850 }
851
852 *process_info = info;
853 *ef = dma_fence_get(&info->eviction_fence->base);
854 }
855
856 new_vm->process_info = *process_info;
857
858 mutex_lock(&new_vm->process_info->lock);
859 list_add_tail(&new_vm->vm_list_node,
860 &(new_vm->process_info->vm_list_head));
861 new_vm->process_info->n_vms++;
862 mutex_unlock(&new_vm->process_info->lock);
863
864 *vm = (void *) new_vm;
865
866 pr_debug("Created process vm %p\n", *vm);
867
868 return ret;
869
870create_evict_fence_fail:
871 mutex_destroy(&info->lock);
872 kfree(info);
873alloc_process_info_fail:
874 amdgpu_vm_fini(adev, &new_vm->base);
875vm_init_fail:
876 kfree(new_vm);
877 return ret;
878
879}
880
881void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
882{
883 struct amdgpu_device *adev = get_amdgpu_device(kgd);
884 struct amdkfd_vm *kfd_vm = (struct amdkfd_vm *) vm;
885 struct amdgpu_vm *avm = &kfd_vm->base;
886 struct amdgpu_bo *pd;
887 struct amdkfd_process_info *process_info;
888
889 if (WARN_ON(!kgd || !vm))
890 return;
891
892 pr_debug("Destroying process vm %p\n", vm);
893 /* Release eviction fence from PD */
894 pd = avm->root.base.bo;
895 amdgpu_bo_reserve(pd, false);
896 amdgpu_bo_fence(pd, NULL, false);
897 amdgpu_bo_unreserve(pd);
898
899 process_info = kfd_vm->process_info;
900
901 mutex_lock(&process_info->lock);
902 process_info->n_vms--;
903 list_del(&kfd_vm->vm_list_node);
904 mutex_unlock(&process_info->lock);
905
906 /* Release per-process resources */
907 if (!process_info->n_vms) {
908 WARN_ON(!list_empty(&process_info->kfd_bo_list));
909
910 dma_fence_put(&process_info->eviction_fence->base);
911 mutex_destroy(&process_info->lock);
912 kfree(process_info);
913 }
914
915 /* Release the VM context */
916 amdgpu_vm_fini(adev, avm);
917 kfree(vm);
918}
919
920uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
921{
922 struct amdkfd_vm *avm = (struct amdkfd_vm *)vm;
923
924 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
925}
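/*
 * Editor's note (assumed pairing): the value returned here is the page
 * directory address in GPU-page units (pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT);
 * KFD is expected to program it back through the
 * set_vm_context_page_table_base() entry of the kfd2kgd_calls table for the
 * VMID it assigns to this process.
 */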
926
927int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
928 struct kgd_dev *kgd, uint64_t va, uint64_t size,
929 void *vm, struct kgd_mem **mem,
930 uint64_t *offset, uint32_t flags)
931{
932 struct amdgpu_device *adev = get_amdgpu_device(kgd);
933 struct amdkfd_vm *kfd_vm = (struct amdkfd_vm *)vm;
934 struct amdgpu_bo *bo;
935 int byte_align;
936 u32 alloc_domain;
937 u64 alloc_flags;
938 uint32_t mapping_flags;
939 int ret;
940
941 /*
942 * Check on which domain to allocate BO
943 */
944 if (flags & ALLOC_MEM_FLAGS_VRAM) {
945 alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
946 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
947 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
948 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
949 AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
950 } else if (flags & ALLOC_MEM_FLAGS_GTT) {
951 alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
952 alloc_flags = 0;
953 } else {
954 return -EINVAL;
955 }
956
957 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
958 if (!*mem)
959 return -ENOMEM;
960 INIT_LIST_HEAD(&(*mem)->bo_va_list);
961 mutex_init(&(*mem)->lock);
962 (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
963
964 /* Workaround for AQL queue wraparound bug. Map the same
965 * memory twice. That means we only actually allocate half
966 * the memory.
967 */
968 if ((*mem)->aql_queue)
969 size = size >> 1;
970
971 /* Workaround for TLB bug on older VI chips */
972 byte_align = (adev->family == AMDGPU_FAMILY_VI &&
973 adev->asic_type != CHIP_FIJI &&
974 adev->asic_type != CHIP_POLARIS10 &&
975 adev->asic_type != CHIP_POLARIS11) ?
976 VI_BO_SIZE_ALIGN : 1;
977
978 mapping_flags = AMDGPU_VM_PAGE_READABLE;
979 if (flags & ALLOC_MEM_FLAGS_WRITABLE)
980 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
981 if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
982 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
983 if (flags & ALLOC_MEM_FLAGS_COHERENT)
984 mapping_flags |= AMDGPU_VM_MTYPE_UC;
985 else
986 mapping_flags |= AMDGPU_VM_MTYPE_NC;
987 (*mem)->mapping_flags = mapping_flags;
988
989 amdgpu_sync_create(&(*mem)->sync);
990
991 ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
992 if (ret) {
993 pr_debug("Insufficient system memory\n");
994 goto err_reserve_system_mem;
995 }
996
997 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
998 va, size, domain_string(alloc_domain));
999
1000 ret = amdgpu_bo_create(adev, size, byte_align,
1001 alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
1002 if (ret) {
1003 pr_debug("Failed to create BO on domain %s. ret %d\n",
1004 domain_string(alloc_domain), ret);
1005 goto err_bo_create;
1006 }
1007 bo->kfd_bo = *mem;
1008 (*mem)->bo = bo;
1009
1010 (*mem)->va = va;
1011 (*mem)->domain = alloc_domain;
1012 (*mem)->mapped_to_gpu_memory = 0;
1013 (*mem)->process_info = kfd_vm->process_info;
1014 add_kgd_mem_to_kfd_bo_list(*mem, kfd_vm->process_info);
1015
1016 if (offset)
1017 *offset = amdgpu_bo_mmap_offset(bo);
1018
1019 return 0;
1020
1021err_bo_create:
1022 unreserve_system_mem_limit(adev, size, alloc_domain);
1023err_reserve_system_mem:
1024 mutex_destroy(&(*mem)->lock);
1025 kfree(*mem);
1026 return ret;
1027}
1028
1029int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1030 struct kgd_dev *kgd, struct kgd_mem *mem)
1031{
1032 struct amdkfd_process_info *process_info = mem->process_info;
1033 unsigned long bo_size = mem->bo->tbo.mem.size;
1034 struct kfd_bo_va_list *entry, *tmp;
1035 struct bo_vm_reservation_context ctx;
1036 struct ttm_validate_buffer *bo_list_entry;
1037 int ret;
1038
1039 mutex_lock(&mem->lock);
1040
1041 if (mem->mapped_to_gpu_memory > 0) {
1042 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1043 mem->va, bo_size);
1044 mutex_unlock(&mem->lock);
1045 return -EBUSY;
1046 }
1047
1048 mutex_unlock(&mem->lock);
1049 /* lock is not needed after this, since mem is unused and will
1050 * be freed anyway
1051 */
1052
1053 /* Make sure restore workers don't access the BO any more */
1054 bo_list_entry = &mem->validate_list;
1055 mutex_lock(&process_info->lock);
1056 list_del(&bo_list_entry->head);
1057 mutex_unlock(&process_info->lock);
1058
1059 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1060 if (unlikely(ret))
1061 return ret;
1062
1063 /* The eviction fence should be removed by the last unmap.
1064 * TODO: Log an error condition if the bo still has the eviction fence
1065 * attached
1066 */
1067 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1068 process_info->eviction_fence,
1069 NULL, NULL);
1070 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1071 mem->va + bo_size * (1 + mem->aql_queue));
1072
1073 /* Remove from VM internal data structures */
1074 list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1075 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1076 entry, bo_size);
1077
1078 ret = unreserve_bo_and_vms(&ctx, false, false);
1079
1080 /* Free the sync object */
1081 amdgpu_sync_free(&mem->sync);
1082
1083 /* Free the BO */
1084 amdgpu_bo_unref(&mem->bo);
1085 mutex_destroy(&mem->lock);
1086 kfree(mem);
1087
1088 return ret;
1089}
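
A hedged sketch of the expected teardown order for the free path above (the wrapper is hypothetical): the BO has to be unmapped from every VM first, otherwise the -EBUSY check triggers.

	static int example_unmap_and_free(struct kgd_dev *kgd,
					  struct kgd_mem *mem, void *vm)
	{
		int r;

		/* Drop the GPUVM mapping first; free_memory_of_gpu() returns
		 * -EBUSY while mapped_to_gpu_memory is non-zero.
		 */
		r = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, vm);
		if (r)
			return r;

		return amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem);
	}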
1090
1091int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1092 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1093{
1094 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1095 struct amdkfd_vm *kfd_vm = (struct amdkfd_vm *)vm;
1096 int ret;
1097 struct amdgpu_bo *bo;
1098 uint32_t domain;
1099 struct kfd_bo_va_list *entry;
1100 struct bo_vm_reservation_context ctx;
1101 struct kfd_bo_va_list *bo_va_entry = NULL;
1102 struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1103 unsigned long bo_size;
1104
1105 /* Make sure restore is not running concurrently.
1106 */
1107 mutex_lock(&mem->process_info->lock);
1108
1109 mutex_lock(&mem->lock);
1110
1111 bo = mem->bo;
1112
1113 if (!bo) {
1114 pr_err("Invalid BO when mapping memory to GPU\n");
1115 ret = -EINVAL;
1116 goto out;
1117 }
1118
1119 domain = mem->domain;
1120 bo_size = bo->tbo.mem.size;
1121
1122 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1123 mem->va,
1124 mem->va + bo_size * (1 + mem->aql_queue),
1125 vm, domain_string(domain));
1126
1127 ret = reserve_bo_and_vm(mem, vm, &ctx);
1128 if (unlikely(ret))
1129 goto out;
1130
1131 if (check_if_add_bo_to_vm((struct amdgpu_vm *)vm, mem)) {
1132 ret = add_bo_to_vm(adev, mem, (struct amdgpu_vm *)vm, false,
1133 &bo_va_entry);
1134 if (ret)
1135 goto add_bo_to_vm_failed;
1136 if (mem->aql_queue) {
1137 ret = add_bo_to_vm(adev, mem, (struct amdgpu_vm *)vm,
1138 true, &bo_va_entry_aql);
1139 if (ret)
1140 goto add_bo_to_vm_failed_aql;
1141 }
1142 } else {
1143 ret = vm_validate_pt_pd_bos((struct amdkfd_vm *)vm);
1144 if (unlikely(ret))
1145 goto add_bo_to_vm_failed;
1146 }
1147
1148 if (mem->mapped_to_gpu_memory == 0) {
1149 /* Validate BO only once. The eviction fence gets added to BO
1150 * the first time it is mapped. Validate will wait for all
1151 * background evictions to complete.
1152 */
1153 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1154 if (ret) {
1155 pr_debug("Validate failed\n");
1156 goto map_bo_to_gpuvm_failed;
1157 }
1158 }
1159
1160 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1161 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1162 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1163 entry->va, entry->va + bo_size,
1164 entry);
1165
1166 ret = map_bo_to_gpuvm(adev, entry, ctx.sync);
1167 if (ret) {
1168 pr_err("Failed to map BO to gpuvm\n");
1169 goto map_bo_to_gpuvm_failed;
1170 }
1171
1172 ret = vm_update_pds(vm, ctx.sync);
1173 if (ret) {
1174 pr_err("Failed to update page directories\n");
1175 goto map_bo_to_gpuvm_failed;
1176 }
1177
1178 entry->is_mapped = true;
1179 mem->mapped_to_gpu_memory++;
1180 pr_debug("\t INC mapping count %d\n",
1181 mem->mapped_to_gpu_memory);
1182 }
1183 }
1184
1185 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1186 amdgpu_bo_fence(bo,
1187 &kfd_vm->process_info->eviction_fence->base,
1188 true);
1189 ret = unreserve_bo_and_vms(&ctx, false, false);
1190
1191 goto out;
1192
1193map_bo_to_gpuvm_failed:
1194 if (bo_va_entry_aql)
1195 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1196add_bo_to_vm_failed_aql:
1197 if (bo_va_entry)
1198 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1199add_bo_to_vm_failed:
1200 unreserve_bo_and_vms(&ctx, false, false);
1201out:
1202 mutex_unlock(&mem->process_info->lock);
1203 mutex_unlock(&mem->lock);
1204 return ret;
1205}
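
A sketch of the reference-counted mapping interface, assuming a caller that shares one kgd_mem between two GPUs' VMs; whether a particular allocation may legally be shared this way is the caller's responsibility, the calls below only illustrate the bookkeeping visible in the function above.

	static int example_map_to_two_gpus(struct kgd_dev *kgd_a, void *vm_a,
					   struct kgd_dev *kgd_b, void *vm_b,
					   struct kgd_mem *mem)
	{
		int r;

		r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd_a, mem, vm_a);
		if (r)
			return r;

		/* The second map just adds another bo_va entry and bumps
		 * mapped_to_gpu_memory; unmapping one VM later leaves the
		 * other mapping intact.
		 */
		r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd_b, mem, vm_b);
		if (r)
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd_a, mem, vm_a);
		return r;
	}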
1206
1207int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1208 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1209{
1210 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1211 struct amdkfd_process_info *process_info =
1212 ((struct amdkfd_vm *)vm)->process_info;
1213 unsigned long bo_size = mem->bo->tbo.mem.size;
1214 struct kfd_bo_va_list *entry;
1215 struct bo_vm_reservation_context ctx;
1216 int ret;
1217
1218 mutex_lock(&mem->lock);
1219
1220 ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1221 if (unlikely(ret))
1222 goto out;
1223 /* If no VMs were reserved, it means the BO wasn't actually mapped */
1224 if (ctx.n_vms == 0) {
1225 ret = -EINVAL;
1226 goto unreserve_out;
1227 }
1228
1229 ret = vm_validate_pt_pd_bos((struct amdkfd_vm *)vm);
1230 if (unlikely(ret))
1231 goto unreserve_out;
1232
1233 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1234 mem->va,
1235 mem->va + bo_size * (1 + mem->aql_queue),
1236 vm);
1237
1238 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1239 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1240 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1241 entry->va,
1242 entry->va + bo_size,
1243 entry);
1244
1245 ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1246 if (ret == 0) {
1247 entry->is_mapped = false;
1248 } else {
1249 pr_err("failed to unmap VA 0x%llx\n",
1250 mem->va);
1251 goto unreserve_out;
1252 }
1253
1254 mem->mapped_to_gpu_memory--;
1255 pr_debug("\t DEC mapping count %d\n",
1256 mem->mapped_to_gpu_memory);
1257 }
1258 }
1259
1260 /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1261 * required.
1262 */
1263 if (mem->mapped_to_gpu_memory == 0 &&
1264 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1265 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1266 process_info->eviction_fence,
1267 NULL, NULL);
1268
1269unreserve_out:
1270 unreserve_bo_and_vms(&ctx, false, false);
1271out:
1272 mutex_unlock(&mem->lock);
1273 return ret;
1274}
1275
1276int amdgpu_amdkfd_gpuvm_sync_memory(
1277 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1278{
1279 struct amdgpu_sync sync;
1280 int ret;
1281
1282 amdgpu_sync_create(&sync);
1283
1284 mutex_lock(&mem->lock);
1285 amdgpu_sync_clone(&mem->sync, &sync);
1286 mutex_unlock(&mem->lock);
1287
1288 ret = amdgpu_sync_wait(&sync, intr);
1289 amdgpu_sync_free(&sync);
1290 return ret;
1291}
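
A sketch of how the sync helper above is meant to pair with the map path, assuming the page-table update fences are recorded in mem->sync (which is what the clone waits on); the wrapper is illustrative.

	static int example_map_and_wait(struct kgd_dev *kgd, struct kgd_mem *mem,
					void *vm)
	{
		int r;

		r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
		if (r)
			return r;

		/* Wait interruptibly for the queued PT updates before telling
		 * user mode the VA range is usable.
		 */
		return amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
	}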
1292
1293int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1294 struct kgd_mem *mem, void **kptr, uint64_t *size)
1295{
1296 int ret;
1297 struct amdgpu_bo *bo = mem->bo;
1298
1299 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1300 pr_err("userptr can't be mapped to kernel\n");
1301 return -EINVAL;
1302 }
1303
1304 /* Remove kgd_mem from kfd_bo_list so that this BO is not
1305 * re-validated when BOs are restored after an eviction.
1306 */
1307 mutex_lock(&mem->process_info->lock);
1308
1309 ret = amdgpu_bo_reserve(bo, true);
1310 if (ret) {
1311 pr_err("Failed to reserve bo. ret %d\n", ret);
1312 goto bo_reserve_failed;
1313 }
1314
1315 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
1316 if (ret) {
1317 pr_err("Failed to pin bo. ret %d\n", ret);
1318 goto pin_failed;
1319 }
1320
1321 ret = amdgpu_bo_kmap(bo, kptr);
1322 if (ret) {
1323 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1324 goto kmap_failed;
1325 }
1326
1327 amdgpu_amdkfd_remove_eviction_fence(
1328 bo, mem->process_info->eviction_fence, NULL, NULL);
1329 list_del_init(&mem->validate_list.head);
1330
1331 if (size)
1332 *size = amdgpu_bo_size(bo);
1333
1334 amdgpu_bo_unreserve(bo);
1335
1336 mutex_unlock(&mem->process_info->lock);
1337 return 0;
1338
1339kmap_failed:
1340 amdgpu_bo_unpin(bo);
1341pin_failed:
1342 amdgpu_bo_unreserve(bo);
1343bo_reserve_failed:
1344 mutex_unlock(&mem->process_info->lock);
1345
1346 return ret;
1347}
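
A hedged sketch combining the allocator with the kernel-mapping helper above, e.g. for a small buffer the driver itself needs to read; the wrapper and how the caller keeps the returned pointers are assumptions.

	static int example_alloc_kernel_visible_gtt_bo(struct kgd_dev *kgd,
						       void *vm, uint64_t gpu_va,
						       uint64_t size,
						       struct kgd_mem **mem,
						       void **cpu_ptr)
	{
		uint64_t kptr_size;
		int r;

		r = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, gpu_va, size,
				vm, mem, NULL,
				ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_WRITABLE);
		if (r)
			return r;

		/* Pins the BO in GTT, kmaps it and takes it off the restore
		 * list so it stays resident across evictions.
		 */
		r = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kgd, *mem, cpu_ptr,
							     &kptr_size);
		if (r)
			amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, *mem);
		return r;
	}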
1348
1349/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1350 * KFD process identified by process_info
1351 *
1352 * @process_info: amdkfd_process_info of the KFD process
1353 *
1354 * After memory eviction, the restore thread calls this function. It must be
1355 * called while the process is still valid. BO restore involves:
1356 *
1357 * 1. Release the old eviction fence and create a new one
1358 * 2. Get two copies of the PD BO list from all the VMs; keep one copy as pd_bo_list
1359 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1360 *    BOs that need to be reserved
1361 * 4. Reserve all the BOs
1362 * 5. Validate the PD and PT BOs
1363 * 6. Validate all KFD BOs using kfd_bo_list, map them and add the new fence
1364 * 7. Add the fence to all PD and PT BOs
1365 * 8. Unreserve all BOs
1366 */
1367int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1368{
1369 struct amdgpu_bo_list_entry *pd_bo_list;
1370 struct amdkfd_process_info *process_info = info;
1371 struct amdkfd_vm *peer_vm;
1372 struct kgd_mem *mem;
1373 struct bo_vm_reservation_context ctx;
1374 struct amdgpu_amdkfd_fence *new_fence;
1375 int ret = 0, i;
1376 struct list_head duplicate_save;
1377 struct amdgpu_sync sync_obj;
1378
1379 INIT_LIST_HEAD(&duplicate_save);
1380 INIT_LIST_HEAD(&ctx.list);
1381 INIT_LIST_HEAD(&ctx.duplicates);
1382
1383 pd_bo_list = kcalloc(process_info->n_vms,
1384 sizeof(struct amdgpu_bo_list_entry),
1385 GFP_KERNEL);
1386 if (!pd_bo_list)
1387 return -ENOMEM;
1388
1389 i = 0;
1390 mutex_lock(&process_info->lock);
1391 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1392 vm_list_node)
1393 amdgpu_vm_get_pd_bo(&peer_vm->base, &ctx.list,
1394 &pd_bo_list[i++]);
1395
1396 /* Reserve all BOs and page tables/directory. Add all BOs from
1397 * kfd_bo_list to ctx.list
1398 */
1399 list_for_each_entry(mem, &process_info->kfd_bo_list,
1400 validate_list.head) {
1401
1402 list_add_tail(&mem->resv_list.head, &ctx.list);
1403 mem->resv_list.bo = mem->validate_list.bo;
1404 mem->resv_list.shared = mem->validate_list.shared;
1405 }
1406
1407 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
1408 false, &duplicate_save);
1409 if (ret) {
1410 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
1411 goto ttm_reserve_fail;
1412 }
1413
1414 amdgpu_sync_create(&sync_obj);
1415
1416 /* Validate PDs and PTs */
1417 ret = process_validate_vms(process_info);
1418 if (ret)
1419 goto validate_map_fail;
1420
1421 /* Wait for PD/PT validation to finish */
1422 /* FIXME: I think this isn't needed */
1423 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1424 vm_list_node) {
1425 struct amdgpu_bo *bo = peer_vm->base.root.base.bo;
1426
1427 ttm_bo_wait(&bo->tbo, false, false);
1428 }
1429
1430 /* Validate BOs and map them to GPUVM (update VM page tables). */
1431 list_for_each_entry(mem, &process_info->kfd_bo_list,
1432 validate_list.head) {
1433
1434 struct amdgpu_bo *bo = mem->bo;
1435 uint32_t domain = mem->domain;
1436 struct kfd_bo_va_list *bo_va_entry;
1437
1438 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
1439 if (ret) {
1440 pr_debug("Memory eviction: Validate BOs failed. Try again\n");
1441 goto validate_map_fail;
1442 }
1443
1444 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
1445 bo_list) {
1446 ret = update_gpuvm_pte((struct amdgpu_device *)
1447 bo_va_entry->kgd_dev,
1448 bo_va_entry,
1449 &sync_obj);
1450 if (ret) {
1451 pr_debug("Memory eviction: update PTE failed. Try again\n");
1452 goto validate_map_fail;
1453 }
1454 }
1455 }
1456
1457 /* Update page directories */
1458 ret = process_update_pds(process_info, &sync_obj);
1459 if (ret) {
1460 pr_debug("Memory eviction: update PDs failed. Try again\n");
1461 goto validate_map_fail;
1462 }
1463
1464 amdgpu_sync_wait(&sync_obj, false);
1465
1466 /* Release the old eviction fence and create a new one. A fence only
1467 * goes from unsignaled to signaled, so it cannot be reused.
1468 * Use the context and mm from the old fence.
1469 */
1470 new_fence = amdgpu_amdkfd_fence_create(
1471 process_info->eviction_fence->base.context,
1472 process_info->eviction_fence->mm);
1473 if (!new_fence) {
1474 pr_err("Failed to create eviction fence\n");
1475 ret = -ENOMEM;
1476 goto validate_map_fail;
1477 }
1478 dma_fence_put(&process_info->eviction_fence->base);
1479 process_info->eviction_fence = new_fence;
1480 *ef = dma_fence_get(&new_fence->base);
1481
1482 /* Wait for validation to finish and attach the new eviction fence */
1483 list_for_each_entry(mem, &process_info->kfd_bo_list,
1484 validate_list.head)
1485 ttm_bo_wait(&mem->bo->tbo, false, false);
1486 list_for_each_entry(mem, &process_info->kfd_bo_list,
1487 validate_list.head)
1488 amdgpu_bo_fence(mem->bo,
1489 &process_info->eviction_fence->base, true);
1490
1491 /* Attach eviction fence to PD / PT BOs */
1492 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1493 vm_list_node) {
1494 struct amdgpu_bo *bo = peer_vm->base.root.base.bo;
1495
1496 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
1497 }
1498
1499validate_map_fail:
1500 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
1501 amdgpu_sync_free(&sync_obj);
1502ttm_reserve_fail:
1503 mutex_unlock(&process_info->lock);
1504 kfree(pd_bo_list);
1505 return ret;
1506}
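
A sketch of how an eviction/restore worker might drive this entry point; the process structure, the work item and the queue-resume step are assumptions made for illustration, only restore_process_bos itself comes from this file.

	struct example_kfd_process {
		void *kgd_process_info;		/* handle KFD got when the VMs were created */
		struct dma_fence *ef;		/* current eviction fence */
		struct work_struct restore_work;
	};

	static void example_restore_worker(struct work_struct *work)
	{
		struct example_kfd_process *p =
			container_of(work, struct example_kfd_process, restore_work);
		struct dma_fence *new_ef;

		if (amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
							    &new_ef)) {
			/* reservation/validation failed; caller would reschedule */
			return;
		}

		/* Adopt the fence created by the restore, drop the old one and
		 * then resume the process's user queues.
		 */
		dma_fence_put(p->ef);
		p->ef = new_ef;
	}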
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index ff8efd0f8fd5..a0f48cb9b8f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -114,6 +114,9 @@ union igp_info {
114 struct atom_integrated_system_info_v1_11 v11; 114 struct atom_integrated_system_info_v1_11 v11;
115}; 115};
116 116
117union umc_info {
118 struct atom_umc_info_v3_1 v31;
119};
117/* 120/*
118 * Return vram width from integrated system info table, if available, 121 * Return vram width from integrated system info table, if available,
119 * or 0 if not. 122 * or 0 if not.
@@ -143,6 +146,94 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
143 return 0; 146 return 0;
144} 147}
145 148
149static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
150 int atom_mem_type)
151{
152 int vram_type;
153
154 if (adev->flags & AMD_IS_APU) {
155 switch (atom_mem_type) {
156 case Ddr2MemType:
157 case LpDdr2MemType:
158 vram_type = AMDGPU_VRAM_TYPE_DDR2;
159 break;
160 case Ddr3MemType:
161 case LpDdr3MemType:
162 vram_type = AMDGPU_VRAM_TYPE_DDR3;
163 break;
164 case Ddr4MemType:
165 case LpDdr4MemType:
166 vram_type = AMDGPU_VRAM_TYPE_DDR4;
167 break;
168 default:
169 vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
170 break;
171 }
172 } else {
173 switch (atom_mem_type) {
174 case ATOM_DGPU_VRAM_TYPE_GDDR5:
175 vram_type = AMDGPU_VRAM_TYPE_GDDR5;
176 break;
177 case ATOM_DGPU_VRAM_TYPE_HBM:
178 vram_type = AMDGPU_VRAM_TYPE_HBM;
179 break;
180 default:
181 vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
182 break;
183 }
184 }
185
186 return vram_type;
187}
188/*
189 * Return vram type from either integrated system info table
190 * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
191 */
192int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
193{
194 struct amdgpu_mode_info *mode_info = &adev->mode_info;
195 int index;
196 u16 data_offset, size;
197 union igp_info *igp_info;
198 union umc_info *umc_info;
199 u8 frev, crev;
200 u8 mem_type;
201
202 if (adev->flags & AMD_IS_APU)
203 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
204 integratedsysteminfo);
205 else
206 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
207 umc_info);
208 if (amdgpu_atom_parse_data_header(mode_info->atom_context,
209 index, &size,
210 &frev, &crev, &data_offset)) {
211 if (adev->flags & AMD_IS_APU) {
212 igp_info = (union igp_info *)
213 (mode_info->atom_context->bios + data_offset);
214 switch (crev) {
215 case 11:
216 mem_type = igp_info->v11.memorytype;
217 return convert_atom_mem_type_to_vram_type(adev, mem_type);
218 default:
219 return 0;
220 }
221 } else {
222 umc_info = (union umc_info *)
223 (mode_info->atom_context->bios + data_offset);
224 switch (crev) {
225 case 1:
226 mem_type = umc_info->v31.vram_type;
227 return convert_atom_mem_type_to_vram_type(adev, mem_type);
228 default:
229 return 0;
230 }
231 }
232 }
233
234 return 0;
235}
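
A small hedged sketch of a caller for the new query, e.g. somewhere in a GMC init path; the function name and the logging are illustrative only.

	static void example_detect_vram_type(struct amdgpu_device *adev)
	{
		int vram_type = amdgpu_atomfirmware_get_vram_type(adev);

		if (vram_type == AMDGPU_VRAM_TYPE_UNKNOWN)
			DRM_INFO("vram type not present in firmware tables\n");
		/* a real caller would store the value in its memory-controller
		 * bookkeeping for later reporting to user space
		 */
	}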
236
146union firmware_info { 237union firmware_info {
147 struct atom_firmware_info_v3_1 v31; 238 struct atom_firmware_info_v3_1 v31;
148}; 239};
@@ -151,10 +242,6 @@ union smu_info {
151 struct atom_smu_info_v3_1 v31; 242 struct atom_smu_info_v3_1 v31;
152}; 243};
153 244
154union umc_info {
155 struct atom_umc_info_v3_1 v31;
156};
157
158int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) 245int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
159{ 246{
160 struct amdgpu_mode_info *mode_info = &adev->mode_info; 247 struct amdgpu_mode_info *mode_info = &adev->mode_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 288b97e54347..7689c961c4ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -28,6 +28,7 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
28void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); 28void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
29int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); 29int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
30int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev); 30int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
31int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
31int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); 32int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
32 33
33#endif 34#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index e2c3c5ec42d1..c53095b3b0fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
568 /* HG _PR3 doesn't seem to work on this A+A weston board */ 568 /* HG _PR3 doesn't seem to work on this A+A weston board */
569 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, 569 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
570 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, 570 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
571 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
571 { 0, 0, 0, 0, 0 }, 572 { 0, 0, 0, 0, 0 },
572}; 573};
573 574
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 63ec1e1bb6aa..02b849be083b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -80,8 +80,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
80 int time; 80 int time;
81 81
82 n = AMDGPU_BENCHMARK_ITERATIONS; 82 n = AMDGPU_BENCHMARK_ITERATIONS;
83 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, 83 r = amdgpu_bo_create(adev, size, PAGE_SIZE,sdomain, 0,
84 NULL, 0, &sobj); 84 ttm_bo_type_kernel, NULL, &sobj);
85 if (r) { 85 if (r) {
86 goto out_cleanup; 86 goto out_cleanup;
87 } 87 }
@@ -93,8 +93,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
93 if (r) { 93 if (r) {
94 goto out_cleanup; 94 goto out_cleanup;
95 } 95 }
96 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, 96 r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
97 NULL, 0, &dobj); 97 ttm_bo_type_kernel, NULL, &dobj);
98 if (r) { 98 if (r) {
99 goto out_cleanup; 99 goto out_cleanup;
100 } 100 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 59089e027f4d..92be7f6de197 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -233,8 +233,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
233 for (i = 0; i < list->num_entries; i++) { 233 for (i = 0; i < list->num_entries; i++) {
234 unsigned priority = list->array[i].priority; 234 unsigned priority = list->array[i].priority;
235 235
236 list_add_tail(&list->array[i].tv.head, 236 if (!list->array[i].robj->parent)
237 &bucket[priority]); 237 list_add_tail(&list->array[i].tv.head,
238 &bucket[priority]);
239
238 list->array[i].user_pages = NULL; 240 list->array[i].user_pages = NULL;
239 } 241 }
240 242
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 4466f3535e2d..37098c68a645 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -24,7 +24,6 @@
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/acpi.h>
28#include <drm/drmP.h> 27#include <drm/drmP.h>
29#include <linux/firmware.h> 28#include <linux/firmware.h>
30#include <drm/amdgpu_drm.h> 29#include <drm/amdgpu_drm.h>
@@ -42,152 +41,6 @@ struct amdgpu_cgs_device {
42 struct amdgpu_device *adev = \ 41 struct amdgpu_device *adev = \
43 ((struct amdgpu_cgs_device *)cgs_device)->adev 42 ((struct amdgpu_cgs_device *)cgs_device)->adev
44 43
45static void *amdgpu_cgs_register_pp_handle(struct cgs_device *cgs_device,
46 int (*call_back_func)(struct amd_pp_init *, void **))
47{
48 CGS_FUNC_ADEV;
49 struct amd_pp_init pp_init;
50 struct amd_powerplay *amd_pp;
51
52 if (call_back_func == NULL)
53 return NULL;
54
55 amd_pp = &(adev->powerplay);
56 pp_init.chip_family = adev->family;
57 pp_init.chip_id = adev->asic_type;
58 pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
59 pp_init.feature_mask = amdgpu_pp_feature_mask;
60 pp_init.device = cgs_device;
61 if (call_back_func(&pp_init, &(amd_pp->pp_handle)))
62 return NULL;
63
64 return adev->powerplay.pp_handle;
65}
66
67static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
68 enum cgs_gpu_mem_type type,
69 uint64_t size, uint64_t align,
70 cgs_handle_t *handle)
71{
72 CGS_FUNC_ADEV;
73 uint16_t flags = 0;
74 int ret = 0;
75 uint32_t domain = 0;
76 struct amdgpu_bo *obj;
77
78 /* fail if the alignment is not a power of 2 */
79 if (((align != 1) && (align & (align - 1)))
80 || size == 0 || align == 0)
81 return -EINVAL;
82
83
84 switch(type) {
85 case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
86 case CGS_GPU_MEM_TYPE__VISIBLE_FB:
87 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
88 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
89 domain = AMDGPU_GEM_DOMAIN_VRAM;
90 break;
91 case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
92 case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
93 flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
94 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
95 domain = AMDGPU_GEM_DOMAIN_VRAM;
96 break;
97 case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
98 domain = AMDGPU_GEM_DOMAIN_GTT;
99 break;
100 case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
101 flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
102 domain = AMDGPU_GEM_DOMAIN_GTT;
103 break;
104 default:
105 return -EINVAL;
106 }
107
108
109 *handle = 0;
110
111 ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
112 NULL, NULL, 0, &obj);
113 if (ret) {
114 DRM_ERROR("(%d) bo create failed\n", ret);
115 return ret;
116 }
117 *handle = (cgs_handle_t)obj;
118
119 return ret;
120}
121
122static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
123{
124 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
125
126 if (obj) {
127 int r = amdgpu_bo_reserve(obj, true);
128 if (likely(r == 0)) {
129 amdgpu_bo_kunmap(obj);
130 amdgpu_bo_unpin(obj);
131 amdgpu_bo_unreserve(obj);
132 }
133 amdgpu_bo_unref(&obj);
134
135 }
136 return 0;
137}
138
139static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
140 uint64_t *mcaddr)
141{
142 int r;
143 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
144
145 WARN_ON_ONCE(obj->placement.num_placement > 1);
146
147 r = amdgpu_bo_reserve(obj, true);
148 if (unlikely(r != 0))
149 return r;
150 r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
151 amdgpu_bo_unreserve(obj);
152 return r;
153}
154
155static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
156{
157 int r;
158 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
159 r = amdgpu_bo_reserve(obj, true);
160 if (unlikely(r != 0))
161 return r;
162 r = amdgpu_bo_unpin(obj);
163 amdgpu_bo_unreserve(obj);
164 return r;
165}
166
167static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
168 void **map)
169{
170 int r;
171 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
172 r = amdgpu_bo_reserve(obj, true);
173 if (unlikely(r != 0))
174 return r;
175 r = amdgpu_bo_kmap(obj, map);
176 amdgpu_bo_unreserve(obj);
177 return r;
178}
179
180static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
181{
182 int r;
183 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
184 r = amdgpu_bo_reserve(obj, true);
185 if (unlikely(r != 0))
186 return r;
187 amdgpu_bo_kunmap(obj);
188 amdgpu_bo_unreserve(obj);
189 return r;
190}
191 44
192static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset) 45static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
193{ 46{
@@ -801,11 +654,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
801 else 654 else
802 strcpy(fw_name, "amdgpu/vega10_smc.bin"); 655 strcpy(fw_name, "amdgpu/vega10_smc.bin");
803 break; 656 break;
804 case CHIP_CARRIZO:
805 case CHIP_STONEY:
806 case CHIP_RAVEN:
807 adev->pm.fw_version = info->version;
808 return 0;
809 default: 657 default:
810 DRM_ERROR("SMC firmware not supported\n"); 658 DRM_ERROR("SMC firmware not supported\n");
811 return -EINVAL; 659 return -EINVAL;
@@ -857,61 +705,6 @@ static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
857 return amdgpu_sriov_vf(adev); 705 return amdgpu_sriov_vf(adev);
858} 706}
859 707
860static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
861 struct cgs_system_info *sys_info)
862{
863 CGS_FUNC_ADEV;
864
865 if (NULL == sys_info)
866 return -ENODEV;
867
868 if (sizeof(struct cgs_system_info) != sys_info->size)
869 return -ENODEV;
870
871 switch (sys_info->info_id) {
872 case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
873 sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
874 break;
875 case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
876 sys_info->value = adev->pm.pcie_gen_mask;
877 break;
878 case CGS_SYSTEM_INFO_PCIE_MLW:
879 sys_info->value = adev->pm.pcie_mlw_mask;
880 break;
881 case CGS_SYSTEM_INFO_PCIE_DEV:
882 sys_info->value = adev->pdev->device;
883 break;
884 case CGS_SYSTEM_INFO_PCIE_REV:
885 sys_info->value = adev->pdev->revision;
886 break;
887 case CGS_SYSTEM_INFO_CG_FLAGS:
888 sys_info->value = adev->cg_flags;
889 break;
890 case CGS_SYSTEM_INFO_PG_FLAGS:
891 sys_info->value = adev->pg_flags;
892 break;
893 case CGS_SYSTEM_INFO_GFX_CU_INFO:
894 sys_info->value = adev->gfx.cu_info.number;
895 break;
896 case CGS_SYSTEM_INFO_GFX_SE_INFO:
897 sys_info->value = adev->gfx.config.max_shader_engines;
898 break;
899 case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
900 sys_info->value = adev->pdev->subsystem_device;
901 break;
902 case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
903 sys_info->value = adev->pdev->subsystem_vendor;
904 break;
905 case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
906 sys_info->value = adev->pdev->devfn;
907 break;
908 default:
909 return -ENODEV;
910 }
911
912 return 0;
913}
914
915static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device, 708static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
916 struct cgs_display_info *info) 709 struct cgs_display_info *info)
917{ 710{
@@ -953,6 +746,11 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
953 (amdgpu_crtc->v_border * 2); 746 (amdgpu_crtc->v_border * 2);
954 mode_info->vblank_time_us = vblank_lines * line_time_us; 747 mode_info->vblank_time_us = vblank_lines * line_time_us;
955 mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 748 mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
749 /* we have issues with mclk switching with refresh rates
750 * over 120 hz on the non-DC code.
751 */
752 if (mode_info->refresh_rate > 120)
753 mode_info->vblank_time_us = 0;
956 mode_info = NULL; 754 mode_info = NULL;
957 } 755 }
958 } 756 }
@@ -977,223 +775,7 @@ static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool ena
977 return 0; 775 return 0;
978} 776}
979 777
980/** \brief evaluate acpi namespace object, handle or pathname must be valid
981 * \param cgs_device
982 * \param info input/output arguments for the control method
983 * \return status
984 */
985
986#if defined(CONFIG_ACPI)
987static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
988 struct cgs_acpi_method_info *info)
989{
990 CGS_FUNC_ADEV;
991 acpi_handle handle;
992 struct acpi_object_list input;
993 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
994 union acpi_object *params, *obj;
995 uint8_t name[5] = {'\0'};
996 struct cgs_acpi_method_argument *argument;
997 uint32_t i, count;
998 acpi_status status;
999 int result;
1000
1001 handle = ACPI_HANDLE(&adev->pdev->dev);
1002 if (!handle)
1003 return -ENODEV;
1004
1005 memset(&input, 0, sizeof(struct acpi_object_list));
1006
1007 /* validate input info */
1008 if (info->size != sizeof(struct cgs_acpi_method_info))
1009 return -EINVAL;
1010
1011 input.count = info->input_count;
1012 if (info->input_count > 0) {
1013 if (info->pinput_argument == NULL)
1014 return -EINVAL;
1015 argument = info->pinput_argument;
1016 for (i = 0; i < info->input_count; i++) {
1017 if (((argument->type == ACPI_TYPE_STRING) ||
1018 (argument->type == ACPI_TYPE_BUFFER)) &&
1019 (argument->pointer == NULL))
1020 return -EINVAL;
1021 argument++;
1022 }
1023 }
1024
1025 if (info->output_count > 0) {
1026 if (info->poutput_argument == NULL)
1027 return -EINVAL;
1028 argument = info->poutput_argument;
1029 for (i = 0; i < info->output_count; i++) {
1030 if (((argument->type == ACPI_TYPE_STRING) ||
1031 (argument->type == ACPI_TYPE_BUFFER))
1032 && (argument->pointer == NULL))
1033 return -EINVAL;
1034 argument++;
1035 }
1036 }
1037
1038 /* The path name passed to acpi_evaluate_object should be null terminated */
1039 if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
1040 strncpy(name, (char *)&(info->name), sizeof(uint32_t));
1041 name[4] = '\0';
1042 }
1043
1044 /* parse input parameters */
1045 if (input.count > 0) {
1046 input.pointer = params =
1047 kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
1048 if (params == NULL)
1049 return -EINVAL;
1050
1051 argument = info->pinput_argument;
1052
1053 for (i = 0; i < input.count; i++) {
1054 params->type = argument->type;
1055 switch (params->type) {
1056 case ACPI_TYPE_INTEGER:
1057 params->integer.value = argument->value;
1058 break;
1059 case ACPI_TYPE_STRING:
1060 params->string.length = argument->data_length;
1061 params->string.pointer = argument->pointer;
1062 break;
1063 case ACPI_TYPE_BUFFER:
1064 params->buffer.length = argument->data_length;
1065 params->buffer.pointer = argument->pointer;
1066 break;
1067 default:
1068 break;
1069 }
1070 params++;
1071 argument++;
1072 }
1073 }
1074
1075 /* parse output info */
1076 count = info->output_count;
1077 argument = info->poutput_argument;
1078
1079 /* evaluate the acpi method */
1080 status = acpi_evaluate_object(handle, name, &input, &output);
1081
1082 if (ACPI_FAILURE(status)) {
1083 result = -EIO;
1084 goto free_input;
1085 }
1086
1087 /* return the output info */
1088 obj = output.pointer;
1089
1090 if (count > 1) {
1091 if ((obj->type != ACPI_TYPE_PACKAGE) ||
1092 (obj->package.count != count)) {
1093 result = -EIO;
1094 goto free_obj;
1095 }
1096 params = obj->package.elements;
1097 } else
1098 params = obj;
1099
1100 if (params == NULL) {
1101 result = -EIO;
1102 goto free_obj;
1103 }
1104
1105 for (i = 0; i < count; i++) {
1106 if (argument->type != params->type) {
1107 result = -EIO;
1108 goto free_obj;
1109 }
1110 switch (params->type) {
1111 case ACPI_TYPE_INTEGER:
1112 argument->value = params->integer.value;
1113 break;
1114 case ACPI_TYPE_STRING:
1115 if ((params->string.length != argument->data_length) ||
1116 (params->string.pointer == NULL)) {
1117 result = -EIO;
1118 goto free_obj;
1119 }
1120 strncpy(argument->pointer,
1121 params->string.pointer,
1122 params->string.length);
1123 break;
1124 case ACPI_TYPE_BUFFER:
1125 if (params->buffer.pointer == NULL) {
1126 result = -EIO;
1127 goto free_obj;
1128 }
1129 memcpy(argument->pointer,
1130 params->buffer.pointer,
1131 argument->data_length);
1132 break;
1133 default:
1134 break;
1135 }
1136 argument++;
1137 params++;
1138 }
1139
1140 result = 0;
1141free_obj:
1142 kfree(obj);
1143free_input:
1144 kfree((void *)input.pointer);
1145 return result;
1146}
1147#else
1148static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
1149 struct cgs_acpi_method_info *info)
1150{
1151 return -EIO;
1152}
1153#endif
1154
1155static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
1156 uint32_t acpi_method,
1157 uint32_t acpi_function,
1158 void *pinput, void *poutput,
1159 uint32_t output_count,
1160 uint32_t input_size,
1161 uint32_t output_size)
1162{
1163 struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
1164 struct cgs_acpi_method_argument acpi_output = {0};
1165 struct cgs_acpi_method_info info = {0};
1166
1167 acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
1168 acpi_input[0].data_length = sizeof(uint32_t);
1169 acpi_input[0].value = acpi_function;
1170
1171 acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
1172 acpi_input[1].data_length = input_size;
1173 acpi_input[1].pointer = pinput;
1174
1175 acpi_output.type = CGS_ACPI_TYPE_BUFFER;
1176 acpi_output.data_length = output_size;
1177 acpi_output.pointer = poutput;
1178
1179 info.size = sizeof(struct cgs_acpi_method_info);
1180 info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
1181 info.input_count = 2;
1182 info.name = acpi_method;
1183 info.pinput_argument = acpi_input;
1184 info.output_count = output_count;
1185 info.poutput_argument = &acpi_output;
1186
1187 return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
1188}
1189
1190static const struct cgs_ops amdgpu_cgs_ops = { 778static const struct cgs_ops amdgpu_cgs_ops = {
1191 .alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
1192 .free_gpu_mem = amdgpu_cgs_free_gpu_mem,
1193 .gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
1194 .gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
1195 .kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
1196 .kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
1197 .read_register = amdgpu_cgs_read_register, 779 .read_register = amdgpu_cgs_read_register,
1198 .write_register = amdgpu_cgs_write_register, 780 .write_register = amdgpu_cgs_write_register,
1199 .read_ind_register = amdgpu_cgs_read_ind_register, 781 .read_ind_register = amdgpu_cgs_read_ind_register,
@@ -1208,12 +790,9 @@ static const struct cgs_ops amdgpu_cgs_ops = {
1208 .set_clockgating_state = amdgpu_cgs_set_clockgating_state, 790 .set_clockgating_state = amdgpu_cgs_set_clockgating_state,
1209 .get_active_displays_info = amdgpu_cgs_get_active_displays_info, 791 .get_active_displays_info = amdgpu_cgs_get_active_displays_info,
1210 .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled, 792 .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
1211 .call_acpi_method = amdgpu_cgs_call_acpi_method,
1212 .query_system_info = amdgpu_cgs_query_system_info,
1213 .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled, 793 .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
1214 .enter_safe_mode = amdgpu_cgs_enter_safe_mode, 794 .enter_safe_mode = amdgpu_cgs_enter_safe_mode,
1215 .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx, 795 .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
1216 .register_pp_handle = amdgpu_cgs_register_pp_handle,
1217}; 796};
1218 797
1219static const struct cgs_os_ops amdgpu_cgs_os_ops = { 798static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8ca3783f2deb..9da8d5802980 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
69 /* don't do anything if sink is not display port, i.e., 69 /* don't do anything if sink is not display port, i.e.,
70 * passive dp->(dvi|hdmi) adaptor 70 * passive dp->(dvi|hdmi) adaptor
71 */ 71 */
72 if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 72 if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
73 int saved_dpms = connector->dpms; 73 amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
74 /* Only turn off the display if it's physically disconnected */ 74 amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
75 if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { 75 /* Don't start link training before we have the DPCD */
76 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 76 if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
77 } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { 77 return;
78 /* Don't try to start link training before we 78
79 * have the dpcd */ 79 /* Turn the connector off and back on immediately, which
80 if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) 80 * will trigger link training
81 return; 81 */
82 82 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
83 /* set it to OFF so that drm_helper_connector_dpms() 83 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
84 * won't return immediately since the current state
85 * is ON at this point.
86 */
87 connector->dpms = DRM_MODE_DPMS_OFF;
88 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
89 }
90 connector->dpms = saved_dpms;
91 } 84 }
92 } 85 }
93} 86}
@@ -877,7 +870,7 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
877 ret = connector_status_disconnected; 870 ret = connector_status_disconnected;
878 871
879 if (amdgpu_connector->ddc_bus) 872 if (amdgpu_connector->ddc_bus)
880 dret = amdgpu_ddc_probe(amdgpu_connector, false); 873 dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
881 if (dret) { 874 if (dret) {
882 amdgpu_connector->detected_by_load = false; 875 amdgpu_connector->detected_by_load = false;
883 amdgpu_connector_free_edid(connector); 876 amdgpu_connector_free_edid(connector);
@@ -998,7 +991,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
998 } 991 }
999 992
1000 if (amdgpu_connector->ddc_bus) 993 if (amdgpu_connector->ddc_bus)
1001 dret = amdgpu_ddc_probe(amdgpu_connector, false); 994 dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
1002 if (dret) { 995 if (dret) {
1003 amdgpu_connector->detected_by_load = false; 996 amdgpu_connector->detected_by_load = false;
1004 amdgpu_connector_free_edid(connector); 997 amdgpu_connector_free_edid(connector);
@@ -1401,7 +1394,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
1401 /* setup ddc on the bridge */ 1394 /* setup ddc on the bridge */
1402 amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder); 1395 amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
1403 /* bridge chips are always aux */ 1396 /* bridge chips are always aux */
1404 if (amdgpu_ddc_probe(amdgpu_connector, true)) /* try DDC */ 1397 /* try DDC */
1398 if (amdgpu_display_ddc_probe(amdgpu_connector, true))
1405 ret = connector_status_connected; 1399 ret = connector_status_connected;
1406 else if (amdgpu_connector->dac_load_detect) { /* try load detection */ 1400 else if (amdgpu_connector->dac_load_detect) { /* try load detection */
1407 const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 1401 const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1421,7 +1415,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
1421 ret = connector_status_connected; 1415 ret = connector_status_connected;
1422 } else { 1416 } else {
1423 /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */ 1417 /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
1424 if (amdgpu_ddc_probe(amdgpu_connector, false)) 1418 if (amdgpu_display_ddc_probe(amdgpu_connector,
1419 false))
1425 ret = connector_status_connected; 1420 ret = connector_status_connected;
1426 } 1421 }
1427 } 1422 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e80fc38141b5..dc34b50e6b29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -257,7 +257,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
257 return; 257 return;
258 } 258 }
259 259
260 total_vram = adev->mc.real_vram_size - adev->vram_pin_size; 260 total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
261 used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 261 used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
262 free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram; 262 free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
263 263
@@ -302,8 +302,8 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
302 *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); 302 *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
303 303
304 /* Do the same for visible VRAM if half of it is free */ 304 /* Do the same for visible VRAM if half of it is free */
305 if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { 305 if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
306 u64 total_vis_vram = adev->mc.visible_vram_size; 306 u64 total_vis_vram = adev->gmc.visible_vram_size;
307 u64 used_vis_vram = 307 u64 used_vis_vram =
308 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 308 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
309 309
@@ -346,8 +346,8 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
346 struct ttm_operation_ctx ctx = { 346 struct ttm_operation_ctx ctx = {
347 .interruptible = true, 347 .interruptible = true,
348 .no_wait_gpu = false, 348 .no_wait_gpu = false,
349 .allow_reserved_eviction = false, 349 .resv = bo->tbo.resv,
350 .resv = bo->tbo.resv 350 .flags = 0
351 }; 351 };
352 uint32_t domain; 352 uint32_t domain;
353 int r; 353 int r;
@@ -359,7 +359,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
359 * to move it. Don't move anything if the threshold is zero. 359 * to move it. Don't move anything if the threshold is zero.
360 */ 360 */
361 if (p->bytes_moved < p->bytes_moved_threshold) { 361 if (p->bytes_moved < p->bytes_moved_threshold) {
362 if (adev->mc.visible_vram_size < adev->mc.real_vram_size && 362 if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
363 (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { 363 (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
364 /* And don't move a CPU_ACCESS_REQUIRED BO to limited 364 /* And don't move a CPU_ACCESS_REQUIRED BO to limited
365 * visible VRAM if we've depleted our allowance to do 365 * visible VRAM if we've depleted our allowance to do
@@ -381,9 +381,9 @@ retry:
381 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 381 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
382 382
383 p->bytes_moved += ctx.bytes_moved; 383 p->bytes_moved += ctx.bytes_moved;
384 if (adev->mc.visible_vram_size < adev->mc.real_vram_size && 384 if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
385 bo->tbo.mem.mem_type == TTM_PL_VRAM && 385 bo->tbo.mem.mem_type == TTM_PL_VRAM &&
386 bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) 386 bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
387 p->bytes_moved_vis += ctx.bytes_moved; 387 p->bytes_moved_vis += ctx.bytes_moved;
388 388
389 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { 389 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -437,9 +437,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
437 /* Good we can try to move this BO somewhere else */ 437 /* Good we can try to move this BO somewhere else */
438 amdgpu_ttm_placement_from_domain(bo, other); 438 amdgpu_ttm_placement_from_domain(bo, other);
439 update_bytes_moved_vis = 439 update_bytes_moved_vis =
440 adev->mc.visible_vram_size < adev->mc.real_vram_size && 440 adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
441 bo->tbo.mem.mem_type == TTM_PL_VRAM && 441 bo->tbo.mem.mem_type == TTM_PL_VRAM &&
442 bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT; 442 bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
443 initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); 443 initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
444 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 444 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
445 bytes_moved = atomic64_read(&adev->num_bytes_moved) - 445 bytes_moved = atomic64_read(&adev->num_bytes_moved) -
@@ -542,7 +542,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
542 INIT_LIST_HEAD(&duplicates); 542 INIT_LIST_HEAD(&duplicates);
543 amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); 543 amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
544 544
545 if (p->uf_entry.robj) 545 if (p->uf_entry.robj && !p->uf_entry.robj->parent)
546 list_add(&p->uf_entry.tv.head, &p->validated); 546 list_add(&p->uf_entry.tv.head, &p->validated);
547 547
548 while (1) { 548 while (1) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index ee76b468774a..369beb5041a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -767,10 +767,21 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
767 return 0; 767 return 0;
768} 768}
769 769
770static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
771{
772 struct drm_info_node *node = (struct drm_info_node *)m->private;
773 struct drm_device *dev = node->minor->dev;
774 struct amdgpu_device *adev = dev->dev_private;
775
776 seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
777 return 0;
778}
779
770static const struct drm_info_list amdgpu_debugfs_list[] = { 780static const struct drm_info_list amdgpu_debugfs_list[] = {
771 {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump}, 781 {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
772 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}, 782 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
773 {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram} 783 {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
784 {"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
774}; 785};
775 786
776int amdgpu_debugfs_init(struct amdgpu_device *adev) 787int amdgpu_debugfs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 00a50cc5ec9a..690cf77b950e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -87,6 +87,8 @@ static const char *amdgpu_asic_name[] = {
87 "LAST", 87 "LAST",
88}; 88};
89 89
90static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
91
90bool amdgpu_device_is_px(struct drm_device *dev) 92bool amdgpu_device_is_px(struct drm_device *dev)
91{ 93{
92 struct amdgpu_device *adev = dev->dev_private; 94 struct amdgpu_device *adev = dev->dev_private;
@@ -121,6 +123,32 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
121 return ret; 123 return ret;
122} 124}
123 125
126/*
127 * MMIO byte-access register read helper
128 * @offset: byte offset from the start of the MMIO space
129 *
130 */
131
132uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
133 if (offset < adev->rmmio_size)
134 return (readb(adev->rmmio + offset));
135 BUG();
136}
137
138/*
139 * MMIO byte-access register write helper
140 * @offset: byte offset from the start of the MMIO space
141 * @value: the value to be written to the register
142 *
143 */
144void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
145 if (offset < adev->rmmio_size)
146 writeb(value, adev->rmmio + offset);
147 else
148 BUG();
149}
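
A hedged usage sketch for the new byte-wide MMIO helpers; the wrapper is illustrative, and offset is a byte offset into the mapped register BAR.

	static void example_set_mmio_byte_bit(struct amdgpu_device *adev,
					      uint32_t offset, unsigned int bit)
	{
		uint8_t val = amdgpu_mm_rreg8(adev, offset);

		amdgpu_mm_wreg8(adev, offset, val | (1u << bit));
	}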
150
151
124void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, 152void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
125 uint32_t acc_flags) 153 uint32_t acc_flags)
126{ 154{
@@ -492,7 +520,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)
492 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); 520 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
493 521
494 /* clear wb memory */ 522 /* clear wb memory */
495 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t)); 523 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
496 } 524 }
497 525
498 return 0; 526 return 0;
@@ -530,8 +558,9 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
530 */ 558 */
531void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) 559void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
532{ 560{
561 wb >>= 3;
533 if (wb < adev->wb.num_wb) 562 if (wb < adev->wb.num_wb)
534 __clear_bit(wb >> 3, adev->wb.used); 563 __clear_bit(wb, adev->wb.used);
535} 564}
536 565
537/** 566/**
@@ -544,7 +573,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
544 * as parameter. 573 * as parameter.
545 */ 574 */
546void amdgpu_device_vram_location(struct amdgpu_device *adev, 575void amdgpu_device_vram_location(struct amdgpu_device *adev,
547 struct amdgpu_mc *mc, u64 base) 576 struct amdgpu_gmc *mc, u64 base)
548{ 577{
549 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; 578 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
550 579
@@ -570,11 +599,11 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
570 * FIXME: when reducing GTT size align new size on power of 2. 599 * FIXME: when reducing GTT size align new size on power of 2.
571 */ 600 */
572void amdgpu_device_gart_location(struct amdgpu_device *adev, 601void amdgpu_device_gart_location(struct amdgpu_device *adev,
573 struct amdgpu_mc *mc) 602 struct amdgpu_gmc *mc)
574{ 603{
575 u64 size_af, size_bf; 604 u64 size_af, size_bf;
576 605
577 size_af = adev->mc.mc_mask - mc->vram_end; 606 size_af = adev->gmc.mc_mask - mc->vram_end;
578 size_bf = mc->vram_start; 607 size_bf = mc->vram_start;
579 if (size_bf > size_af) { 608 if (size_bf > size_af) {
580 if (mc->gart_size > size_bf) { 609 if (mc->gart_size > size_bf) {
@@ -608,7 +637,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
608 */ 637 */
609int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) 638int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
610{ 639{
611 u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size); 640 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
612 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1; 641 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
613 struct pci_bus *root; 642 struct pci_bus *root;
614 struct resource *res; 643 struct resource *res;
@@ -829,6 +858,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
829 dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n"); 858 dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
830 amdgpu_lockup_timeout = 10000; 859 amdgpu_lockup_timeout = 10000;
831 } 860 }
861
862 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
832} 863}
833 864
834/** 865/**
@@ -1036,7 +1067,7 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1036 if (!ip_block_version) 1067 if (!ip_block_version)
1037 return -EINVAL; 1068 return -EINVAL;
1038 1069
1039 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks, 1070 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1040 ip_block_version->funcs->name); 1071 ip_block_version->funcs->name);
1041 1072
1042 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; 1073 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
@@ -1310,6 +1341,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1310 return r; 1341 return r;
1311 } 1342 }
1312 adev->ip_blocks[i].status.sw = true; 1343 adev->ip_blocks[i].status.sw = true;
1344
1313 /* need to do gmc hw init early so we can allocate gpu mem */ 1345 /* need to do gmc hw init early so we can allocate gpu mem */
1314 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 1346 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1315 r = amdgpu_device_vram_scratch_init(adev); 1347 r = amdgpu_device_vram_scratch_init(adev);
@@ -1343,8 +1375,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1343 for (i = 0; i < adev->num_ip_blocks; i++) { 1375 for (i = 0; i < adev->num_ip_blocks; i++) {
1344 if (!adev->ip_blocks[i].status.sw) 1376 if (!adev->ip_blocks[i].status.sw)
1345 continue; 1377 continue;
1346 /* gmc hw init is done early */ 1378 if (adev->ip_blocks[i].status.hw)
1347 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
1348 continue; 1379 continue;
1349 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 1380 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1350 if (r) { 1381 if (r) {
@@ -1378,12 +1409,16 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
1378{ 1409{
1379 int i = 0, r; 1410 int i = 0, r;
1380 1411
1412 if (amdgpu_emu_mode == 1)
1413 return 0;
1414
1381 for (i = 0; i < adev->num_ip_blocks; i++) { 1415 for (i = 0; i < adev->num_ip_blocks; i++) {
1382 if (!adev->ip_blocks[i].status.valid) 1416 if (!adev->ip_blocks[i].status.valid)
1383 continue; 1417 continue;
1384 /* skip CG for VCE/UVD, it's handled specially */ 1418 /* skip CG for VCE/UVD, it's handled specially */
1385 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1419 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1386 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { 1420 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1421 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1387 /* enable clockgating to save power */ 1422 /* enable clockgating to save power */
1388 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1423 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1389 AMD_CG_STATE_GATE); 1424 AMD_CG_STATE_GATE);
@@ -1432,7 +1467,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1432 for (i = 0; i < adev->num_ip_blocks; i++) { 1467 for (i = 0; i < adev->num_ip_blocks; i++) {
1433 if (!adev->ip_blocks[i].status.hw) 1468 if (!adev->ip_blocks[i].status.hw)
1434 continue; 1469 continue;
1435 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 1470 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
1471 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1436 /* ungate blocks before hw fini so that we can shutdown the blocks safely */ 1472 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1437 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1473 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1438 AMD_CG_STATE_UNGATE); 1474 AMD_CG_STATE_UNGATE);
@@ -1455,11 +1491,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1455 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1491 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1456 if (!adev->ip_blocks[i].status.hw) 1492 if (!adev->ip_blocks[i].status.hw)
1457 continue; 1493 continue;
1458 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1459 amdgpu_free_static_csa(adev);
1460 amdgpu_device_wb_fini(adev);
1461 amdgpu_device_vram_scratch_fini(adev);
1462 }
1463 1494
1464 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1495 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1465 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { 1496 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
@@ -1483,9 +1514,19 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1483 adev->ip_blocks[i].status.hw = false; 1514 adev->ip_blocks[i].status.hw = false;
1484 } 1515 }
1485 1516
1517 /* disable all interrupts */
1518 amdgpu_irq_disable_all(adev);
1519
1486 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1520 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1487 if (!adev->ip_blocks[i].status.sw) 1521 if (!adev->ip_blocks[i].status.sw)
1488 continue; 1522 continue;
1523
1524 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1525 amdgpu_free_static_csa(adev);
1526 amdgpu_device_wb_fini(adev);
1527 amdgpu_device_vram_scratch_fini(adev);
1528 }
1529
1489 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); 1530 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1490 /* XXX handle errors */ 1531 /* XXX handle errors */
1491 if (r) { 1532 if (r) {
@@ -1536,7 +1577,8 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
1536 if (!adev->ip_blocks[i].status.valid) 1577 if (!adev->ip_blocks[i].status.valid)
1537 continue; 1578 continue;
1538 /* ungate blocks so that suspend can properly shut them down */ 1579 /* ungate blocks so that suspend can properly shut them down */
1539 if (i != AMD_IP_BLOCK_TYPE_SMC) { 1580 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
1581 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1540 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1582 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1541 AMD_CG_STATE_UNGATE); 1583 AMD_CG_STATE_UNGATE);
1542 if (r) { 1584 if (r) {
@@ -1582,6 +1624,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
1582 1624
1583 r = block->version->funcs->hw_init(adev); 1625 r = block->version->funcs->hw_init(adev);
1584	r = block->version->funcs->hw_init(adev);	DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 1626	DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
1627 if (r)
1628 return r;
1585 } 1629 }
1586 } 1630 }
1587 1631
@@ -1615,6 +1659,8 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
1615 1659
1616 r = block->version->funcs->hw_init(adev); 1660 r = block->version->funcs->hw_init(adev);
1617	DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 1661	DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
1662 if (r)
1663 return r;
1618 } 1664 }
1619 } 1665 }
1620 1666
@@ -1701,6 +1747,8 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
1701 case CHIP_BONAIRE: 1747 case CHIP_BONAIRE:
1702 case CHIP_HAWAII: 1748 case CHIP_HAWAII:
1703 case CHIP_KAVERI: 1749 case CHIP_KAVERI:
1750 case CHIP_KABINI:
1751 case CHIP_MULLINS:
1704 case CHIP_CARRIZO: 1752 case CHIP_CARRIZO:
1705 case CHIP_STONEY: 1753 case CHIP_STONEY:
1706 case CHIP_POLARIS11: 1754 case CHIP_POLARIS11:
@@ -1711,9 +1759,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
1711#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA) 1759#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
1712 return amdgpu_dc != 0; 1760 return amdgpu_dc != 0;
1713#endif 1761#endif
1714 case CHIP_KABINI:
1715 case CHIP_MULLINS:
1716 return amdgpu_dc > 0;
1717 case CHIP_VEGA10: 1762 case CHIP_VEGA10:
1718#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 1763#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1719 case CHIP_RAVEN: 1764 case CHIP_RAVEN:
@@ -1768,14 +1813,16 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1768 adev->flags = flags; 1813 adev->flags = flags;
1769 adev->asic_type = flags & AMD_ASIC_MASK; 1814 adev->asic_type = flags & AMD_ASIC_MASK;
1770 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 1815 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1771 adev->mc.gart_size = 512 * 1024 * 1024; 1816 if (amdgpu_emu_mode == 1)
1817 adev->usec_timeout *= 2;
1818 adev->gmc.gart_size = 512 * 1024 * 1024;
1772 adev->accel_working = false; 1819 adev->accel_working = false;
1773 adev->num_rings = 0; 1820 adev->num_rings = 0;
1774 adev->mman.buffer_funcs = NULL; 1821 adev->mman.buffer_funcs = NULL;
1775 adev->mman.buffer_funcs_ring = NULL; 1822 adev->mman.buffer_funcs_ring = NULL;
1776 adev->vm_manager.vm_pte_funcs = NULL; 1823 adev->vm_manager.vm_pte_funcs = NULL;
1777 adev->vm_manager.vm_pte_num_rings = 0; 1824 adev->vm_manager.vm_pte_num_rings = 0;
1778 adev->gart.gart_funcs = NULL; 1825 adev->gmc.gmc_funcs = NULL;
1779 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 1826 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1780 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 1827 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1781 1828
@@ -1864,6 +1911,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1864 if (adev->rio_mem == NULL) 1911 if (adev->rio_mem == NULL)
1865 DRM_INFO("PCI I/O BAR is not found.\n"); 1912 DRM_INFO("PCI I/O BAR is not found.\n");
1866 1913
1914 amdgpu_device_get_pcie_info(adev);
1915
1867 /* early init functions */ 1916 /* early init functions */
1868 r = amdgpu_device_ip_early_init(adev); 1917 r = amdgpu_device_ip_early_init(adev);
1869 if (r) 1918 if (r)
@@ -1882,6 +1931,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1882 if (runtime) 1931 if (runtime)
1883 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 1932 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1884 1933
1934 if (amdgpu_emu_mode == 1) {
1935	/* post the asic in emulation mode */
1936 emu_soc_asic_init(adev);
1937 goto fence_driver_init;
1938 }
1939
1885 /* Read BIOS */ 1940 /* Read BIOS */
1886 if (!amdgpu_get_bios(adev)) { 1941 if (!amdgpu_get_bios(adev)) {
1887 r = -EINVAL; 1942 r = -EINVAL;
@@ -1934,6 +1989,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1934 amdgpu_atombios_i2c_init(adev); 1989 amdgpu_atombios_i2c_init(adev);
1935 } 1990 }
1936 1991
1992fence_driver_init:
1937 /* Fence driver */ 1993 /* Fence driver */
1938 r = amdgpu_fence_driver_init(adev); 1994 r = amdgpu_fence_driver_init(adev);
1939 if (r) { 1995 if (r) {
@@ -2065,6 +2121,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
2065 2121
2066 amdgpu_ib_pool_fini(adev); 2122 amdgpu_ib_pool_fini(adev);
2067 amdgpu_fence_driver_fini(adev); 2123 amdgpu_fence_driver_fini(adev);
2124 amdgpu_pm_sysfs_fini(adev);
2068 amdgpu_fbdev_fini(adev); 2125 amdgpu_fbdev_fini(adev);
2069 r = amdgpu_device_ip_fini(adev); 2126 r = amdgpu_device_ip_fini(adev);
2070 if (adev->firmware.gpu_info_fw) { 2127 if (adev->firmware.gpu_info_fw) {
@@ -2076,7 +2133,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
2076 /* free i2c buses */ 2133 /* free i2c buses */
2077 if (!amdgpu_device_has_dc_support(adev)) 2134 if (!amdgpu_device_has_dc_support(adev))
2078 amdgpu_i2c_fini(adev); 2135 amdgpu_i2c_fini(adev);
2079 amdgpu_atombios_fini(adev); 2136
2137 if (amdgpu_emu_mode != 1)
2138 amdgpu_atombios_fini(adev);
2139
2080 kfree(adev->bios); 2140 kfree(adev->bios);
2081 adev->bios = NULL; 2141 adev->bios = NULL;
2082 if (!pci_is_thunderbolt_attached(adev->pdev)) 2142 if (!pci_is_thunderbolt_attached(adev->pdev))
@@ -2090,7 +2150,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
2090 iounmap(adev->rmmio); 2150 iounmap(adev->rmmio);
2091 adev->rmmio = NULL; 2151 adev->rmmio = NULL;
2092 amdgpu_device_doorbell_fini(adev); 2152 amdgpu_device_doorbell_fini(adev);
2093 amdgpu_pm_sysfs_fini(adev);
2094 amdgpu_debugfs_regs_cleanup(adev); 2153 amdgpu_debugfs_regs_cleanup(adev);
2095} 2154}
2096 2155
@@ -2284,14 +2343,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2284 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 2343 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2285 } 2344 }
2286 drm_modeset_unlock_all(dev); 2345 drm_modeset_unlock_all(dev);
2287 } else {
2288 /*
2289 * There is no equivalent atomic helper to turn on
2290 * display, so we defined our own function for this,
2291 * once suspend resume is supported by the atomic
2292 * framework this will be reworked
2293 */
2294 amdgpu_dm_display_resume(adev);
2295 } 2346 }
2296 } 2347 }
2297 2348
@@ -2458,17 +2509,71 @@ err:
2458 return r; 2509 return r;
2459} 2510}
2460 2511
2512static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
2513{
2514 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2515 struct amdgpu_bo *bo, *tmp;
2516 struct dma_fence *fence = NULL, *next = NULL;
2517 long r = 1;
2518 int i = 0;
2519 long tmo;
2520
2521 if (amdgpu_sriov_runtime(adev))
2522 tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
2523 else
2524 tmo = msecs_to_jiffies(100);
2525
2526 DRM_INFO("recover vram bo from shadow start\n");
2527 mutex_lock(&adev->shadow_list_lock);
2528 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2529 next = NULL;
2530 amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
2531 if (fence) {
2532 r = dma_fence_wait_timeout(fence, false, tmo);
2533 if (r == 0)
2534 pr_err("wait fence %p[%d] timeout\n", fence, i);
2535 else if (r < 0)
2536 pr_err("wait fence %p[%d] interrupted\n", fence, i);
2537 if (r < 1) {
2538 dma_fence_put(fence);
2539 fence = next;
2540 break;
2541 }
2542 i++;
2543 }
2544
2545 dma_fence_put(fence);
2546 fence = next;
2547 }
2548 mutex_unlock(&adev->shadow_list_lock);
2549
2550 if (fence) {
2551 r = dma_fence_wait_timeout(fence, false, tmo);
2552 if (r == 0)
2553 pr_err("wait fence %p[%d] timeout\n", fence, i);
2554 else if (r < 0)
2555 pr_err("wait fence %p[%d] interrupted\n", fence, i);
2556
2557 }
2558 dma_fence_put(fence);
2559
2560 if (r > 0)
2561 DRM_INFO("recover vram bo from shadow done\n");
2562 else
2563 DRM_ERROR("recover vram bo from shadow failed\n");
2564
2565 return (r > 0?0:1);
2566}
2567
2461/* 2568/*
2462 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough 2569 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
2463 * 2570 *
2464 * @adev: amdgpu device pointer 2571 * @adev: amdgpu device pointer
2465 * @reset_flags: output param tells caller the reset result
2466 * 2572 *
2467 * attempt to do soft-reset or full-reset and reinitialize the ASIC 2573 * attempt to do soft-reset or full-reset and reinitialize the ASIC
2468 * return 0 means succeeded otherwise failed 2574 * return 0 means succeeded otherwise failed
2469*/ 2575*/
2470static int amdgpu_device_reset(struct amdgpu_device *adev, 2576static int amdgpu_device_reset(struct amdgpu_device *adev)
2471 uint64_t* reset_flags)
2472{ 2577{
2473 bool need_full_reset, vram_lost = 0; 2578 bool need_full_reset, vram_lost = 0;
2474 int r; 2579 int r;
@@ -2483,7 +2588,6 @@ static int amdgpu_device_reset(struct amdgpu_device *adev,
2483 DRM_INFO("soft reset failed, will fallback to full reset!\n"); 2588 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2484 need_full_reset = true; 2589 need_full_reset = true;
2485 } 2590 }
2486
2487 } 2591 }
2488 2592
2489 if (need_full_reset) { 2593 if (need_full_reset) {
@@ -2532,13 +2636,8 @@ out:
2532 } 2636 }
2533 } 2637 }
2534 2638
2535 if (reset_flags) { 2639 if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
2536 if (vram_lost) 2640 r = amdgpu_device_handle_vram_lost(adev);
2537 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
2538
2539 if (need_full_reset)
2540 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
2541 }
2542 2641
2543 return r; 2642 return r;
2544} 2643}
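
The helper added above pipelines the shadow-buffer restores: the copy for the current BO is queued first, and only then is the fence of the previous copy waited on, with a bounded timeout instead of the old open-ended dma_fence_wait(). Condensed to its core (an illustrative sketch, not the patch itself; locking and logging omitted):

	/* sketch: pipelined wait over the shadow list; tmo comes from
	 * amdgpu_lockup_timeout under SR-IOV runtime, otherwise 100 ms */
	struct dma_fence *fence = NULL, *next = NULL;
	long r = 1;

	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		next = NULL;
		/* queue the restore of this BO; its fence comes back in next */
		amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);

		if (fence) {
			/* wait on the previous restore while this one runs */
			r = dma_fence_wait_timeout(fence, false, tmo);
			if (r < 1) {	/* 0 = timed out, < 0 = interrupted */
				dma_fence_put(fence);
				fence = next;
				break;
			}
		}
		dma_fence_put(fence);	/* dma_fence_put(NULL) is a no-op */
		fence = next;
	}

	/* wait for the last restore still in flight */
	if (fence)
		r = dma_fence_wait_timeout(fence, false, tmo);
	dma_fence_put(fence);
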
@@ -2547,14 +2646,11 @@ out:
2547 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf 2646 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
2548 * 2647 *
2549 * @adev: amdgpu device pointer 2648 * @adev: amdgpu device pointer
2550 * @reset_flags: output param tells caller the reset result
2551 * 2649 *
2552 * do VF FLR and reinitialize the ASIC 2650 * do VF FLR and reinitialize the ASIC
2553 * return 0 means succeeded otherwise failed 2651 * return 0 means succeeded otherwise failed
2554*/ 2652*/
2555static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, 2653static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, bool from_hypervisor)
2556 uint64_t *reset_flags,
2557 bool from_hypervisor)
2558{ 2654{
2559 int r; 2655 int r;
2560 2656
@@ -2575,28 +2671,20 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
2575 2671
2576 /* now we are okay to resume SMC/CP/SDMA */ 2672 /* now we are okay to resume SMC/CP/SDMA */
2577 r = amdgpu_device_ip_reinit_late_sriov(adev); 2673 r = amdgpu_device_ip_reinit_late_sriov(adev);
2674 amdgpu_virt_release_full_gpu(adev, true);
2578 if (r) 2675 if (r)
2579 goto error; 2676 goto error;
2580 2677
2581 amdgpu_irq_gpu_reset_resume_helper(adev); 2678 amdgpu_irq_gpu_reset_resume_helper(adev);
2582 r = amdgpu_ib_ring_tests(adev); 2679 r = amdgpu_ib_ring_tests(adev);
2583 if (r)
2584 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2585
2586error:
2587 /* release full control of GPU after ib test */
2588 amdgpu_virt_release_full_gpu(adev, true);
2589 2680
2590 if (reset_flags) { 2681 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
2591 if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 2682 atomic_inc(&adev->vram_lost_counter);
2592 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST; 2683 r = amdgpu_device_handle_vram_lost(adev);
2593 atomic_inc(&adev->vram_lost_counter);
2594 }
2595
2596 /* VF FLR or hotlink reset is always full-reset */
2597 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
2598 } 2684 }
2599 2685
2686error:
2687
2600 return r; 2688 return r;
2601} 2689}
2602 2690
@@ -2614,7 +2702,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
2614 struct amdgpu_job *job, bool force) 2702 struct amdgpu_job *job, bool force)
2615{ 2703{
2616 struct drm_atomic_state *state = NULL; 2704 struct drm_atomic_state *state = NULL;
2617 uint64_t reset_flags = 0;
2618 int i, r, resched; 2705 int i, r, resched;
2619 2706
2620 if (!force && !amdgpu_device_ip_check_soft_reset(adev)) { 2707 if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
@@ -2636,22 +2723,23 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
2636 2723
2637 /* block TTM */ 2724 /* block TTM */
2638 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); 2725 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2726
2639 /* store modesetting */ 2727 /* store modesetting */
2640 if (amdgpu_device_has_dc_support(adev)) 2728 if (amdgpu_device_has_dc_support(adev))
2641 state = drm_atomic_helper_suspend(adev->ddev); 2729 state = drm_atomic_helper_suspend(adev->ddev);
2642 2730
2643 /* block scheduler */ 2731 /* block all schedulers and reset given job's ring */
2644 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2732 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2645 struct amdgpu_ring *ring = adev->rings[i]; 2733 struct amdgpu_ring *ring = adev->rings[i];
2646 2734
2647 if (!ring || !ring->sched.thread) 2735 if (!ring || !ring->sched.thread)
2648 continue; 2736 continue;
2649 2737
2650 /* only focus on the ring hit timeout if &job not NULL */ 2738 kthread_park(ring->sched.thread);
2739
2651 if (job && job->ring->idx != i) 2740 if (job && job->ring->idx != i)
2652 continue; 2741 continue;
2653 2742
2654 kthread_park(ring->sched.thread);
2655 drm_sched_hw_job_reset(&ring->sched, &job->base); 2743 drm_sched_hw_job_reset(&ring->sched, &job->base);
2656 2744
2657 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 2745 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
@@ -2659,74 +2747,29 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
2659 } 2747 }
2660 2748
2661 if (amdgpu_sriov_vf(adev)) 2749 if (amdgpu_sriov_vf(adev))
2662 r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true); 2750 r = amdgpu_device_reset_sriov(adev, job ? false : true);
2663 else 2751 else
2664 r = amdgpu_device_reset(adev, &reset_flags); 2752 r = amdgpu_device_reset(adev);
2665 2753
2666 if (!r) { 2754 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2667 if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) || 2755 struct amdgpu_ring *ring = adev->rings[i];
2668 (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
2669 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2670 struct amdgpu_bo *bo, *tmp;
2671 struct dma_fence *fence = NULL, *next = NULL;
2672
2673 DRM_INFO("recover vram bo from shadow\n");
2674 mutex_lock(&adev->shadow_list_lock);
2675 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2676 next = NULL;
2677 amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
2678 if (fence) {
2679 r = dma_fence_wait(fence, false);
2680 if (r) {
2681 WARN(r, "recovery from shadow isn't completed\n");
2682 break;
2683 }
2684 }
2685
2686 dma_fence_put(fence);
2687 fence = next;
2688 }
2689 mutex_unlock(&adev->shadow_list_lock);
2690 if (fence) {
2691 r = dma_fence_wait(fence, false);
2692 if (r)
2693 WARN(r, "recovery from shadow isn't completed\n");
2694 }
2695 dma_fence_put(fence);
2696 }
2697
2698 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2699 struct amdgpu_ring *ring = adev->rings[i];
2700
2701 if (!ring || !ring->sched.thread)
2702 continue;
2703 2756
2704 /* only focus on the ring hit timeout if &job not NULL */ 2757 if (!ring || !ring->sched.thread)
2705 if (job && job->ring->idx != i) 2758 continue;
2706 continue;
2707 2759
2760	/* only recover the scheduler of the given job's ring,
2761	 * or of all rings when @job is NULL,
2762	 * after the reset above has completed
2763 */
2764 if ((!job || job->ring->idx == i) && !r)
2708 drm_sched_job_recovery(&ring->sched); 2765 drm_sched_job_recovery(&ring->sched);
2709 kthread_unpark(ring->sched.thread);
2710 }
2711 } else {
2712 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2713 struct amdgpu_ring *ring = adev->rings[i];
2714
2715 if (!ring || !ring->sched.thread)
2716 continue;
2717 2766
2718 /* only focus on the ring hit timeout if &job not NULL */ 2767 kthread_unpark(ring->sched.thread);
2719 if (job && job->ring->idx != i)
2720 continue;
2721
2722 kthread_unpark(adev->rings[i]->sched.thread);
2723 }
2724 } 2768 }
2725 2769
2726 if (amdgpu_device_has_dc_support(adev)) { 2770 if (amdgpu_device_has_dc_support(adev)) {
2727 if (drm_atomic_helper_resume(adev->ddev, state)) 2771 if (drm_atomic_helper_resume(adev->ddev, state))
2728 dev_info(adev->dev, "drm resume failed:%d\n", r); 2772 dev_info(adev->dev, "drm resume failed:%d\n", r);
2729 amdgpu_dm_display_resume(adev);
2730 } else { 2773 } else {
2731 drm_helper_resume_force_mode(adev->ddev); 2774 drm_helper_resume_force_mode(adev->ddev);
2732 } 2775 }
@@ -2747,7 +2790,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
2747 return r; 2790 return r;
2748} 2791}
2749 2792
2750void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) 2793static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
2751{ 2794{
2752 u32 mask; 2795 u32 mask;
2753 int ret; 2796 int ret;
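
Taken together, the gpu_recover changes above collapse the ring handling into one pass before the reset and one pass after it: every scheduler is parked, only the ring that owns the timed-out job has its hardware jobs reset, and recovery is requeued only on that ring (or on all rings when no job was passed in) and only if the reset succeeded. A minimal sketch of that flow, using the helpers named in the hunks (DC suspend/resume, TTM locking and error handling left out; the NULL-job guard on drm_sched_hw_job_reset() is added here for clarity):

	/* illustrative sketch of the reworked recovery sequence */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* stop every scheduler so nothing new reaches the hardware */
		kthread_park(ring->sched.thread);

		/* but only reset HW jobs on the ring that hit the timeout */
		if (job && job->ring->idx != i)
			continue;
		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
	}

	if (amdgpu_sriov_vf(adev))
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
	else
		r = amdgpu_device_reset(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* requeue jobs on the affected ring(s) only if reset worked */
		if ((!job || job->ring->idx == i) && !r)
			drm_sched_job_recovery(&ring->sched);

		kthread_unpark(ring->sched.thread);
	}
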
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 38d47559f098..93f700ab1bfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -29,6 +29,7 @@
29#include "amdgpu_i2c.h" 29#include "amdgpu_i2c.h"
30#include "atom.h" 30#include "atom.h"
31#include "amdgpu_connectors.h" 31#include "amdgpu_connectors.h"
32#include "amdgpu_display.h"
32#include <asm/div64.h> 33#include <asm/div64.h>
33 34
34#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
@@ -36,7 +37,8 @@
36#include <drm/drm_edid.h> 37#include <drm/drm_edid.h>
37#include <drm/drm_fb_helper.h> 38#include <drm/drm_fb_helper.h>
38 39
39static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb) 40static void amdgpu_display_flip_callback(struct dma_fence *f,
41 struct dma_fence_cb *cb)
40{ 42{
41 struct amdgpu_flip_work *work = 43 struct amdgpu_flip_work *work =
42 container_of(cb, struct amdgpu_flip_work, cb); 44 container_of(cb, struct amdgpu_flip_work, cb);
@@ -45,8 +47,8 @@ static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
45 schedule_work(&work->flip_work.work); 47 schedule_work(&work->flip_work.work);
46} 48}
47 49
48static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, 50static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
49 struct dma_fence **f) 51 struct dma_fence **f)
50{ 52{
51 struct dma_fence *fence= *f; 53 struct dma_fence *fence= *f;
52 54
@@ -55,14 +57,15 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
55 57
56 *f = NULL; 58 *f = NULL;
57 59
58 if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) 60 if (!dma_fence_add_callback(fence, &work->cb,
61 amdgpu_display_flip_callback))
59 return true; 62 return true;
60 63
61 dma_fence_put(fence); 64 dma_fence_put(fence);
62 return false; 65 return false;
63} 66}
64 67
65static void amdgpu_flip_work_func(struct work_struct *__work) 68static void amdgpu_display_flip_work_func(struct work_struct *__work)
66{ 69{
67 struct delayed_work *delayed_work = 70 struct delayed_work *delayed_work =
68 container_of(__work, struct delayed_work, work); 71 container_of(__work, struct delayed_work, work);
@@ -76,20 +79,20 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
76 unsigned i; 79 unsigned i;
77 int vpos, hpos; 80 int vpos, hpos;
78 81
79 if (amdgpu_flip_handle_fence(work, &work->excl)) 82 if (amdgpu_display_flip_handle_fence(work, &work->excl))
80 return; 83 return;
81 84
82 for (i = 0; i < work->shared_count; ++i) 85 for (i = 0; i < work->shared_count; ++i)
83 if (amdgpu_flip_handle_fence(work, &work->shared[i])) 86 if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
84 return; 87 return;
85 88
86 /* Wait until we're out of the vertical blank period before the one 89 /* Wait until we're out of the vertical blank period before the one
87 * targeted by the flip 90 * targeted by the flip
88 */ 91 */
89 if (amdgpu_crtc->enabled && 92 if (amdgpu_crtc->enabled &&
90 (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, 93 (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
91 &vpos, &hpos, NULL, NULL, 94 &vpos, &hpos, NULL, NULL,
92 &crtc->hwmode) 95 &crtc->hwmode)
93 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 96 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
94 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 97 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
95 (int)(work->target_vblank - 98 (int)(work->target_vblank -
@@ -117,7 +120,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
117/* 120/*
118 * Handle unpin events outside the interrupt handler proper. 121 * Handle unpin events outside the interrupt handler proper.
119 */ 122 */
120static void amdgpu_unpin_work_func(struct work_struct *__work) 123static void amdgpu_display_unpin_work_func(struct work_struct *__work)
121{ 124{
122 struct amdgpu_flip_work *work = 125 struct amdgpu_flip_work *work =
123 container_of(__work, struct amdgpu_flip_work, unpin_work); 126 container_of(__work, struct amdgpu_flip_work, unpin_work);
@@ -139,11 +142,11 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
139 kfree(work); 142 kfree(work);
140} 143}
141 144
142int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, 145int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
143 struct drm_framebuffer *fb, 146 struct drm_framebuffer *fb,
144 struct drm_pending_vblank_event *event, 147 struct drm_pending_vblank_event *event,
145 uint32_t page_flip_flags, uint32_t target, 148 uint32_t page_flip_flags, uint32_t target,
146 struct drm_modeset_acquire_ctx *ctx) 149 struct drm_modeset_acquire_ctx *ctx)
147{ 150{
148 struct drm_device *dev = crtc->dev; 151 struct drm_device *dev = crtc->dev;
149 struct amdgpu_device *adev = dev->dev_private; 152 struct amdgpu_device *adev = dev->dev_private;
@@ -162,8 +165,8 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
162 if (work == NULL) 165 if (work == NULL)
163 return -ENOMEM; 166 return -ENOMEM;
164 167
165 INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func); 168 INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
166 INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func); 169 INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);
167 170
168 work->event = event; 171 work->event = event;
169 work->adev = adev; 172 work->adev = adev;
@@ -189,7 +192,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
189 goto cleanup; 192 goto cleanup;
190 } 193 }
191 194
192 r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base); 195 r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base);
193 if (unlikely(r != 0)) { 196 if (unlikely(r != 0)) {
194 DRM_ERROR("failed to pin new abo buffer before flip\n"); 197 DRM_ERROR("failed to pin new abo buffer before flip\n");
195 goto unreserve; 198 goto unreserve;
@@ -207,7 +210,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
207 amdgpu_bo_unreserve(new_abo); 210 amdgpu_bo_unreserve(new_abo);
208 211
209 work->base = base; 212 work->base = base;
210 work->target_vblank = target - drm_crtc_vblank_count(crtc) + 213 work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
211 amdgpu_get_vblank_counter_kms(dev, work->crtc_id); 214 amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
212 215
213	/* we borrow the event spin lock for protecting flip_work */ 216	/* we borrow the event spin lock for protecting flip_work */
@@ -228,7 +231,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
228 /* update crtc fb */ 231 /* update crtc fb */
229 crtc->primary->fb = fb; 232 crtc->primary->fb = fb;
230 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 233 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
231 amdgpu_flip_work_func(&work->flip_work.work); 234 amdgpu_display_flip_work_func(&work->flip_work.work);
232 return 0; 235 return 0;
233 236
234pflip_cleanup: 237pflip_cleanup:
@@ -254,8 +257,8 @@ cleanup:
254 return r; 257 return r;
255} 258}
256 259
257int amdgpu_crtc_set_config(struct drm_mode_set *set, 260int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
258 struct drm_modeset_acquire_ctx *ctx) 261 struct drm_modeset_acquire_ctx *ctx)
259{ 262{
260 struct drm_device *dev; 263 struct drm_device *dev;
261 struct amdgpu_device *adev; 264 struct amdgpu_device *adev;
@@ -352,7 +355,7 @@ static const char *hpd_names[6] = {
352 "HPD6", 355 "HPD6",
353}; 356};
354 357
355void amdgpu_print_display_setup(struct drm_device *dev) 358void amdgpu_display_print_display_setup(struct drm_device *dev)
356{ 359{
357 struct drm_connector *connector; 360 struct drm_connector *connector;
358 struct amdgpu_connector *amdgpu_connector; 361 struct amdgpu_connector *amdgpu_connector;
@@ -429,11 +432,11 @@ void amdgpu_print_display_setup(struct drm_device *dev)
429} 432}
430 433
431/** 434/**
432 * amdgpu_ddc_probe 435 * amdgpu_display_ddc_probe
433 * 436 *
434 */ 437 */
435bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, 438bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
436 bool use_aux) 439 bool use_aux)
437{ 440{
438 u8 out = 0x0; 441 u8 out = 0x0;
439 u8 buf[8]; 442 u8 buf[8];
@@ -479,7 +482,7 @@ bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
479 return true; 482 return true;
480} 483}
481 484
482static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) 485static void amdgpu_display_user_framebuffer_destroy(struct drm_framebuffer *fb)
483{ 486{
484 struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); 487 struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
485 488
@@ -488,9 +491,10 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
488 kfree(amdgpu_fb); 491 kfree(amdgpu_fb);
489} 492}
490 493
491static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb, 494static int amdgpu_display_user_framebuffer_create_handle(
492 struct drm_file *file_priv, 495 struct drm_framebuffer *fb,
493 unsigned int *handle) 496 struct drm_file *file_priv,
497 unsigned int *handle)
494{ 498{
495 struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); 499 struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
496 500
@@ -498,15 +502,28 @@ static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
498} 502}
499 503
500static const struct drm_framebuffer_funcs amdgpu_fb_funcs = { 504static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
501 .destroy = amdgpu_user_framebuffer_destroy, 505 .destroy = amdgpu_display_user_framebuffer_destroy,
502 .create_handle = amdgpu_user_framebuffer_create_handle, 506 .create_handle = amdgpu_display_user_framebuffer_create_handle,
503}; 507};
504 508
505int 509uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
506amdgpu_framebuffer_init(struct drm_device *dev, 510{
507 struct amdgpu_framebuffer *rfb, 511 uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
508 const struct drm_mode_fb_cmd2 *mode_cmd, 512
509 struct drm_gem_object *obj) 513#if defined(CONFIG_DRM_AMD_DC)
514 if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type < CHIP_RAVEN &&
515 adev->flags & AMD_IS_APU &&
516 amdgpu_device_asic_has_dc_support(adev->asic_type))
517 domain |= AMDGPU_GEM_DOMAIN_GTT;
518#endif
519
520 return domain;
521}
522
523int amdgpu_display_framebuffer_init(struct drm_device *dev,
524 struct amdgpu_framebuffer *rfb,
525 const struct drm_mode_fb_cmd2 *mode_cmd,
526 struct drm_gem_object *obj)
510{ 527{
511 int ret; 528 int ret;
512 rfb->obj = obj; 529 rfb->obj = obj;
@@ -520,9 +537,9 @@ amdgpu_framebuffer_init(struct drm_device *dev,
520} 537}
521 538
522struct drm_framebuffer * 539struct drm_framebuffer *
523amdgpu_user_framebuffer_create(struct drm_device *dev, 540amdgpu_display_user_framebuffer_create(struct drm_device *dev,
524 struct drm_file *file_priv, 541 struct drm_file *file_priv,
525 const struct drm_mode_fb_cmd2 *mode_cmd) 542 const struct drm_mode_fb_cmd2 *mode_cmd)
526{ 543{
527 struct drm_gem_object *obj; 544 struct drm_gem_object *obj;
528 struct amdgpu_framebuffer *amdgpu_fb; 545 struct amdgpu_framebuffer *amdgpu_fb;
@@ -547,7 +564,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
547 return ERR_PTR(-ENOMEM); 564 return ERR_PTR(-ENOMEM);
548 } 565 }
549 566
550 ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj); 567 ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
551 if (ret) { 568 if (ret) {
552 kfree(amdgpu_fb); 569 kfree(amdgpu_fb);
553 drm_gem_object_put_unlocked(obj); 570 drm_gem_object_put_unlocked(obj);
@@ -558,7 +575,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
558} 575}
559 576
560const struct drm_mode_config_funcs amdgpu_mode_funcs = { 577const struct drm_mode_config_funcs amdgpu_mode_funcs = {
561 .fb_create = amdgpu_user_framebuffer_create, 578 .fb_create = amdgpu_display_user_framebuffer_create,
562 .output_poll_changed = drm_fb_helper_output_poll_changed, 579 .output_poll_changed = drm_fb_helper_output_poll_changed,
563}; 580};
564 581
@@ -580,7 +597,7 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
580 { AMDGPU_FMT_DITHER_ENABLE, "on" }, 597 { AMDGPU_FMT_DITHER_ENABLE, "on" },
581}; 598};
582 599
583int amdgpu_modeset_create_props(struct amdgpu_device *adev) 600int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
584{ 601{
585 int sz; 602 int sz;
586 603
@@ -629,7 +646,7 @@ int amdgpu_modeset_create_props(struct amdgpu_device *adev)
629 return 0; 646 return 0;
630} 647}
631 648
632void amdgpu_update_display_priority(struct amdgpu_device *adev) 649void amdgpu_display_update_priority(struct amdgpu_device *adev)
633{ 650{
634 /* adjustment options for the display watermarks */ 651 /* adjustment options for the display watermarks */
635 if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2)) 652 if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
@@ -639,7 +656,7 @@ void amdgpu_update_display_priority(struct amdgpu_device *adev)
639 656
640} 657}
641 658
642static bool is_hdtv_mode(const struct drm_display_mode *mode) 659static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
643{ 660{
644 /* try and guess if this is a tv or a monitor */ 661 /* try and guess if this is a tv or a monitor */
645 if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ 662 if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
@@ -651,9 +668,9 @@ static bool is_hdtv_mode(const struct drm_display_mode *mode)
651 return false; 668 return false;
652} 669}
653 670
654bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 671bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
655 const struct drm_display_mode *mode, 672 const struct drm_display_mode *mode,
656 struct drm_display_mode *adjusted_mode) 673 struct drm_display_mode *adjusted_mode)
657{ 674{
658 struct drm_device *dev = crtc->dev; 675 struct drm_device *dev = crtc->dev;
659 struct drm_encoder *encoder; 676 struct drm_encoder *encoder;
@@ -696,7 +713,7 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
696 ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) || 713 ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
697 ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) && 714 ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
698 drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && 715 drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
699 is_hdtv_mode(mode)))) { 716 amdgpu_display_is_hdtv_mode(mode)))) {
700 if (amdgpu_encoder->underscan_hborder != 0) 717 if (amdgpu_encoder->underscan_hborder != 0)
701 amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder; 718 amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
702 else 719 else
@@ -764,10 +781,10 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
764 * unknown small number of scanlines wrt. real scanout position. 781 * unknown small number of scanlines wrt. real scanout position.
765 * 782 *
766 */ 783 */
767int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 784int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
768 unsigned int flags, int *vpos, int *hpos, 785 unsigned int pipe, unsigned int flags, int *vpos,
769 ktime_t *stime, ktime_t *etime, 786 int *hpos, ktime_t *stime, ktime_t *etime,
770 const struct drm_display_mode *mode) 787 const struct drm_display_mode *mode)
771{ 788{
772 u32 vbl = 0, position = 0; 789 u32 vbl = 0, position = 0;
773 int vbl_start, vbl_end, vtotal, ret = 0; 790 int vbl_start, vbl_end, vtotal, ret = 0;
@@ -859,7 +876,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
859 return ret; 876 return ret;
860} 877}
861 878
862int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc) 879int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
863{ 880{
864 if (crtc < 0 || crtc >= adev->mode_info.num_crtc) 881 if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
865 return AMDGPU_CRTC_IRQ_NONE; 882 return AMDGPU_CRTC_IRQ_NONE;
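
One practical consequence of amdgpu_display_framebuffer_domains(): code that creates or pins a scanout buffer should take its placement from that helper rather than hard-coding AMDGPU_GEM_DOMAIN_VRAM, since Carrizo-to-pre-Raven APUs with DC may scan out of GTT. The pin in the page-flip path above reduces to (sketch; the reservation around the pin is assumed, as in the full function):

	/* sketch: pin a scanout BO in whatever domains the ASIC can display from */
	uint32_t domain = amdgpu_display_framebuffer_domains(adev);
	uint64_t base;
	int r;

	r = amdgpu_bo_pin(new_abo, domain, &base);	/* BO must already be reserved */
	if (unlikely(r != 0))
		DRM_ERROR("failed to pin new abo buffer before flip\n");
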
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 0bcb6c6e0ca9..2b11d808f297 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -23,9 +23,10 @@
23#ifndef __AMDGPU_DISPLAY_H__ 23#ifndef __AMDGPU_DISPLAY_H__
24#define __AMDGPU_DISPLAY_H__ 24#define __AMDGPU_DISPLAY_H__
25 25
26uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev);
26struct drm_framebuffer * 27struct drm_framebuffer *
27amdgpu_user_framebuffer_create(struct drm_device *dev, 28amdgpu_display_user_framebuffer_create(struct drm_device *dev,
28 struct drm_file *file_priv, 29 struct drm_file *file_priv,
29 const struct drm_mode_fb_cmd2 *mode_cmd); 30 const struct drm_mode_fb_cmd2 *mode_cmd);
30 31
31#endif 32#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index a8437a3296a6..643d008410c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -265,9 +265,6 @@ enum amdgpu_pcie_gen {
265#define amdgpu_dpm_read_sensor(adev, idx, value, size) \ 265#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
266 ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size))) 266 ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
267 267
268#define amdgpu_dpm_get_temperature(adev) \
269 ((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle))
270
271#define amdgpu_dpm_set_fan_control_mode(adev, m) \ 268#define amdgpu_dpm_set_fan_control_mode(adev, m) \
272 ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m))) 269 ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
273 270
@@ -328,8 +325,8 @@ enum amdgpu_pcie_gen {
328#define amdgpu_dpm_set_mclk_od(adev, value) \ 325#define amdgpu_dpm_set_mclk_od(adev, value) \
329 ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) 326 ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
330 327
331#define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \ 328#define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \
332 ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output)) 329 ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state))
333 330
334#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ 331#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
335 ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) 332 ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
@@ -344,17 +341,9 @@ enum amdgpu_pcie_gen {
344 ((adev)->powerplay.pp_funcs->reset_power_profile_state(\ 341 ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
345 (adev)->powerplay.pp_handle, request)) 342 (adev)->powerplay.pp_handle, request))
346 343
347#define amdgpu_dpm_get_power_profile_state(adev, query) \ 344#define amdgpu_dpm_switch_power_profile(adev, type, en) \
348 ((adev)->powerplay.pp_funcs->get_power_profile_state(\
349 (adev)->powerplay.pp_handle, query))
350
351#define amdgpu_dpm_set_power_profile_state(adev, request) \
352 ((adev)->powerplay.pp_funcs->set_power_profile_state(\
353 (adev)->powerplay.pp_handle, request))
354
355#define amdgpu_dpm_switch_power_profile(adev, type) \
356 ((adev)->powerplay.pp_funcs->switch_power_profile(\ 345 ((adev)->powerplay.pp_funcs->switch_power_profile(\
357 (adev)->powerplay.pp_handle, type)) 346 (adev)->powerplay.pp_handle, type, en))
358 347
359#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \ 348#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
360 ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\ 349 ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
@@ -366,6 +355,22 @@ enum amdgpu_pcie_gen {
366 (adev)->powerplay.pp_handle, virtual_addr_low, \ 355 (adev)->powerplay.pp_handle, virtual_addr_low, \
367 virtual_addr_hi, mc_addr_low, mc_addr_hi, size) 356 virtual_addr_hi, mc_addr_low, mc_addr_hi, size)
368 357
358#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
359 ((adev)->powerplay.pp_funcs->get_power_profile_mode(\
360 (adev)->powerplay.pp_handle, buf))
361
362#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \
363 ((adev)->powerplay.pp_funcs->set_power_profile_mode(\
364 (adev)->powerplay.pp_handle, parameter, size))
365
366#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
367 ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
368 (adev)->powerplay.pp_handle, type, parameter, size))
369
370#define amdgpu_dpm_set_mmhub_powergating_by_smu(adev) \
371 ((adev)->powerplay.pp_funcs->set_mmhub_powergating_by_smu( \
372 (adev)->powerplay.pp_handle))
373
369struct amdgpu_dpm { 374struct amdgpu_dpm {
370 struct amdgpu_ps *ps; 375 struct amdgpu_ps *ps;
371 /* number of valid power states */ 376 /* number of valid power states */
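
Like every other wrapper in this header, the new power-profile-mode macros expand straight into a pp_funcs call, so callers are expected to test the function pointer before invoking them. A hedged usage sketch (the handler name and the snprintf fallback are illustrative, not taken from this patch):

	/* sketch: dump the current power profile modes into a sysfs buffer */
	static ssize_t show_power_profile_mode(struct amdgpu_device *adev, char *buf)
	{
		if (adev->powerplay.pp_funcs->get_power_profile_mode)
			return amdgpu_dpm_get_power_profile_mode(adev, buf);

		return snprintf(buf, PAGE_SIZE, "\n");	/* not supported on this ASIC */
	}
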
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ba4335fd4f65..2337d4bfd85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -73,9 +73,11 @@
73 * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl 73 * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
74 * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl 74 * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
75 * - 3.23.0 - Add query for VRAM lost counter 75 * - 3.23.0 - Add query for VRAM lost counter
76 * - 3.24.0 - Add high priority compute support for gfx9
77 * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
76 */ 78 */
77#define KMS_DRIVER_MAJOR 3 79#define KMS_DRIVER_MAJOR 3
78#define KMS_DRIVER_MINOR 23 80#define KMS_DRIVER_MINOR 25
79#define KMS_DRIVER_PATCHLEVEL 0 81#define KMS_DRIVER_PATCHLEVEL 0
80 82
81int amdgpu_vram_limit = 0; 83int amdgpu_vram_limit = 0;
@@ -119,7 +121,7 @@ uint amdgpu_pg_mask = 0xffffffff;
119uint amdgpu_sdma_phase_quantum = 32; 121uint amdgpu_sdma_phase_quantum = 32;
120char *amdgpu_disable_cu = NULL; 122char *amdgpu_disable_cu = NULL;
121char *amdgpu_virtual_display = NULL; 123char *amdgpu_virtual_display = NULL;
122uint amdgpu_pp_feature_mask = 0xffffffff; 124uint amdgpu_pp_feature_mask = 0xffffbfff;
123int amdgpu_ngg = 0; 125int amdgpu_ngg = 0;
124int amdgpu_prim_buf_per_se = 0; 126int amdgpu_prim_buf_per_se = 0;
125int amdgpu_pos_buf_per_se = 0; 127int amdgpu_pos_buf_per_se = 0;
@@ -129,6 +131,7 @@ int amdgpu_job_hang_limit = 0;
129int amdgpu_lbpw = -1; 131int amdgpu_lbpw = -1;
130int amdgpu_compute_multipipe = -1; 132int amdgpu_compute_multipipe = -1;
131int amdgpu_gpu_recovery = -1; /* auto */ 133int amdgpu_gpu_recovery = -1; /* auto */
134int amdgpu_emu_mode = 0;
132 135
133MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 136MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
134module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 137module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -281,9 +284,12 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
281MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)"); 284MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
282module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444); 285module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
283 286
284MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto"); 287MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
285module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444); 288module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
286 289
290MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
291module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
292
287#ifdef CONFIG_DRM_AMDGPU_SI 293#ifdef CONFIG_DRM_AMDGPU_SI
288 294
289#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) 295#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -576,6 +582,11 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
576 struct drm_device *dev; 582 struct drm_device *dev;
577 unsigned long flags = ent->driver_data; 583 unsigned long flags = ent->driver_data;
578 int ret, retry = 0; 584 int ret, retry = 0;
585 bool supports_atomic = false;
586
587 if (!amdgpu_virtual_display &&
588 amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
589 supports_atomic = true;
579 590
580 if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { 591 if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
581 DRM_INFO("This hardware requires experimental hardware support.\n" 592 DRM_INFO("This hardware requires experimental hardware support.\n"
@@ -596,6 +607,13 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
596 if (ret) 607 if (ret)
597 return ret; 608 return ret;
598 609
610 /* warn the user if they mix atomic and non-atomic capable GPUs */
611 if ((kms_driver.driver_features & DRIVER_ATOMIC) && !supports_atomic)
612 DRM_ERROR("Mixing atomic and non-atomic capable GPUs!\n");
613 /* support atomic early so the atomic debugfs stuff gets created */
614 if (supports_atomic)
615 kms_driver.driver_features |= DRIVER_ATOMIC;
616
599 dev = drm_dev_alloc(&kms_driver, &pdev->dev); 617 dev = drm_dev_alloc(&kms_driver, &pdev->dev);
600 if (IS_ERR(dev)) 618 if (IS_ERR(dev))
601 return PTR_ERR(dev); 619 return PTR_ERR(dev);
@@ -833,8 +851,8 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
833 ktime_t *stime, ktime_t *etime, 851 ktime_t *stime, ktime_t *etime,
834 const struct drm_display_mode *mode) 852 const struct drm_display_mode *mode)
835{ 853{
836 return amdgpu_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, 854 return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
837 stime, etime, mode); 855 stime, etime, mode);
838} 856}
839 857
840static struct drm_driver kms_driver = { 858static struct drm_driver kms_driver = {
@@ -852,9 +870,6 @@ static struct drm_driver kms_driver = {
852 .disable_vblank = amdgpu_disable_vblank_kms, 870 .disable_vblank = amdgpu_disable_vblank_kms,
853 .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos, 871 .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
854 .get_scanout_position = amdgpu_get_crtc_scanout_position, 872 .get_scanout_position = amdgpu_get_crtc_scanout_position,
855 .irq_preinstall = amdgpu_irq_preinstall,
856 .irq_postinstall = amdgpu_irq_postinstall,
857 .irq_uninstall = amdgpu_irq_uninstall,
858 .irq_handler = amdgpu_irq_handler, 873 .irq_handler = amdgpu_irq_handler,
859 .ioctls = amdgpu_ioctls_kms, 874 .ioctls = amdgpu_ioctls_kms,
860 .gem_free_object_unlocked = amdgpu_gem_object_free, 875 .gem_free_object_unlocked = amdgpu_gem_object_free,
@@ -867,9 +882,7 @@ static struct drm_driver kms_driver = {
867 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 882 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
868 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 883 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
869 .gem_prime_export = amdgpu_gem_prime_export, 884 .gem_prime_export = amdgpu_gem_prime_export,
870 .gem_prime_import = drm_gem_prime_import, 885 .gem_prime_import = amdgpu_gem_prime_import,
871 .gem_prime_pin = amdgpu_gem_prime_pin,
872 .gem_prime_unpin = amdgpu_gem_prime_unpin,
873 .gem_prime_res_obj = amdgpu_gem_prime_res_obj, 886 .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
874 .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table, 887 .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
875 .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table, 888 .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
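
The probe-time atomic detection above has one ordering requirement worth spelling out: DRIVER_ATOMIC has to be set on kms_driver before drm_dev_alloc() runs, otherwise the atomic debugfs entries are never created. The decision boils down to (sketch of the logic added in amdgpu_pci_probe()):

	/* sketch: decide DRIVER_ATOMIC from the ASIC's DC support */
	bool supports_atomic = !amdgpu_virtual_display &&
			       amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK);

	/* warn the user if they mix atomic and non-atomic capable GPUs */
	if ((kms_driver.driver_features & DRIVER_ATOMIC) && !supports_atomic)
		DRM_ERROR("Mixing atomic and non-atomic capable GPUs!\n");

	/* must be set before drm_dev_alloc() so atomic debugfs gets created */
	if (supports_atomic)
		kms_driver.driver_features |= DRIVER_ATOMIC;
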
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index ff3e9beb7d19..12063019751b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -38,6 +38,8 @@
38 38
39#include <linux/vga_switcheroo.h> 39#include <linux/vga_switcheroo.h>
40 40
41#include "amdgpu_display.h"
42
41/* object hierarchy - 43/* object hierarchy -
42 this contains a helper + a amdgpu fb 44 this contains a helper + a amdgpu fb
43 the helper contains a pointer to amdgpu framebuffer baseclass. 45 the helper contains a pointer to amdgpu framebuffer baseclass.
@@ -124,7 +126,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
124 struct drm_gem_object *gobj = NULL; 126 struct drm_gem_object *gobj = NULL;
125 struct amdgpu_bo *abo = NULL; 127 struct amdgpu_bo *abo = NULL;
126 bool fb_tiled = false; /* useful for testing */ 128 bool fb_tiled = false; /* useful for testing */
127 u32 tiling_flags = 0; 129 u32 tiling_flags = 0, domain;
128 int ret; 130 int ret;
129 int aligned_size, size; 131 int aligned_size, size;
130 int height = mode_cmd->height; 132 int height = mode_cmd->height;
@@ -135,12 +137,12 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
135 /* need to align pitch with crtc limits */ 137 /* need to align pitch with crtc limits */
136 mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, 138 mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
137 fb_tiled); 139 fb_tiled);
140 domain = amdgpu_display_framebuffer_domains(adev);
138 141
139 height = ALIGN(mode_cmd->height, 8); 142 height = ALIGN(mode_cmd->height, 8);
140 size = mode_cmd->pitches[0] * height; 143 size = mode_cmd->pitches[0] * height;
141 aligned_size = ALIGN(size, PAGE_SIZE); 144 aligned_size = ALIGN(size, PAGE_SIZE);
142 ret = amdgpu_gem_object_create(adev, aligned_size, 0, 145 ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
143 AMDGPU_GEM_DOMAIN_VRAM,
144 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 146 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
145 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | 147 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
146 AMDGPU_GEM_CREATE_VRAM_CLEARED, 148 AMDGPU_GEM_CREATE_VRAM_CLEARED,
@@ -166,7 +168,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
166 } 168 }
167 169
168 170
169 ret = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, NULL); 171 ret = amdgpu_bo_pin(abo, domain, NULL);
170 if (ret) { 172 if (ret) {
171 amdgpu_bo_unreserve(abo); 173 amdgpu_bo_unreserve(abo);
172 goto out_unref; 174 goto out_unref;
@@ -225,7 +227,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
225 info->par = rfbdev; 227 info->par = rfbdev;
226 info->skip_vt_switch = true; 228 info->skip_vt_switch = true;
227 229
228 ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); 230 ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
231 &mode_cmd, gobj);
229 if (ret) { 232 if (ret) {
230 DRM_ERROR("failed to initialize framebuffer %d\n", ret); 233 DRM_ERROR("failed to initialize framebuffer %d\n", ret);
231 goto out; 234 goto out;
@@ -242,8 +245,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
242 245
243 info->fbops = &amdgpufb_ops; 246 info->fbops = &amdgpufb_ops;
244 247
245 tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start; 248 tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
246 info->fix.smem_start = adev->mc.aper_base + tmp; 249 info->fix.smem_start = adev->gmc.aper_base + tmp;
247 info->fix.smem_len = amdgpu_bo_size(abo); 250 info->fix.smem_len = amdgpu_bo_size(abo);
248 info->screen_base = amdgpu_bo_kptr(abo); 251 info->screen_base = amdgpu_bo_kptr(abo);
249 info->screen_size = amdgpu_bo_size(abo); 252 info->screen_size = amdgpu_bo_size(abo);
@@ -252,7 +255,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
252 255
253 /* setup aperture base/size for vesafb takeover */ 256 /* setup aperture base/size for vesafb takeover */
254 info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; 257 info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
255 info->apertures->ranges[0].size = adev->mc.aper_size; 258 info->apertures->ranges[0].size = adev->gmc.aper_size;
256 259
257 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 260 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
258 261
@@ -262,7 +265,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
262 } 265 }
263 266
264 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); 267 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
265	DRM_INFO("vram aperture at 0x%lX\n", (unsigned long)adev->mc.aper_base); 268	DRM_INFO("vram aperture at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
266 DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo)); 269 DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
267 DRM_INFO("fb depth is %d\n", fb->format->depth); 270 DRM_INFO("fb depth is %d\n", fb->format->depth);
268 DRM_INFO(" pitch is %d\n", fb->pitches[0]); 271 DRM_INFO(" pitch is %d\n", fb->pitches[0]);
@@ -319,7 +322,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
319 return 0; 322 return 0;
320 323
321 /* select 8 bpp console on low vram cards */ 324 /* select 8 bpp console on low vram cards */
322 if (adev->mc.real_vram_size <= (32*1024*1024)) 325 if (adev->gmc.real_vram_size <= (32*1024*1024))
323 bpp_sel = 8; 326 bpp_sel = 8;
324 327
325 rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL); 328 rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
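
For fbdev the net effect is that the console buffer is sized and placed like any other scanout surface: pitch aligned to the CRTC limits, height rounded up to 8 lines, the total rounded to a page, and the placement taken from amdgpu_display_framebuffer_domains(). In sketch form (values as computed in amdgpufb_create_pinned_object() above):

	/* sketch: size and placement of the fbdev scanout BO */
	u32 domain = amdgpu_display_framebuffer_domains(adev);
	int height = ALIGN(mode_cmd->height, 8);	/* scanout height alignment */
	int size = mode_cmd->pitches[0] * height;
	int aligned_size = ALIGN(size, PAGE_SIZE);	/* BO sizes are page granular */
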
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 0a4f34afaaaa..cf0f186c6092 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -68,17 +68,15 @@
68 */ 68 */
69static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) 69static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
70{ 70{
71 if (adev->dummy_page.page) 71 struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
72
73 if (adev->dummy_page_addr)
72 return 0; 74 return 0;
73 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); 75 adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
74 if (adev->dummy_page.page == NULL) 76 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
75 return -ENOMEM; 77 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
76 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
77 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
78 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
79 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); 78 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
80 __free_page(adev->dummy_page.page); 79 adev->dummy_page_addr = 0;
81 adev->dummy_page.page = NULL;
82 return -ENOMEM; 80 return -ENOMEM;
83 } 81 }
84 return 0; 82 return 0;
@@ -93,12 +91,11 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
93 */ 91 */
94static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) 92static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
95{ 93{
96 if (adev->dummy_page.page == NULL) 94 if (!adev->dummy_page_addr)
97 return; 95 return;
98 pci_unmap_page(adev->pdev, adev->dummy_page.addr, 96 pci_unmap_page(adev->pdev, adev->dummy_page_addr,
99 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 97 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
100 __free_page(adev->dummy_page.page); 98 adev->dummy_page_addr = 0;
101 adev->dummy_page.page = NULL;
102} 99}
103 100
104/** 101/**
@@ -116,11 +113,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
116 int r; 113 int r;
117 114
118 if (adev->gart.robj == NULL) { 115 if (adev->gart.robj == NULL) {
119 r = amdgpu_bo_create(adev, adev->gart.table_size, 116 r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
120 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 117 AMDGPU_GEM_DOMAIN_VRAM,
121 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 118 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
122 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, 119 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
123 NULL, NULL, 0, &adev->gart.robj); 120 ttm_bo_type_kernel, NULL,
121 &adev->gart.robj);
124 if (r) { 122 if (r) {
125 return r; 123 return r;
126 } 124 }
@@ -236,18 +234,19 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
236#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS 234#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
237 adev->gart.pages[p] = NULL; 235 adev->gart.pages[p] = NULL;
238#endif 236#endif
239 page_base = adev->dummy_page.addr; 237 page_base = adev->dummy_page_addr;
240 if (!adev->gart.ptr) 238 if (!adev->gart.ptr)
241 continue; 239 continue;
242 240
243 for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { 241 for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
244 amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, 242 amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
245 t, page_base, flags); 243 t, page_base, flags);
246 page_base += AMDGPU_GPU_PAGE_SIZE; 244 page_base += AMDGPU_GPU_PAGE_SIZE;
247 } 245 }
248 } 246 }
249 mb(); 247 mb();
250 amdgpu_gart_flush_gpu_tlb(adev, 0); 248 amdgpu_asic_flush_hdp(adev, NULL);
249 amdgpu_gmc_flush_gpu_tlb(adev, 0);
251 return 0; 250 return 0;
252} 251}
253 252
@@ -279,7 +278,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
279 for (i = 0; i < pages; i++) { 278 for (i = 0; i < pages; i++) {
280 page_base = dma_addr[i]; 279 page_base = dma_addr[i];
281 for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { 280 for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
282 amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags); 281 amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
283 page_base += AMDGPU_GPU_PAGE_SIZE; 282 page_base += AMDGPU_GPU_PAGE_SIZE;
284 } 283 }
285 } 284 }
@@ -317,7 +316,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
317 t = offset / AMDGPU_GPU_PAGE_SIZE; 316 t = offset / AMDGPU_GPU_PAGE_SIZE;
318 p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); 317 p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
319 for (i = 0; i < pages; i++, p++) 318 for (i = 0; i < pages; i++, p++)
320 adev->gart.pages[p] = pagelist[i]; 319 adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
321#endif 320#endif
322 321
323 if (!adev->gart.ptr) 322 if (!adev->gart.ptr)
@@ -329,7 +328,8 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
329 return r; 328 return r;
330 329
331 mb(); 330 mb();
332 amdgpu_gart_flush_gpu_tlb(adev, 0); 331 amdgpu_asic_flush_hdp(adev, NULL);
332 amdgpu_gmc_flush_gpu_tlb(adev, 0);
333 return 0; 333 return 0;
334} 334}
335 335
@@ -345,7 +345,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
345{ 345{
346 int r; 346 int r;
347 347
348 if (adev->dummy_page.page) 348 if (adev->dummy_page_addr)
349 return 0; 349 return 0;
350 350
351 /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */ 351 /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
@@ -357,8 +357,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
357 if (r) 357 if (r)
358 return r; 358 return r;
359 /* Compute table size */ 359 /* Compute table size */
360 adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE; 360 adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
361 adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE; 361 adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
362 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", 362 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
363 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); 363 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
364 364
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index d4a43302c2be..456295c00291 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -31,7 +31,6 @@
31 */ 31 */
32struct amdgpu_device; 32struct amdgpu_device;
33struct amdgpu_bo; 33struct amdgpu_bo;
34struct amdgpu_gart_funcs;
35 34
36#define AMDGPU_GPU_PAGE_SIZE 4096 35#define AMDGPU_GPU_PAGE_SIZE 4096
37#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) 36#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
@@ -52,8 +51,6 @@ struct amdgpu_gart {
52 51
53 /* Asic default pte flags */ 52 /* Asic default pte flags */
54 uint64_t gart_pte_flags; 53 uint64_t gart_pte_flags;
55
56 const struct amdgpu_gart_funcs *gart_funcs;
57}; 54};
58 55
59int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); 56int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e48b4ec88c8c..46b9ea4e6103 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
36 struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); 36 struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
37 37
38 if (robj) { 38 if (robj) {
39 if (robj->gem_base.import_attach)
40 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
41 amdgpu_mn_unregister(robj); 39 amdgpu_mn_unregister(robj);
42 amdgpu_bo_unref(&robj); 40 amdgpu_bo_unref(&robj);
43 } 41 }
@@ -45,7 +43,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
45 43
46int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 44int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
47 int alignment, u32 initial_domain, 45 int alignment, u32 initial_domain,
48 u64 flags, bool kernel, 46 u64 flags, enum ttm_bo_type type,
49 struct reservation_object *resv, 47 struct reservation_object *resv,
50 struct drm_gem_object **obj) 48 struct drm_gem_object **obj)
51{ 49{
@@ -59,8 +57,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
59 } 57 }
60 58
61retry: 59retry:
62 r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, 60 r = amdgpu_bo_create(adev, size, alignment, initial_domain,
63 flags, NULL, resv, 0, &bo); 61 flags, type, resv, &bo);
64 if (r) { 62 if (r) {
65 if (r != -ERESTARTSYS) { 63 if (r != -ERESTARTSYS) {
66 if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { 64 if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
@@ -523,12 +521,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
523 goto error; 521 goto error;
524 522
525 if (operation == AMDGPU_VA_OP_MAP || 523 if (operation == AMDGPU_VA_OP_MAP ||
526 operation == AMDGPU_VA_OP_REPLACE) 524 operation == AMDGPU_VA_OP_REPLACE) {
527 r = amdgpu_vm_bo_update(adev, bo_va, false); 525 r = amdgpu_vm_bo_update(adev, bo_va, false);
526 if (r)
527 goto error;
528 }
528 529
529 r = amdgpu_vm_update_directories(adev, vm); 530 r = amdgpu_vm_update_directories(adev, vm);
530 if (r)
531 goto error;
532 531
533error: 532error:
534 if (r && r != -ERESTARTSYS) 533 if (r && r != -ERESTARTSYS)
@@ -634,7 +633,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
634 if (r) 633 if (r)
635 goto error_backoff; 634 goto error_backoff;
636 635
637 va_flags = amdgpu_vm_get_pte_flags(adev, args->flags); 636 va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
638 r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, 637 r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
639 args->offset_in_bo, args->map_size, 638 args->offset_in_bo, args->map_size,
640 va_flags); 639 va_flags);
@@ -654,7 +653,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
654 if (r) 653 if (r)
655 goto error_backoff; 654 goto error_backoff;
656 655
657 va_flags = amdgpu_vm_get_pte_flags(adev, args->flags); 656 va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
658 r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, 657 r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
659 args->offset_in_bo, args->map_size, 658 args->offset_in_bo, args->map_size,
660 va_flags); 659 va_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
new file mode 100644
index 000000000000..893c2490b783
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26#ifndef __AMDGPU_GMC_H__
27#define __AMDGPU_GMC_H__
28
29#include <linux/types.h>
30
31#include "amdgpu_irq.h"
32
33struct firmware;
34
35/*
36 * VMHUB structures, functions & helpers
37 */
38struct amdgpu_vmhub {
39 uint32_t ctx0_ptb_addr_lo32;
40 uint32_t ctx0_ptb_addr_hi32;
41 uint32_t vm_inv_eng0_req;
42 uint32_t vm_inv_eng0_ack;
43 uint32_t vm_context0_cntl;
44 uint32_t vm_l2_pro_fault_status;
45 uint32_t vm_l2_pro_fault_cntl;
46};
47
48/*
49 * GPU MC structures, functions & helpers
50 */
51struct amdgpu_gmc_funcs {
52 /* flush the vm tlb via mmio */
53 void (*flush_gpu_tlb)(struct amdgpu_device *adev,
54 uint32_t vmid);
55 /* flush the vm tlb via ring */
56 uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
57 uint64_t pd_addr);
58 /* Change the VMID -> PASID mapping */
59 void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
60 unsigned pasid);
61 /* write pte/pde updates using the cpu */
62 int (*set_pte_pde)(struct amdgpu_device *adev,
63 void *cpu_pt_addr, /* cpu addr of page table */
64 uint32_t gpu_page_idx, /* pte/pde to update */
65 uint64_t addr, /* addr to write into pte/pde */
66 uint64_t flags); /* access flags */
67 /* enable/disable PRT support */
68 void (*set_prt)(struct amdgpu_device *adev, bool enable);
69 /* set pte flags based per asic */
70 uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
71 uint32_t flags);
72 /* get the pde for a given mc addr */
73 void (*get_vm_pde)(struct amdgpu_device *adev, int level,
74 u64 *dst, u64 *flags);
75};
76
77struct amdgpu_gmc {
78 resource_size_t aper_size;
79 resource_size_t aper_base;
80 /* for some chips with <= 32MB we need to lie
81 * about vram size near mc fb location */
82 u64 mc_vram_size;
83 u64 visible_vram_size;
84 u64 gart_size;
85 u64 gart_start;
86 u64 gart_end;
87 u64 vram_start;
88 u64 vram_end;
89 unsigned vram_width;
90 u64 real_vram_size;
91 int vram_mtrr;
92 u64 mc_mask;
93 const struct firmware *fw; /* MC firmware */
94 uint32_t fw_version;
95 struct amdgpu_irq_src vm_fault;
96 uint32_t vram_type;
97 uint32_t srbm_soft_reset;
98 bool prt_warning;
99 uint64_t stolen_size;
100 /* apertures */
101 u64 shared_aperture_start;
102 u64 shared_aperture_end;
103 u64 private_aperture_start;
104 u64 private_aperture_end;
105 /* protects concurrent invalidation */
106 spinlock_t invalidate_lock;
107 bool translate_further;
108
109 const struct amdgpu_gmc_funcs *gmc_funcs;
110};
111
112#endif
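
The new struct amdgpu_gmc_funcs above gathers the GMC entry points behind a single vtable that the ASIC-specific GMC code is expected to fill in; callers such as the amdgpu_gart.c hunks go through wrappers like amdgpu_gmc_set_pte_pde() and amdgpu_gmc_flush_gpu_tlb() that are defined elsewhere in the series. A sketch of what that dispatch amounts to, assuming thin inline helpers (the example_* names are hypothetical; the real wrappers may be macros):

/* Hypothetical inline helpers showing the indirection behind
 * struct amdgpu_gmc_funcs; treat this purely as a sketch of the dispatch.
 */
static inline void example_gmc_flush_gpu_tlb(struct amdgpu_device *adev,
					     uint32_t vmid)
{
	/* hand off to the ASIC-specific TLB flush */
	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid);
}

static inline int example_gmc_set_pte_pde(struct amdgpu_device *adev,
					  void *cpu_pt_addr,
					  uint32_t gpu_page_idx,
					  uint64_t addr, uint64_t flags)
{
	/* write a single PTE/PDE with the CPU, as the GART code above does */
	return adev->gmc.gmc_funcs->set_pte_pde(adev, cpu_pt_addr,
						gpu_page_idx, addr, flags);
}
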
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index e14ab34d8262..da7b1b92d9cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -56,7 +56,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
56 return -ENOMEM; 56 return -ENOMEM;
57 57
58 start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; 58 start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
59 size = (adev->mc.gart_size >> PAGE_SHIFT) - start; 59 size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
60 drm_mm_init(&mgr->mm, start, size); 60 drm_mm_init(&mgr->mm, start, size);
61 spin_lock_init(&mgr->lock); 61 spin_lock_init(&mgr->lock);
62 atomic64_set(&mgr->available, p_size); 62 atomic64_set(&mgr->available, p_size);
@@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
75static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) 75static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
76{ 76{
77 struct amdgpu_gtt_mgr *mgr = man->priv; 77 struct amdgpu_gtt_mgr *mgr = man->priv;
78 78 spin_lock(&mgr->lock);
79 drm_mm_takedown(&mgr->mm); 79 drm_mm_takedown(&mgr->mm);
80 spin_unlock(&mgr->lock); 80 spin_unlock(&mgr->lock);
81 kfree(mgr); 81 kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index a162d87ca0c8..311589e02d17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -181,15 +181,18 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
181 } 181 }
182 } 182 }
183 183
184 if (ring->funcs->init_cond_exec) 184 if (job && ring->funcs->init_cond_exec)
185 patch_offset = amdgpu_ring_init_cond_exec(ring); 185 patch_offset = amdgpu_ring_init_cond_exec(ring);
186 186
187 if (ring->funcs->emit_hdp_flush
188#ifdef CONFIG_X86_64 187#ifdef CONFIG_X86_64
189 && !(adev->flags & AMD_IS_APU) 188 if (!(adev->flags & AMD_IS_APU))
190#endif 189#endif
191 ) 190 {
192 amdgpu_ring_emit_hdp_flush(ring); 191 if (ring->funcs->emit_hdp_flush)
192 amdgpu_ring_emit_hdp_flush(ring);
193 else
194 amdgpu_asic_flush_hdp(adev, ring);
195 }
193 196
194 skip_preamble = ring->current_ctx == fence_ctx; 197 skip_preamble = ring->current_ctx == fence_ctx;
195 need_ctx_switch = ring->current_ctx != fence_ctx; 198 need_ctx_switch = ring->current_ctx != fence_ctx;
@@ -219,12 +222,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
219 if (ring->funcs->emit_tmz) 222 if (ring->funcs->emit_tmz)
220 amdgpu_ring_emit_tmz(ring, false); 223 amdgpu_ring_emit_tmz(ring, false);
221 224
222 if (ring->funcs->emit_hdp_invalidate
223#ifdef CONFIG_X86_64 225#ifdef CONFIG_X86_64
224 && !(adev->flags & AMD_IS_APU) 226 if (!(adev->flags & AMD_IS_APU))
225#endif 227#endif
226 ) 228 amdgpu_asic_invalidate_hdp(adev, ring);
227 amdgpu_ring_emit_hdp_invalidate(ring);
228 229
229 r = amdgpu_fence_emit(ring, f); 230 r = amdgpu_fence_emit(ring, f);
230 if (r) { 231 if (r) {
@@ -278,11 +279,6 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
278 return r; 279 return r;
279 } 280 }
280 281
281 r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
282 if (r) {
283 return r;
284 }
285
286 adev->ib_pool_ready = true; 282 adev->ib_pool_ready = true;
287 if (amdgpu_debugfs_sa_init(adev)) { 283 if (amdgpu_debugfs_sa_init(adev)) {
288 dev_err(adev->dev, "failed to register debugfs file for SA\n"); 284 dev_err(adev->dev, "failed to register debugfs file for SA\n");
@@ -301,7 +297,6 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
301void amdgpu_ib_pool_fini(struct amdgpu_device *adev) 297void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
302{ 298{
303 if (adev->ib_pool_ready) { 299 if (adev->ib_pool_ready) {
304 amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
305 amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo); 300 amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
306 adev->ib_pool_ready = false; 301 adev->ib_pool_ready = false;
307 } 302 }
@@ -321,14 +316,45 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
321{ 316{
322 unsigned i; 317 unsigned i;
323 int r, ret = 0; 318 int r, ret = 0;
319 long tmo_gfx, tmo_mm;
320
321 tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
322 if (amdgpu_sriov_vf(adev)) {
 323 		/* In SR-IOV the MM engines are not scheduled by the hypervisor together
 324 		 * with the CP and SDMA engines, so even in exclusive mode an MM engine
 325 		 * could still be running on another VF. The IB test timeout for MM
 326 		 * engines under SR-IOV therefore has to be long; 8 seconds should be
 327 		 * enough for the MM engine to come back to this VF.
328 */
329 tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
330 }
331
332 if (amdgpu_sriov_runtime(adev)) {
 333 		/* The CP and SDMA engines are scheduled together, so the timeout
 334 		 * needs to be wide enough to cover the time spent waiting for them
 335 		 * to come back when running under RUNTIME only.
336 */
337 tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
338 }
324 339
325 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 340 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
326 struct amdgpu_ring *ring = adev->rings[i]; 341 struct amdgpu_ring *ring = adev->rings[i];
342 long tmo;
327 343
328 if (!ring || !ring->ready) 344 if (!ring || !ring->ready)
329 continue; 345 continue;
330 346
 331 		r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT); 347 		/* MM engines need more time */
348 if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
349 ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
350 ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
351 ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
352 ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
353 tmo = tmo_mm;
354 else
355 tmo = tmo_gfx;
356
357 r = amdgpu_ring_test_ib(ring, tmo);
332 if (r) { 358 if (r) {
333 ring->ready = false; 359 ring->ready = false;
334 360
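
The reworked ring-test loop above chooses between two timeouts: MM rings (UVD/VCE/VCN) get the long tmo_mm because under SR-IOV another VF may still hold the MM engine, while CP and SDMA only need the long value when running in SR-IOV runtime mode. The same decision expressed as a hypothetical helper, built only from symbols already used in the hunk:

/* Hypothetical helper restating the timeout selection in
 * amdgpu_ib_ring_tests() above; only symbols from the hunk are used.
 */
static long example_ib_test_timeout(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
	bool is_mm = ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		     ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC;

	/* MM engines may still be busy on another VF; CP/SDMA may have to
	 * wait for a runtime slice. Both cases get the 8x longer timeout.
	 */
	if ((is_mm && amdgpu_sriov_vf(adev)) ||
	    (!is_mm && amdgpu_sriov_runtime(adev)))
		return 8 * AMDGPU_IB_TEST_TIMEOUT;

	return AMDGPU_IB_TEST_TIMEOUT;
}
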
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 16884a0b677b..a1c78f90eadf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -40,6 +40,12 @@
40 */ 40 */
41static DEFINE_IDA(amdgpu_pasid_ida); 41static DEFINE_IDA(amdgpu_pasid_ida);
42 42
43/* Helper to free pasid from a fence callback */
44struct amdgpu_pasid_cb {
45 struct dma_fence_cb cb;
46 unsigned int pasid;
47};
48
43/** 49/**
44 * amdgpu_pasid_alloc - Allocate a PASID 50 * amdgpu_pasid_alloc - Allocate a PASID
45 * @bits: Maximum width of the PASID in bits, must be at least 1 51 * @bits: Maximum width of the PASID in bits, must be at least 1
@@ -63,6 +69,9 @@ int amdgpu_pasid_alloc(unsigned int bits)
63 break; 69 break;
64 } 70 }
65 71
72 if (pasid >= 0)
73 trace_amdgpu_pasid_allocated(pasid);
74
66 return pasid; 75 return pasid;
67} 76}
68 77
@@ -72,9 +81,86 @@ int amdgpu_pasid_alloc(unsigned int bits)
72 */ 81 */
73void amdgpu_pasid_free(unsigned int pasid) 82void amdgpu_pasid_free(unsigned int pasid)
74{ 83{
84 trace_amdgpu_pasid_freed(pasid);
75 ida_simple_remove(&amdgpu_pasid_ida, pasid); 85 ida_simple_remove(&amdgpu_pasid_ida, pasid);
76} 86}
77 87
88static void amdgpu_pasid_free_cb(struct dma_fence *fence,
89 struct dma_fence_cb *_cb)
90{
91 struct amdgpu_pasid_cb *cb =
92 container_of(_cb, struct amdgpu_pasid_cb, cb);
93
94 amdgpu_pasid_free(cb->pasid);
95 dma_fence_put(fence);
96 kfree(cb);
97}
98
99/**
100 * amdgpu_pasid_free_delayed - free pasid when fences signal
101 *
102 * @resv: reservation object with the fences to wait for
103 * @pasid: pasid to free
104 *
105 * Free the pasid only after all the fences in resv are signaled.
106 */
107void amdgpu_pasid_free_delayed(struct reservation_object *resv,
108 unsigned int pasid)
109{
110 struct dma_fence *fence, **fences;
111 struct amdgpu_pasid_cb *cb;
112 unsigned count;
113 int r;
114
115 r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
116 if (r)
117 goto fallback;
118
119 if (count == 0) {
120 amdgpu_pasid_free(pasid);
121 return;
122 }
123
124 if (count == 1) {
125 fence = fences[0];
126 kfree(fences);
127 } else {
128 uint64_t context = dma_fence_context_alloc(1);
129 struct dma_fence_array *array;
130
131 array = dma_fence_array_create(count, fences, context,
132 1, false);
133 if (!array) {
134 kfree(fences);
135 goto fallback;
136 }
137 fence = &array->base;
138 }
139
140 cb = kmalloc(sizeof(*cb), GFP_KERNEL);
141 if (!cb) {
142 /* Last resort when we are OOM */
143 dma_fence_wait(fence, false);
144 dma_fence_put(fence);
145 amdgpu_pasid_free(pasid);
146 } else {
147 cb->pasid = pasid;
148 if (dma_fence_add_callback(fence, &cb->cb,
149 amdgpu_pasid_free_cb))
150 amdgpu_pasid_free_cb(fence, &cb->cb);
151 }
152
153 return;
154
155fallback:
 156 	/* Not enough memory for the delayed free; as a last resort,
 157 	 * block until all the fences complete.
158 */
159 reservation_object_wait_timeout_rcu(resv, true, false,
160 MAX_SCHEDULE_TIMEOUT);
161 amdgpu_pasid_free(pasid);
162}
163
78/* 164/*
79 * VMID manager 165 * VMID manager
80 * 166 *
@@ -96,164 +182,185 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
96 atomic_read(&adev->gpu_reset_counter); 182 atomic_read(&adev->gpu_reset_counter);
97} 183}
98 184
99/* idr_mgr->lock must be held */
100static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
101 struct amdgpu_ring *ring,
102 struct amdgpu_sync *sync,
103 struct dma_fence *fence,
104 struct amdgpu_job *job)
105{
106 struct amdgpu_device *adev = ring->adev;
107 unsigned vmhub = ring->funcs->vmhub;
108 uint64_t fence_context = adev->fence_context + ring->idx;
109 struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
110 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
111 struct dma_fence *updates = sync->last_vm_update;
112 int r = 0;
113 struct dma_fence *flushed, *tmp;
114 bool needs_flush = vm->use_cpu_for_update;
115
116 flushed = id->flushed_updates;
117 if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
118 (atomic64_read(&id->owner) != vm->entity.fence_context) ||
119 (job->vm_pd_addr != id->pd_gpu_addr) ||
120 (updates && (!flushed || updates->context != flushed->context ||
121 dma_fence_is_later(updates, flushed))) ||
122 (!id->last_flush || (id->last_flush->context != fence_context &&
123 !dma_fence_is_signaled(id->last_flush)))) {
124 needs_flush = true;
125 /* to prevent one context starved by another context */
126 id->pd_gpu_addr = 0;
127 tmp = amdgpu_sync_peek_fence(&id->active, ring);
128 if (tmp) {
129 r = amdgpu_sync_fence(adev, sync, tmp, false);
130 return r;
131 }
132 }
133
134 /* Good we can use this VMID. Remember this submission as
135 * user of the VMID.
136 */
137 r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
138 if (r)
139 goto out;
140
141 if (updates && (!flushed || updates->context != flushed->context ||
142 dma_fence_is_later(updates, flushed))) {
143 dma_fence_put(id->flushed_updates);
144 id->flushed_updates = dma_fence_get(updates);
145 }
146 id->pd_gpu_addr = job->vm_pd_addr;
147 atomic64_set(&id->owner, vm->entity.fence_context);
148 job->vm_needs_flush = needs_flush;
149 if (needs_flush) {
150 dma_fence_put(id->last_flush);
151 id->last_flush = NULL;
152 }
153 job->vmid = id - id_mgr->ids;
154 trace_amdgpu_vm_grab_id(vm, ring, job);
155out:
156 return r;
157}
158
159/** 185/**
160 * amdgpu_vm_grab_id - allocate the next free VMID 186 * amdgpu_vm_grab_idle - grab idle VMID
161 * 187 *
162 * @vm: vm to allocate id for 188 * @vm: vm to allocate id for
163 * @ring: ring we want to submit job to 189 * @ring: ring we want to submit job to
164 * @sync: sync object where we add dependencies 190 * @sync: sync object where we add dependencies
165 * @fence: fence protecting ID from reuse 191 * @idle: resulting idle VMID
166 * 192 *
167 * Allocate an id for the vm, adding fences to the sync obj as necessary. 193 * Try to find an idle VMID, if none is idle add a fence to wait to the sync
194 * object. Returns -ENOMEM when we are out of memory.
168 */ 195 */
169int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 196static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
170 struct amdgpu_sync *sync, struct dma_fence *fence, 197 struct amdgpu_ring *ring,
171 struct amdgpu_job *job) 198 struct amdgpu_sync *sync,
199 struct amdgpu_vmid **idle)
172{ 200{
173 struct amdgpu_device *adev = ring->adev; 201 struct amdgpu_device *adev = ring->adev;
174 unsigned vmhub = ring->funcs->vmhub; 202 unsigned vmhub = ring->funcs->vmhub;
175 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 203 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
176 uint64_t fence_context = adev->fence_context + ring->idx;
177 struct dma_fence *updates = sync->last_vm_update;
178 struct amdgpu_vmid *id, *idle;
179 struct dma_fence **fences; 204 struct dma_fence **fences;
180 unsigned i; 205 unsigned i;
181 int r = 0; 206 int r;
207
208 if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
209 return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
182 210
183 mutex_lock(&id_mgr->lock);
184 if (vm->reserved_vmid[vmhub]) {
185 r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
186 mutex_unlock(&id_mgr->lock);
187 return r;
188 }
189 fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); 211 fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
190 if (!fences) { 212 if (!fences)
191 mutex_unlock(&id_mgr->lock);
192 return -ENOMEM; 213 return -ENOMEM;
193 } 214
194 /* Check if we have an idle VMID */ 215 /* Check if we have an idle VMID */
195 i = 0; 216 i = 0;
196 list_for_each_entry(idle, &id_mgr->ids_lru, list) { 217 list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
197 fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); 218 fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
198 if (!fences[i]) 219 if (!fences[i])
199 break; 220 break;
200 ++i; 221 ++i;
201 } 222 }
202 223
203 /* If we can't find a idle VMID to use, wait till one becomes available */ 224 /* If we can't find a idle VMID to use, wait till one becomes available */
204 if (&idle->list == &id_mgr->ids_lru) { 225 if (&(*idle)->list == &id_mgr->ids_lru) {
205 u64 fence_context = adev->vm_manager.fence_context + ring->idx; 226 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
206 unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; 227 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
207 struct dma_fence_array *array; 228 struct dma_fence_array *array;
208 unsigned j; 229 unsigned j;
209 230
231 *idle = NULL;
210 for (j = 0; j < i; ++j) 232 for (j = 0; j < i; ++j)
211 dma_fence_get(fences[j]); 233 dma_fence_get(fences[j]);
212 234
213 array = dma_fence_array_create(i, fences, fence_context, 235 array = dma_fence_array_create(i, fences, fence_context,
214 seqno, true); 236 seqno, true);
215 if (!array) { 237 if (!array) {
216 for (j = 0; j < i; ++j) 238 for (j = 0; j < i; ++j)
217 dma_fence_put(fences[j]); 239 dma_fence_put(fences[j]);
218 kfree(fences); 240 kfree(fences);
219 r = -ENOMEM; 241 return -ENOMEM;
220 goto error;
221 } 242 }
222 243
244 r = amdgpu_sync_fence(adev, sync, &array->base, false);
245 dma_fence_put(ring->vmid_wait);
246 ring->vmid_wait = &array->base;
247 return r;
248 }
249 kfree(fences);
223 250
224 r = amdgpu_sync_fence(ring->adev, sync, &array->base, false); 251 return 0;
225 dma_fence_put(&array->base); 252}
226 if (r)
227 goto error;
228 253
229 mutex_unlock(&id_mgr->lock); 254/**
230 return 0; 255 * amdgpu_vm_grab_reserved - try to assign reserved VMID
256 *
257 * @vm: vm to allocate id for
258 * @ring: ring we want to submit job to
259 * @sync: sync object where we add dependencies
260 * @fence: fence protecting ID from reuse
261 * @job: job who wants to use the VMID
262 *
263 * Try to assign a reserved VMID.
264 */
265static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
266 struct amdgpu_ring *ring,
267 struct amdgpu_sync *sync,
268 struct dma_fence *fence,
269 struct amdgpu_job *job,
270 struct amdgpu_vmid **id)
271{
272 struct amdgpu_device *adev = ring->adev;
273 unsigned vmhub = ring->funcs->vmhub;
274 uint64_t fence_context = adev->fence_context + ring->idx;
275 struct dma_fence *updates = sync->last_vm_update;
276 bool needs_flush = vm->use_cpu_for_update;
277 int r = 0;
278
279 *id = vm->reserved_vmid[vmhub];
280 if (updates && (*id)->flushed_updates &&
281 updates->context == (*id)->flushed_updates->context &&
282 !dma_fence_is_later(updates, (*id)->flushed_updates))
283 updates = NULL;
284
285 if ((*id)->owner != vm->entity.fence_context ||
286 job->vm_pd_addr != (*id)->pd_gpu_addr ||
287 updates || !(*id)->last_flush ||
288 ((*id)->last_flush->context != fence_context &&
289 !dma_fence_is_signaled((*id)->last_flush))) {
290 struct dma_fence *tmp;
231 291
292 /* to prevent one context starved by another context */
293 (*id)->pd_gpu_addr = 0;
294 tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
295 if (tmp) {
296 *id = NULL;
297 r = amdgpu_sync_fence(adev, sync, tmp, false);
298 return r;
299 }
300 needs_flush = true;
232 } 301 }
233 kfree(fences); 302
303 /* Good we can use this VMID. Remember this submission as
304 * user of the VMID.
305 */
306 r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
307 if (r)
308 return r;
309
310 if (updates) {
311 dma_fence_put((*id)->flushed_updates);
312 (*id)->flushed_updates = dma_fence_get(updates);
313 }
314 job->vm_needs_flush = needs_flush;
315 return 0;
316}
317
318/**
319 * amdgpu_vm_grab_used - try to reuse a VMID
320 *
321 * @vm: vm to allocate id for
322 * @ring: ring we want to submit job to
323 * @sync: sync object where we add dependencies
324 * @fence: fence protecting ID from reuse
325 * @job: job who wants to use the VMID
326 * @id: resulting VMID
327 *
328 * Try to reuse a VMID for this submission.
329 */
330static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
331 struct amdgpu_ring *ring,
332 struct amdgpu_sync *sync,
333 struct dma_fence *fence,
334 struct amdgpu_job *job,
335 struct amdgpu_vmid **id)
336{
337 struct amdgpu_device *adev = ring->adev;
338 unsigned vmhub = ring->funcs->vmhub;
339 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
340 uint64_t fence_context = adev->fence_context + ring->idx;
341 struct dma_fence *updates = sync->last_vm_update;
342 int r;
234 343
235 job->vm_needs_flush = vm->use_cpu_for_update; 344 job->vm_needs_flush = vm->use_cpu_for_update;
345
236 /* Check if we can use a VMID already assigned to this VM */ 346 /* Check if we can use a VMID already assigned to this VM */
237 list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { 347 list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
238 struct dma_fence *flushed;
239 bool needs_flush = vm->use_cpu_for_update; 348 bool needs_flush = vm->use_cpu_for_update;
349 struct dma_fence *flushed;
240 350
241 /* Check all the prerequisites to using this VMID */ 351 /* Check all the prerequisites to using this VMID */
242 if (amdgpu_vmid_had_gpu_reset(adev, id)) 352 if ((*id)->owner != vm->entity.fence_context)
243 continue;
244
245 if (atomic64_read(&id->owner) != vm->entity.fence_context)
246 continue; 353 continue;
247 354
248 if (job->vm_pd_addr != id->pd_gpu_addr) 355 if ((*id)->pd_gpu_addr != job->vm_pd_addr)
249 continue; 356 continue;
250 357
251 if (!id->last_flush || 358 if (!(*id)->last_flush ||
252 (id->last_flush->context != fence_context && 359 ((*id)->last_flush->context != fence_context &&
253 !dma_fence_is_signaled(id->last_flush))) 360 !dma_fence_is_signaled((*id)->last_flush)))
254 needs_flush = true; 361 needs_flush = true;
255 362
256 flushed = id->flushed_updates; 363 flushed = (*id)->flushed_updates;
257 if (updates && (!flushed || dma_fence_is_later(updates, flushed))) 364 if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
258 needs_flush = true; 365 needs_flush = true;
259 366
@@ -261,47 +368,91 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
261 if (adev->asic_type < CHIP_VEGA10 && needs_flush) 368 if (adev->asic_type < CHIP_VEGA10 && needs_flush)
262 continue; 369 continue;
263 370
264 /* Good we can use this VMID. Remember this submission as 371 /* Good, we can use this VMID. Remember this submission as
265 * user of the VMID. 372 * user of the VMID.
266 */ 373 */
267 r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); 374 r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
268 if (r) 375 if (r)
269 goto error; 376 return r;
270 377
271 if (updates && (!flushed || dma_fence_is_later(updates, flushed))) { 378 if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
272 dma_fence_put(id->flushed_updates); 379 dma_fence_put((*id)->flushed_updates);
273 id->flushed_updates = dma_fence_get(updates); 380 (*id)->flushed_updates = dma_fence_get(updates);
274 } 381 }
275 382
276 if (needs_flush) 383 job->vm_needs_flush |= needs_flush;
277 goto needs_flush; 384 return 0;
278 else 385 }
279 goto no_flush_needed;
280 386
281 }; 387 *id = NULL;
388 return 0;
389}
282 390
283 /* Still no ID to use? Then use the idle one found earlier */ 391/**
284 id = idle; 392 * amdgpu_vm_grab_id - allocate the next free VMID
393 *
394 * @vm: vm to allocate id for
395 * @ring: ring we want to submit job to
396 * @sync: sync object where we add dependencies
397 * @fence: fence protecting ID from reuse
398 * @job: job who wants to use the VMID
399 *
400 * Allocate an id for the vm, adding fences to the sync obj as necessary.
401 */
402int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
403 struct amdgpu_sync *sync, struct dma_fence *fence,
404 struct amdgpu_job *job)
405{
406 struct amdgpu_device *adev = ring->adev;
407 unsigned vmhub = ring->funcs->vmhub;
408 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
409 struct amdgpu_vmid *idle = NULL;
410 struct amdgpu_vmid *id = NULL;
411 int r = 0;
285 412
286 /* Remember this submission as user of the VMID */ 413 mutex_lock(&id_mgr->lock);
287 r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); 414 r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
288 if (r) 415 if (r || !idle)
289 goto error; 416 goto error;
290 417
291 id->pd_gpu_addr = job->vm_pd_addr; 418 if (vm->reserved_vmid[vmhub]) {
292 dma_fence_put(id->flushed_updates); 419 r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
293 id->flushed_updates = dma_fence_get(updates); 420 if (r || !id)
294 atomic64_set(&id->owner, vm->entity.fence_context); 421 goto error;
422 } else {
423 r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
424 if (r)
425 goto error;
295 426
296needs_flush: 427 if (!id) {
297 job->vm_needs_flush = true; 428 struct dma_fence *updates = sync->last_vm_update;
298 dma_fence_put(id->last_flush);
299 id->last_flush = NULL;
300 429
301no_flush_needed: 430 /* Still no ID to use? Then use the idle one found earlier */
302 list_move_tail(&id->list, &id_mgr->ids_lru); 431 id = idle;
303 432
433 /* Remember this submission as user of the VMID */
434 r = amdgpu_sync_fence(ring->adev, &id->active,
435 fence, false);
436 if (r)
437 goto error;
438
439 dma_fence_put(id->flushed_updates);
440 id->flushed_updates = dma_fence_get(updates);
441 job->vm_needs_flush = true;
442 }
443
444 list_move_tail(&id->list, &id_mgr->ids_lru);
445 }
446
447 id->pd_gpu_addr = job->vm_pd_addr;
448 id->owner = vm->entity.fence_context;
449
450 if (job->vm_needs_flush) {
451 dma_fence_put(id->last_flush);
452 id->last_flush = NULL;
453 }
304 job->vmid = id - id_mgr->ids; 454 job->vmid = id - id_mgr->ids;
455 job->pasid = vm->pasid;
305 trace_amdgpu_vm_grab_id(vm, ring, job); 456 trace_amdgpu_vm_grab_id(vm, ring, job);
306 457
307error: 458error:
@@ -370,13 +521,15 @@ void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
370 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 521 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
371 struct amdgpu_vmid *id = &id_mgr->ids[vmid]; 522 struct amdgpu_vmid *id = &id_mgr->ids[vmid];
372 523
373 atomic64_set(&id->owner, 0); 524 mutex_lock(&id_mgr->lock);
525 id->owner = 0;
374 id->gds_base = 0; 526 id->gds_base = 0;
375 id->gds_size = 0; 527 id->gds_size = 0;
376 id->gws_base = 0; 528 id->gws_base = 0;
377 id->gws_size = 0; 529 id->gws_size = 0;
378 id->oa_base = 0; 530 id->oa_base = 0;
379 id->oa_size = 0; 531 id->oa_size = 0;
532 mutex_unlock(&id_mgr->lock);
380} 533}
381 534
382/** 535/**
@@ -454,6 +607,7 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
454 amdgpu_sync_free(&id->active); 607 amdgpu_sync_free(&id->active);
455 dma_fence_put(id->flushed_updates); 608 dma_fence_put(id->flushed_updates);
456 dma_fence_put(id->last_flush); 609 dma_fence_put(id->last_flush);
610 dma_fence_put(id->pasid_mapping);
457 } 611 }
458 } 612 }
459} 613}
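
amdgpu_pasid_free_delayed() above defers the actual free until every fence in the reservation object has signaled, installing a dma_fence callback and falling back to a blocking wait when memory is tight. A condensed sketch of that release-on-signal pattern, generic over the resource being freed (the example_* names are hypothetical; the dma_fence calls are the ones the patch itself uses):

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct example_cb {
	struct dma_fence_cb cb;
	unsigned int resource;	/* stand-in for the PASID */
};

/* Runs once the fence has signaled (or immediately if it already had). */
static void example_release(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct example_cb *cb = container_of(_cb, struct example_cb, cb);

	/* ... release cb->resource here ... */
	dma_fence_put(fence);
	kfree(cb);
}

static void example_release_when_signaled(struct dma_fence *fence,
					  unsigned int resource)
{
	struct example_cb *cb = kmalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb) {
		/* OOM fallback: block for the fence, then release inline */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		/* ... release resource here ... */
		return;
	}

	cb->resource = resource;
	/* If the fence already signaled, the callback is not installed and
	 * we have to run it ourselves.
	 */
	if (dma_fence_add_callback(fence, &cb->cb, example_release))
		example_release(fence, &cb->cb);
}
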
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index ad931fa570b3..7625419f0fc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -43,7 +43,7 @@ struct amdgpu_vmid {
43 struct list_head list; 43 struct list_head list;
44 struct amdgpu_sync active; 44 struct amdgpu_sync active;
45 struct dma_fence *last_flush; 45 struct dma_fence *last_flush;
46 atomic64_t owner; 46 uint64_t owner;
47 47
48 uint64_t pd_gpu_addr; 48 uint64_t pd_gpu_addr;
49 /* last flushed PD/PT update */ 49 /* last flushed PD/PT update */
@@ -57,6 +57,9 @@ struct amdgpu_vmid {
57 uint32_t gws_size; 57 uint32_t gws_size;
58 uint32_t oa_base; 58 uint32_t oa_base;
59 uint32_t oa_size; 59 uint32_t oa_size;
60
61 unsigned pasid;
62 struct dma_fence *pasid_mapping;
60}; 63};
61 64
62struct amdgpu_vmid_mgr { 65struct amdgpu_vmid_mgr {
@@ -69,6 +72,8 @@ struct amdgpu_vmid_mgr {
69 72
70int amdgpu_pasid_alloc(unsigned int bits); 73int amdgpu_pasid_alloc(unsigned int bits);
71void amdgpu_pasid_free(unsigned int pasid); 74void amdgpu_pasid_free(unsigned int pasid);
75void amdgpu_pasid_free_delayed(struct reservation_object *resv,
76 unsigned int pasid);
72 77
73bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, 78bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
74 struct amdgpu_vmid *id); 79 struct amdgpu_vmid *id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 29cf10927a92..0e01f115bbe5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -25,51 +25,12 @@
25#define __AMDGPU_IH_H__ 25#define __AMDGPU_IH_H__
26 26
27#include <linux/chash.h> 27#include <linux/chash.h>
28#include "soc15_ih_clientid.h"
28 29
29struct amdgpu_device; 30struct amdgpu_device;
30 /*
31 * vega10+ IH clients
32 */
33enum amdgpu_ih_clientid
34{
35 AMDGPU_IH_CLIENTID_IH = 0x00,
36 AMDGPU_IH_CLIENTID_ACP = 0x01,
37 AMDGPU_IH_CLIENTID_ATHUB = 0x02,
38 AMDGPU_IH_CLIENTID_BIF = 0x03,
39 AMDGPU_IH_CLIENTID_DCE = 0x04,
40 AMDGPU_IH_CLIENTID_ISP = 0x05,
41 AMDGPU_IH_CLIENTID_PCIE0 = 0x06,
42 AMDGPU_IH_CLIENTID_RLC = 0x07,
43 AMDGPU_IH_CLIENTID_SDMA0 = 0x08,
44 AMDGPU_IH_CLIENTID_SDMA1 = 0x09,
45 AMDGPU_IH_CLIENTID_SE0SH = 0x0a,
46 AMDGPU_IH_CLIENTID_SE1SH = 0x0b,
47 AMDGPU_IH_CLIENTID_SE2SH = 0x0c,
48 AMDGPU_IH_CLIENTID_SE3SH = 0x0d,
49 AMDGPU_IH_CLIENTID_SYSHUB = 0x0e,
50 AMDGPU_IH_CLIENTID_THM = 0x0f,
51 AMDGPU_IH_CLIENTID_UVD = 0x10,
52 AMDGPU_IH_CLIENTID_VCE0 = 0x11,
53 AMDGPU_IH_CLIENTID_VMC = 0x12,
54 AMDGPU_IH_CLIENTID_XDMA = 0x13,
55 AMDGPU_IH_CLIENTID_GRBM_CP = 0x14,
56 AMDGPU_IH_CLIENTID_ATS = 0x15,
57 AMDGPU_IH_CLIENTID_ROM_SMUIO = 0x16,
58 AMDGPU_IH_CLIENTID_DF = 0x17,
59 AMDGPU_IH_CLIENTID_VCE1 = 0x18,
60 AMDGPU_IH_CLIENTID_PWR = 0x19,
61 AMDGPU_IH_CLIENTID_UTCL2 = 0x1b,
62 AMDGPU_IH_CLIENTID_EA = 0x1c,
63 AMDGPU_IH_CLIENTID_UTCL2LOG = 0x1d,
64 AMDGPU_IH_CLIENTID_MP0 = 0x1e,
65 AMDGPU_IH_CLIENTID_MP1 = 0x1f,
66
67 AMDGPU_IH_CLIENTID_MAX,
68
69 AMDGPU_IH_CLIENTID_VCN = AMDGPU_IH_CLIENTID_UVD
70};
71 31
72#define AMDGPU_IH_CLIENTID_LEGACY 0 32#define AMDGPU_IH_CLIENTID_LEGACY 0
33#define AMDGPU_IH_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
73 34
74#define AMDGPU_PAGEFAULT_HASH_BITS 8 35#define AMDGPU_PAGEFAULT_HASH_BITS 8
75struct amdgpu_retryfault_hashtable { 36struct amdgpu_retryfault_hashtable {
@@ -109,7 +70,7 @@ struct amdgpu_iv_entry {
109 unsigned vmid_src; 70 unsigned vmid_src;
110 uint64_t timestamp; 71 uint64_t timestamp;
111 unsigned timestamp_src; 72 unsigned timestamp_src;
112 unsigned pas_id; 73 unsigned pasid;
113 unsigned pasid_src; 74 unsigned pasid_src;
114 unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW]; 75 unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
115 const uint32_t *iv_entry; 76 const uint32_t *iv_entry;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 56bcd59c3399..11dfe57bd8bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -92,7 +92,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
92} 92}
93 93
94/* Disable *all* interrupts */ 94/* Disable *all* interrupts */
95static void amdgpu_irq_disable_all(struct amdgpu_device *adev) 95void amdgpu_irq_disable_all(struct amdgpu_device *adev)
96{ 96{
97 unsigned long irqflags; 97 unsigned long irqflags;
98 unsigned i, j, k; 98 unsigned i, j, k;
@@ -123,55 +123,6 @@ static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
123} 123}
124 124
125/** 125/**
126 * amdgpu_irq_preinstall - drm irq preinstall callback
127 *
128 * @dev: drm dev pointer
129 *
130 * Gets the hw ready to enable irqs (all asics).
131 * This function disables all interrupt sources on the GPU.
132 */
133void amdgpu_irq_preinstall(struct drm_device *dev)
134{
135 struct amdgpu_device *adev = dev->dev_private;
136
137 /* Disable *all* interrupts */
138 amdgpu_irq_disable_all(adev);
139 /* Clear bits */
140 amdgpu_ih_process(adev);
141}
142
143/**
144 * amdgpu_irq_postinstall - drm irq preinstall callback
145 *
146 * @dev: drm dev pointer
147 *
148 * Handles stuff to be done after enabling irqs (all asics).
149 * Returns 0 on success.
150 */
151int amdgpu_irq_postinstall(struct drm_device *dev)
152{
153 dev->max_vblank_count = 0x00ffffff;
154 return 0;
155}
156
157/**
158 * amdgpu_irq_uninstall - drm irq uninstall callback
159 *
160 * @dev: drm dev pointer
161 *
162 * This function disables all interrupt sources on the GPU (all asics).
163 */
164void amdgpu_irq_uninstall(struct drm_device *dev)
165{
166 struct amdgpu_device *adev = dev->dev_private;
167
168 if (adev == NULL) {
169 return;
170 }
171 amdgpu_irq_disable_all(adev);
172}
173
174/**
175 * amdgpu_irq_handler - irq handler 126 * amdgpu_irq_handler - irq handler
176 * 127 *
177 * @int irq, void *arg: args 128 * @int irq, void *arg: args
@@ -257,10 +208,12 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
257 r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); 208 r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
258 if (r) { 209 if (r) {
259 adev->irq.installed = false; 210 adev->irq.installed = false;
260 flush_work(&adev->hotplug_work); 211 if (!amdgpu_device_has_dc_support(adev))
212 flush_work(&adev->hotplug_work);
261 cancel_work_sync(&adev->reset_work); 213 cancel_work_sync(&adev->reset_work);
262 return r; 214 return r;
263 } 215 }
216 adev->ddev->max_vblank_count = 0x00ffffff;
264 217
265 DRM_DEBUG("amdgpu: irq initialized.\n"); 218 DRM_DEBUG("amdgpu: irq initialized.\n");
266 return 0; 219 return 0;
@@ -282,7 +235,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
282 adev->irq.installed = false; 235 adev->irq.installed = false;
283 if (adev->irq.msi_enabled) 236 if (adev->irq.msi_enabled)
284 pci_disable_msi(adev->pdev); 237 pci_disable_msi(adev->pdev);
285 flush_work(&adev->hotplug_work); 238 if (!amdgpu_device_has_dc_support(adev))
239 flush_work(&adev->hotplug_work);
286 cancel_work_sync(&adev->reset_work); 240 cancel_work_sync(&adev->reset_work);
287 } 241 }
288 242
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 0610cc4a9788..3375ad778edc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -78,9 +78,7 @@ struct amdgpu_irq {
78 uint32_t srbm_soft_reset; 78 uint32_t srbm_soft_reset;
79}; 79};
80 80
81void amdgpu_irq_preinstall(struct drm_device *dev); 81void amdgpu_irq_disable_all(struct amdgpu_device *adev);
82int amdgpu_irq_postinstall(struct drm_device *dev);
83void amdgpu_irq_uninstall(struct drm_device *dev);
84irqreturn_t amdgpu_irq_handler(int irq, void *arg); 82irqreturn_t amdgpu_irq_handler(int irq, void *arg);
85 83
86int amdgpu_irq_init(struct amdgpu_device *adev); 84int amdgpu_irq_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bd6e9a40f421..e851c66cbb5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -191,7 +191,7 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
191 fw_info->feature = 0; 191 fw_info->feature = 0;
192 break; 192 break;
193 case AMDGPU_INFO_FW_GMC: 193 case AMDGPU_INFO_FW_GMC:
194 fw_info->ver = adev->mc.fw_version; 194 fw_info->ver = adev->gmc.fw_version;
195 fw_info->feature = 0; 195 fw_info->feature = 0;
196 break; 196 break;
197 case AMDGPU_INFO_FW_GFX_ME: 197 case AMDGPU_INFO_FW_GFX_ME:
@@ -470,9 +470,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
470 case AMDGPU_INFO_VRAM_GTT: { 470 case AMDGPU_INFO_VRAM_GTT: {
471 struct drm_amdgpu_info_vram_gtt vram_gtt; 471 struct drm_amdgpu_info_vram_gtt vram_gtt;
472 472
473 vram_gtt.vram_size = adev->mc.real_vram_size; 473 vram_gtt.vram_size = adev->gmc.real_vram_size;
474 vram_gtt.vram_size -= adev->vram_pin_size; 474 vram_gtt.vram_size -= adev->vram_pin_size;
475 vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; 475 vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
476 vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size); 476 vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
477 vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; 477 vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
478 vram_gtt.gtt_size *= PAGE_SIZE; 478 vram_gtt.gtt_size *= PAGE_SIZE;
@@ -484,17 +484,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
484 struct drm_amdgpu_memory_info mem; 484 struct drm_amdgpu_memory_info mem;
485 485
486 memset(&mem, 0, sizeof(mem)); 486 memset(&mem, 0, sizeof(mem));
487 mem.vram.total_heap_size = adev->mc.real_vram_size; 487 mem.vram.total_heap_size = adev->gmc.real_vram_size;
488 mem.vram.usable_heap_size = 488 mem.vram.usable_heap_size =
489 adev->mc.real_vram_size - adev->vram_pin_size; 489 adev->gmc.real_vram_size - adev->vram_pin_size;
490 mem.vram.heap_usage = 490 mem.vram.heap_usage =
491 amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 491 amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
492 mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; 492 mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
493 493
494 mem.cpu_accessible_vram.total_heap_size = 494 mem.cpu_accessible_vram.total_heap_size =
495 adev->mc.visible_vram_size; 495 adev->gmc.visible_vram_size;
496 mem.cpu_accessible_vram.usable_heap_size = 496 mem.cpu_accessible_vram.usable_heap_size =
497 adev->mc.visible_vram_size - 497 adev->gmc.visible_vram_size -
498 (adev->vram_pin_size - adev->invisible_pin_size); 498 (adev->vram_pin_size - adev->invisible_pin_size);
499 mem.cpu_accessible_vram.heap_usage = 499 mem.cpu_accessible_vram.heap_usage =
500 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 500 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@@ -580,11 +580,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
580 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; 580 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
581 581
582 vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; 582 vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
583 vm_size -= AMDGPU_VA_RESERVED_SIZE;
584
585 /* Older VCE FW versions are buggy and can handle only 40bits */
586 if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
587 vm_size = min(vm_size, 1ULL << 40);
588
583 dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; 589 dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
584 dev_info.virtual_address_max = 590 dev_info.virtual_address_max =
585 min(vm_size, AMDGPU_VA_HOLE_START); 591 min(vm_size, AMDGPU_VA_HOLE_START);
586 592
587 vm_size -= AMDGPU_VA_RESERVED_SIZE;
588 if (vm_size > AMDGPU_VA_HOLE_START) { 593 if (vm_size > AMDGPU_VA_HOLE_START) {
589 dev_info.high_va_offset = AMDGPU_VA_HOLE_END; 594 dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
590 dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size; 595 dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
@@ -599,8 +604,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
599 sizeof(adev->gfx.cu_info.ao_cu_bitmap)); 604 sizeof(adev->gfx.cu_info.ao_cu_bitmap));
600 memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0], 605 memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
601 sizeof(adev->gfx.cu_info.bitmap)); 606 sizeof(adev->gfx.cu_info.bitmap));
602 dev_info.vram_type = adev->mc.vram_type; 607 dev_info.vram_type = adev->gmc.vram_type;
603 dev_info.vram_bit_width = adev->mc.vram_width; 608 dev_info.vram_bit_width = adev->gmc.vram_width;
604 dev_info.vce_harvest_config = adev->vce.harvest_config; 609 dev_info.vce_harvest_config = adev->vce.harvest_config;
605 dev_info.gc_double_offchip_lds_buf = 610 dev_info.gc_double_offchip_lds_buf =
606 adev->gfx.config.double_offchip_lds_buf; 611 adev->gfx.config.double_offchip_lds_buf;
@@ -758,6 +763,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
758 return -EINVAL; 763 return -EINVAL;
759 } 764 }
760 break; 765 break;
766 case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
767 /* get stable pstate sclk in Mhz */
768 if (amdgpu_dpm_read_sensor(adev,
769 AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
770 (void *)&ui32, &ui32_size)) {
771 return -EINVAL;
772 }
773 ui32 /= 100;
774 break;
775 case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
776 /* get stable pstate mclk in Mhz */
777 if (amdgpu_dpm_read_sensor(adev,
778 AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
779 (void *)&ui32, &ui32_size)) {
780 return -EINVAL;
781 }
782 ui32 /= 100;
783 break;
761 default: 784 default:
762 DRM_DEBUG_KMS("Invalid request %d\n", 785 DRM_DEBUG_KMS("Invalid request %d\n",
763 info->sensor_info.type); 786 info->sensor_info.type);
@@ -805,7 +828,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
805{ 828{
806 struct amdgpu_device *adev = dev->dev_private; 829 struct amdgpu_device *adev = dev->dev_private;
807 struct amdgpu_fpriv *fpriv; 830 struct amdgpu_fpriv *fpriv;
808 int r; 831 int r, pasid;
809 832
810 file_priv->driver_priv = NULL; 833 file_priv->driver_priv = NULL;
811 834
@@ -819,28 +842,25 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
819 goto out_suspend; 842 goto out_suspend;
820 } 843 }
821 844
822 r = amdgpu_vm_init(adev, &fpriv->vm, 845 pasid = amdgpu_pasid_alloc(16);
823 AMDGPU_VM_CONTEXT_GFX, 0); 846 if (pasid < 0) {
824 if (r) { 847 dev_warn(adev->dev, "No more PASIDs available!");
825 kfree(fpriv); 848 pasid = 0;
826 goto out_suspend;
827 } 849 }
850 r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
851 if (r)
852 goto error_pasid;
828 853
829 fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL); 854 fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
830 if (!fpriv->prt_va) { 855 if (!fpriv->prt_va) {
831 r = -ENOMEM; 856 r = -ENOMEM;
832 amdgpu_vm_fini(adev, &fpriv->vm); 857 goto error_vm;
833 kfree(fpriv);
834 goto out_suspend;
835 } 858 }
836 859
837 if (amdgpu_sriov_vf(adev)) { 860 if (amdgpu_sriov_vf(adev)) {
838 r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); 861 r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
839 if (r) { 862 if (r)
840 amdgpu_vm_fini(adev, &fpriv->vm); 863 goto error_vm;
841 kfree(fpriv);
842 goto out_suspend;
843 }
844 } 864 }
845 865
846 mutex_init(&fpriv->bo_list_lock); 866 mutex_init(&fpriv->bo_list_lock);
@@ -849,6 +869,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
849 amdgpu_ctx_mgr_init(&fpriv->ctx_mgr); 869 amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
850 870
851 file_priv->driver_priv = fpriv; 871 file_priv->driver_priv = fpriv;
872 goto out_suspend;
873
874error_vm:
875 amdgpu_vm_fini(adev, &fpriv->vm);
876
877error_pasid:
878 if (pasid)
879 amdgpu_pasid_free(pasid);
880
881 kfree(fpriv);
852 882
853out_suspend: 883out_suspend:
854 pm_runtime_mark_last_busy(dev->dev); 884 pm_runtime_mark_last_busy(dev->dev);
@@ -871,6 +901,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
871 struct amdgpu_device *adev = dev->dev_private; 901 struct amdgpu_device *adev = dev->dev_private;
872 struct amdgpu_fpriv *fpriv = file_priv->driver_priv; 902 struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
873 struct amdgpu_bo_list *list; 903 struct amdgpu_bo_list *list;
904 struct amdgpu_bo *pd;
905 unsigned int pasid;
874 int handle; 906 int handle;
875 907
876 if (!fpriv) 908 if (!fpriv)
@@ -895,7 +927,13 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
895 amdgpu_bo_unreserve(adev->virt.csa_obj); 927 amdgpu_bo_unreserve(adev->virt.csa_obj);
896 } 928 }
897 929
930 pasid = fpriv->vm.pasid;
931 pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
932
898 amdgpu_vm_fini(adev, &fpriv->vm); 933 amdgpu_vm_fini(adev, &fpriv->vm);
934 if (pasid)
935 amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
936 amdgpu_bo_unref(&pd);
899 937
900 idr_for_each_entry(&fpriv->bo_list_handles, list, handle) 938 idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
901 amdgpu_bo_list_free(list); 939 amdgpu_bo_list_free(list);
@@ -947,11 +985,11 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
947 */ 985 */
948 do { 986 do {
949 count = amdgpu_display_vblank_get_counter(adev, pipe); 987 count = amdgpu_display_vblank_get_counter(adev, pipe);
950 /* Ask amdgpu_get_crtc_scanoutpos to return vpos as 988 /* Ask amdgpu_display_get_crtc_scanoutpos to return
951 * distance to start of vblank, instead of regular 989 * vpos as distance to start of vblank, instead of
952 * vertical scanout pos. 990 * regular vertical scanout pos.
953 */ 991 */
954 stat = amdgpu_get_crtc_scanoutpos( 992 stat = amdgpu_display_get_crtc_scanoutpos(
955 dev, pipe, GET_DISTANCE_TO_VBLANKSTART, 993 dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
956 &vpos, &hpos, NULL, NULL, 994 &vpos, &hpos, NULL, NULL,
957 &adev->mode_info.crtcs[pipe]->base.hwmode); 995 &adev->mode_info.crtcs[pipe]->base.hwmode);
@@ -992,7 +1030,7 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
992int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe) 1030int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
993{ 1031{
994 struct amdgpu_device *adev = dev->dev_private; 1032 struct amdgpu_device *adev = dev->dev_private;
995 int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe); 1033 int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
996 1034
997 return amdgpu_irq_get(adev, &adev->crtc_irq, idx); 1035 return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
998} 1036}
@@ -1008,7 +1046,7 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
1008void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe) 1046void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
1009{ 1047{
1010 struct amdgpu_device *adev = dev->dev_private; 1048 struct amdgpu_device *adev = dev->dev_private;
1011 int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe); 1049 int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1012 1050
1013 amdgpu_irq_put(adev, &adev->crtc_irq, idx); 1051 amdgpu_irq_put(adev, &adev->crtc_irq, idx);
1014} 1052}
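
In the amdgpu_driver_postclose_kms() hunk above the ordering is the point: the root page-directory BO is referenced and the PASID saved before amdgpu_vm_fini() runs, so the PD's reservation object is still valid when the PASID is handed to amdgpu_pasid_free_delayed(). Restated as a small helper purely for illustration (the example_* name is hypothetical):

/* Illustration of the teardown ordering used above; not part of the patch. */
static void example_release_vm_and_pasid(struct amdgpu_device *adev,
					 struct amdgpu_fpriv *fpriv)
{
	/* keep the page directory alive past amdgpu_vm_fini() so its
	 * reservation object can gate the delayed PASID free
	 */
	struct amdgpu_bo *pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
	unsigned int pasid = fpriv->vm.pasid;

	amdgpu_vm_fini(adev, &fpriv->vm);
	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);
}
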
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 54f06c959340..d6416ee52e32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -267,8 +267,6 @@ struct amdgpu_display_funcs {
267 void (*bandwidth_update)(struct amdgpu_device *adev); 267 void (*bandwidth_update)(struct amdgpu_device *adev);
268 /* get frame count */ 268 /* get frame count */
269 u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc); 269 u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
270 /* wait for vblank */
271 void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
272 /* set backlight level */ 270 /* set backlight level */
273 void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder, 271 void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
274 u8 level); 272 u8 level);
@@ -352,6 +350,7 @@ struct amdgpu_mode_info {
352 u16 firmware_flags; 350 u16 firmware_flags;
353 /* pointer to backlight encoder */ 351 /* pointer to backlight encoder */
354 struct amdgpu_encoder *bl_encoder; 352 struct amdgpu_encoder *bl_encoder;
353 u8 bl_level; /* saved backlight level */
355 struct amdgpu_audio audio; /* audio stuff */ 354 struct amdgpu_audio audio; /* audio stuff */
356 int num_crtc; /* number of crtcs */ 355 int num_crtc; /* number of crtcs */
357 int num_hpd; /* number of hpd pins */ 356 int num_hpd; /* number of hpd pins */
@@ -552,14 +551,6 @@ struct amdgpu_connector {
552 /* we need to mind the EDID between detect 551 /* we need to mind the EDID between detect
553 and get modes due to analog/digital/tvencoder */ 552 and get modes due to analog/digital/tvencoder */
554 struct edid *edid; 553 struct edid *edid;
555 /* number of modes generated from EDID at 'dc_sink' */
556 int num_modes;
557 /* The 'old' sink - before an HPD.
558 * The 'current' sink is in dc_link->sink. */
559 struct dc_sink *dc_sink;
560 struct dc_link *dc_link;
561 struct dc_sink *dc_em_sink;
562 const struct dc_stream *stream;
563 void *con_priv; 554 void *con_priv;
564 bool dac_load_detect; 555 bool dac_load_detect;
565 bool detected_by_load; /* if the connection status was determined by load */ 556 bool detected_by_load; /* if the connection status was determined by load */
@@ -570,27 +561,6 @@ struct amdgpu_connector {
570 enum amdgpu_connector_audio audio; 561 enum amdgpu_connector_audio audio;
571 enum amdgpu_connector_dither dither; 562 enum amdgpu_connector_dither dither;
572 unsigned pixelclock_for_modeset; 563 unsigned pixelclock_for_modeset;
573
574 struct drm_dp_mst_topology_mgr mst_mgr;
575 struct amdgpu_dm_dp_aux dm_dp_aux;
576 struct drm_dp_mst_port *port;
577 struct amdgpu_connector *mst_port;
578 struct amdgpu_encoder *mst_encoder;
579 struct semaphore mst_sem;
580
581 /* TODO see if we can merge with ddc_bus or make a dm_connector */
582 struct amdgpu_i2c_adapter *i2c;
583
584 /* Monitor range limits */
585 int min_vfreq ;
586 int max_vfreq ;
587 int pixel_clock_mhz;
588
589 /*freesync caps*/
590 struct mod_freesync_caps caps;
591
592 struct mutex hpd_lock;
593
594}; 564};
595 565
596/* TODO: start to use this struct and remove same field from base one */ 566/* TODO: start to use this struct and remove same field from base one */
@@ -608,7 +578,7 @@ struct amdgpu_mst_connector {
608#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ 578#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
609 ((em) == ATOM_ENCODER_MODE_DP_MST)) 579 ((em) == ATOM_ENCODER_MODE_DP_MST))
610 580
611/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */ 581/* Driver internal use only flags of amdgpu_display_get_crtc_scanoutpos() */
612#define DRM_SCANOUTPOS_VALID (1 << 0) 582#define DRM_SCANOUTPOS_VALID (1 << 0)
613#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1) 583#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
614#define DRM_SCANOUTPOS_ACCURATE (1 << 2) 584#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
@@ -627,30 +597,31 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
627u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder); 597u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
628struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder); 598struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder);
629 599
630bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux); 600bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
601 bool use_aux);
631 602
632void amdgpu_encoder_set_active_device(struct drm_encoder *encoder); 603void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);
633 604
634int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 605int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
635 unsigned int flags, int *vpos, int *hpos, 606 unsigned int pipe, unsigned int flags, int *vpos,
636 ktime_t *stime, ktime_t *etime, 607 int *hpos, ktime_t *stime, ktime_t *etime,
637 const struct drm_display_mode *mode); 608 const struct drm_display_mode *mode);
638 609
639int amdgpu_framebuffer_init(struct drm_device *dev, 610int amdgpu_display_framebuffer_init(struct drm_device *dev,
640 struct amdgpu_framebuffer *rfb, 611 struct amdgpu_framebuffer *rfb,
641 const struct drm_mode_fb_cmd2 *mode_cmd, 612 const struct drm_mode_fb_cmd2 *mode_cmd,
642 struct drm_gem_object *obj); 613 struct drm_gem_object *obj);
643 614
644int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb); 615int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
645 616
646void amdgpu_enc_destroy(struct drm_encoder *encoder); 617void amdgpu_enc_destroy(struct drm_encoder *encoder);
647void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); 618void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
648bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 619bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
649 const struct drm_display_mode *mode, 620 const struct drm_display_mode *mode,
650 struct drm_display_mode *adjusted_mode); 621 struct drm_display_mode *adjusted_mode);
651void amdgpu_panel_mode_fixup(struct drm_encoder *encoder, 622void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
652 struct drm_display_mode *adjusted_mode); 623 struct drm_display_mode *adjusted_mode);
653int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc); 624int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
654 625
655/* fbdev layer */ 626/* fbdev layer */
656int amdgpu_fbdev_init(struct amdgpu_device *adev); 627int amdgpu_fbdev_init(struct amdgpu_device *adev);
@@ -662,15 +633,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
662int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled); 633int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
663 634
664/* amdgpu_display.c */ 635/* amdgpu_display.c */
665void amdgpu_print_display_setup(struct drm_device *dev); 636void amdgpu_display_print_display_setup(struct drm_device *dev);
666int amdgpu_modeset_create_props(struct amdgpu_device *adev); 637int amdgpu_display_modeset_create_props(struct amdgpu_device *adev);
667int amdgpu_crtc_set_config(struct drm_mode_set *set, 638int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
668 struct drm_modeset_acquire_ctx *ctx); 639 struct drm_modeset_acquire_ctx *ctx);
669int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, 640int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
670 struct drm_framebuffer *fb, 641 struct drm_framebuffer *fb,
671 struct drm_pending_vblank_event *event, 642 struct drm_pending_vblank_event *event,
672 uint32_t page_flip_flags, uint32_t target, 643 uint32_t page_flip_flags, uint32_t target,
673 struct drm_modeset_acquire_ctx *ctx); 644 struct drm_modeset_acquire_ctx *ctx);
674extern const struct drm_mode_config_funcs amdgpu_mode_funcs; 645extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
675 646
676#endif 647#endif
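For orientation, a minimal caller sketch against the renamed display helpers declared above — not part of this patch. my_crtc_in_vblank is a hypothetical wrapper; the prototype, the GET_DISTANCE_TO_VBLANKSTART flag and the crtc/hwmode lookup follow the header and the amdgpu_kms.c hunk earlier in this diff.

/* Hypothetical caller of the renamed scanout-position helper. */
#include "amdgpu.h"		/* pulls in amdgpu_mode.h with the prototypes above */

static bool my_crtc_in_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	const struct drm_display_mode *mode;
	int vpos = 0, hpos = 0;
	int stat;

	if (!adev->mode_info.crtcs[pipe])
		return false;
	mode = &adev->mode_info.crtcs[pipe]->base.hwmode;

	/* Ask for the distance to vblank start instead of the raw scanout position */
	stat = amdgpu_display_get_crtc_scanoutpos(dev, pipe,
						  GET_DISTANCE_TO_VBLANKSTART,
						  &vpos, &hpos, NULL, NULL, mode);

	return (stat & DRM_SCANOUTPOS_VALID) && (stat & DRM_SCANOUTPOS_IN_VBLANK);
}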
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5c4c3e0d527b..6d08cde8443c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -36,6 +36,7 @@
36#include <drm/drm_cache.h> 36#include <drm/drm_cache.h>
37#include "amdgpu.h" 37#include "amdgpu.h"
38#include "amdgpu_trace.h" 38#include "amdgpu_trace.h"
39#include "amdgpu_amdkfd.h"
39 40
40static bool amdgpu_need_backup(struct amdgpu_device *adev) 41static bool amdgpu_need_backup(struct amdgpu_device *adev)
41{ 42{
@@ -54,8 +55,13 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
54 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 55 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
55 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); 56 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
56 57
58 if (bo->kfd_bo)
59 amdgpu_amdkfd_unreserve_system_memory_limit(bo);
60
57 amdgpu_bo_kunmap(bo); 61 amdgpu_bo_kunmap(bo);
58 62
63 if (bo->gem_base.import_attach)
64 drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
59 drm_gem_object_release(&bo->gem_base); 65 drm_gem_object_release(&bo->gem_base);
60 amdgpu_bo_unref(&bo->parent); 66 amdgpu_bo_unref(&bo->parent);
61 if (!list_empty(&bo->shadow_list)) { 67 if (!list_empty(&bo->shadow_list)) {
@@ -83,7 +89,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
83 u32 c = 0; 89 u32 c = 0;
84 90
85 if (domain & AMDGPU_GEM_DOMAIN_VRAM) { 91 if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
86 unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; 92 unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
87 93
88 places[c].fpfn = 0; 94 places[c].fpfn = 0;
89 places[c].lpfn = 0; 95 places[c].lpfn = 0;
@@ -103,7 +109,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
103 if (domain & AMDGPU_GEM_DOMAIN_GTT) { 109 if (domain & AMDGPU_GEM_DOMAIN_GTT) {
104 places[c].fpfn = 0; 110 places[c].fpfn = 0;
105 if (flags & AMDGPU_GEM_CREATE_SHADOW) 111 if (flags & AMDGPU_GEM_CREATE_SHADOW)
106 places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT; 112 places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
107 else 113 else
108 places[c].lpfn = 0; 114 places[c].lpfn = 0;
109 places[c].flags = TTM_PL_FLAG_TT; 115 places[c].flags = TTM_PL_FLAG_TT;
@@ -169,13 +175,15 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
169 * @size: size for the new BO 175 * @size: size for the new BO
170 * @align: alignment for the new BO 176 * @align: alignment for the new BO
171 * @domain: where to place it 177 * @domain: where to place it
172 * @bo_ptr: resulting BO 178 * @bo_ptr: used to initialize BOs in structures
173 * @gpu_addr: GPU addr of the pinned BO 179 * @gpu_addr: GPU addr of the pinned BO
174 * @cpu_addr: optional CPU address mapping 180 * @cpu_addr: optional CPU address mapping
175 * 181 *
176 * Allocates and pins a BO for kernel internal use, and returns it still 182 * Allocates and pins a BO for kernel internal use, and returns it still
177 * reserved. 183 * reserved.
178 * 184 *
 185 * Note: a new BO is created for bo_ptr only if *bo_ptr is NULL.
186 *
179 * Returns 0 on success, negative error code otherwise. 187 * Returns 0 on success, negative error code otherwise.
180 */ 188 */
181int amdgpu_bo_create_reserved(struct amdgpu_device *adev, 189int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
@@ -187,10 +195,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
187 int r; 195 int r;
188 196
189 if (!*bo_ptr) { 197 if (!*bo_ptr) {
190 r = amdgpu_bo_create(adev, size, align, true, domain, 198 r = amdgpu_bo_create(adev, size, align, domain,
191 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 199 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
192 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, 200 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
193 NULL, NULL, 0, bo_ptr); 201 ttm_bo_type_kernel, NULL, bo_ptr);
194 if (r) { 202 if (r) {
195 dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", 203 dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
196 r); 204 r);
@@ -238,12 +246,14 @@ error_free:
238 * @size: size for the new BO 246 * @size: size for the new BO
239 * @align: alignment for the new BO 247 * @align: alignment for the new BO
240 * @domain: where to place it 248 * @domain: where to place it
241 * @bo_ptr: resulting BO 249 * @bo_ptr: used to initialize BOs in structures
242 * @gpu_addr: GPU addr of the pinned BO 250 * @gpu_addr: GPU addr of the pinned BO
243 * @cpu_addr: optional CPU address mapping 251 * @cpu_addr: optional CPU address mapping
244 * 252 *
245 * Allocates and pins a BO for kernel internal use. 253 * Allocates and pins a BO for kernel internal use.
246 * 254 *
 255 * Note: a new BO is created for bo_ptr only if *bo_ptr is NULL.
256 *
247 * Returns 0 on success, negative error code otherwise. 257 * Returns 0 on success, negative error code otherwise.
248 */ 258 */
249int amdgpu_bo_create_kernel(struct amdgpu_device *adev, 259int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
@@ -331,22 +341,19 @@ fail:
331 return false; 341 return false;
332} 342}
333 343
334static int amdgpu_bo_do_create(struct amdgpu_device *adev, 344static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
335 unsigned long size, int byte_align, 345 int byte_align, u32 domain,
336 bool kernel, u32 domain, u64 flags, 346 u64 flags, enum ttm_bo_type type,
337 struct sg_table *sg,
338 struct reservation_object *resv, 347 struct reservation_object *resv,
339 uint64_t init_value,
340 struct amdgpu_bo **bo_ptr) 348 struct amdgpu_bo **bo_ptr)
341{ 349{
342 struct ttm_operation_ctx ctx = { 350 struct ttm_operation_ctx ctx = {
343 .interruptible = !kernel, 351 .interruptible = (type != ttm_bo_type_kernel),
344 .no_wait_gpu = false, 352 .no_wait_gpu = false,
345 .allow_reserved_eviction = true, 353 .resv = resv,
346 .resv = resv 354 .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
347 }; 355 };
348 struct amdgpu_bo *bo; 356 struct amdgpu_bo *bo;
349 enum ttm_bo_type type;
350 unsigned long page_align; 357 unsigned long page_align;
351 size_t acc_size; 358 size_t acc_size;
352 int r; 359 int r;
@@ -357,13 +364,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
357 if (!amdgpu_bo_validate_size(adev, size, domain)) 364 if (!amdgpu_bo_validate_size(adev, size, domain))
358 return -ENOMEM; 365 return -ENOMEM;
359 366
360 if (kernel) {
361 type = ttm_bo_type_kernel;
362 } else if (sg) {
363 type = ttm_bo_type_sg;
364 } else {
365 type = ttm_bo_type_device;
366 }
367 *bo_ptr = NULL; 367 *bo_ptr = NULL;
368 368
369 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, 369 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
@@ -372,11 +372,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
372 bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL); 372 bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
373 if (bo == NULL) 373 if (bo == NULL)
374 return -ENOMEM; 374 return -ENOMEM;
375 r = drm_gem_object_init(adev->ddev, &bo->gem_base, size); 375 drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
376 if (unlikely(r)) {
377 kfree(bo);
378 return r;
379 }
380 INIT_LIST_HEAD(&bo->shadow_list); 376 INIT_LIST_HEAD(&bo->shadow_list);
381 INIT_LIST_HEAD(&bo->va); 377 INIT_LIST_HEAD(&bo->va);
382 bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | 378 bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -386,7 +382,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
386 AMDGPU_GEM_DOMAIN_GWS | 382 AMDGPU_GEM_DOMAIN_GWS |
387 AMDGPU_GEM_DOMAIN_OA); 383 AMDGPU_GEM_DOMAIN_OA);
388 bo->allowed_domains = bo->preferred_domains; 384 bo->allowed_domains = bo->preferred_domains;
389 if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) 385 if (type != ttm_bo_type_kernel &&
386 bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
390 bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; 387 bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
391 388
392 bo->flags = flags; 389 bo->flags = flags;
@@ -423,27 +420,27 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
423 amdgpu_ttm_placement_from_domain(bo, domain); 420 amdgpu_ttm_placement_from_domain(bo, domain);
424 421
425 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, 422 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
426 &bo->placement, page_align, &ctx, NULL, 423 &bo->placement, page_align, &ctx, acc_size,
427 acc_size, sg, resv, &amdgpu_ttm_bo_destroy); 424 NULL, resv, &amdgpu_ttm_bo_destroy);
428 if (unlikely(r != 0)) 425 if (unlikely(r != 0))
429 return r; 426 return r;
430 427
431 if (adev->mc.visible_vram_size < adev->mc.real_vram_size && 428 if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
432 bo->tbo.mem.mem_type == TTM_PL_VRAM && 429 bo->tbo.mem.mem_type == TTM_PL_VRAM &&
433 bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) 430 bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
434 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 431 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
435 ctx.bytes_moved); 432 ctx.bytes_moved);
436 else 433 else
437 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); 434 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
438 435
439 if (kernel) 436 if (type == ttm_bo_type_kernel)
440 bo->tbo.priority = 1; 437 bo->tbo.priority = 1;
441 438
442 if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && 439 if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
443 bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { 440 bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
444 struct dma_fence *fence; 441 struct dma_fence *fence;
445 442
446 r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence); 443 r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
447 if (unlikely(r)) 444 if (unlikely(r))
448 goto fail_unreserve; 445 goto fail_unreserve;
449 446
@@ -480,12 +477,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
480 if (bo->shadow) 477 if (bo->shadow)
481 return 0; 478 return 0;
482 479
483 r = amdgpu_bo_do_create(adev, size, byte_align, true, 480 r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
484 AMDGPU_GEM_DOMAIN_GTT,
485 AMDGPU_GEM_CREATE_CPU_GTT_USWC | 481 AMDGPU_GEM_CREATE_CPU_GTT_USWC |
486 AMDGPU_GEM_CREATE_SHADOW, 482 AMDGPU_GEM_CREATE_SHADOW,
487 NULL, bo->tbo.resv, 0, 483 ttm_bo_type_kernel,
488 &bo->shadow); 484 bo->tbo.resv, &bo->shadow);
489 if (!r) { 485 if (!r) {
490 bo->shadow->parent = amdgpu_bo_ref(bo); 486 bo->shadow->parent = amdgpu_bo_ref(bo);
491 mutex_lock(&adev->shadow_list_lock); 487 mutex_lock(&adev->shadow_list_lock);
@@ -496,22 +492,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
496 return r; 492 return r;
497} 493}
498 494
499/* init_value will only take effect when flags contains 495int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
500 * AMDGPU_GEM_CREATE_VRAM_CLEARED. 496 int byte_align, u32 domain,
501 */ 497 u64 flags, enum ttm_bo_type type,
502int amdgpu_bo_create(struct amdgpu_device *adev,
503 unsigned long size, int byte_align,
504 bool kernel, u32 domain, u64 flags,
505 struct sg_table *sg,
506 struct reservation_object *resv, 498 struct reservation_object *resv,
507 uint64_t init_value,
508 struct amdgpu_bo **bo_ptr) 499 struct amdgpu_bo **bo_ptr)
509{ 500{
510 uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; 501 uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
511 int r; 502 int r;
512 503
513 r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain, 504 r = amdgpu_bo_do_create(adev, size, byte_align, domain,
514 parent_flags, sg, resv, init_value, bo_ptr); 505 parent_flags, type, resv, bo_ptr);
515 if (r) 506 if (r)
516 return r; 507 return r;
517 508
@@ -826,31 +817,32 @@ static const char *amdgpu_vram_names[] = {
826 "GDDR4", 817 "GDDR4",
827 "GDDR5", 818 "GDDR5",
828 "HBM", 819 "HBM",
829 "DDR3" 820 "DDR3",
821 "DDR4",
830}; 822};
831 823
832int amdgpu_bo_init(struct amdgpu_device *adev) 824int amdgpu_bo_init(struct amdgpu_device *adev)
833{ 825{
834 /* reserve PAT memory space to WC for VRAM */ 826 /* reserve PAT memory space to WC for VRAM */
835 arch_io_reserve_memtype_wc(adev->mc.aper_base, 827 arch_io_reserve_memtype_wc(adev->gmc.aper_base,
836 adev->mc.aper_size); 828 adev->gmc.aper_size);
837 829
838 /* Add an MTRR for the VRAM */ 830 /* Add an MTRR for the VRAM */
839 adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base, 831 adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
840 adev->mc.aper_size); 832 adev->gmc.aper_size);
841 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", 833 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
842 adev->mc.mc_vram_size >> 20, 834 adev->gmc.mc_vram_size >> 20,
843 (unsigned long long)adev->mc.aper_size >> 20); 835 (unsigned long long)adev->gmc.aper_size >> 20);
844 DRM_INFO("RAM width %dbits %s\n", 836 DRM_INFO("RAM width %dbits %s\n",
845 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]); 837 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
846 return amdgpu_ttm_init(adev); 838 return amdgpu_ttm_init(adev);
847} 839}
848 840
849void amdgpu_bo_fini(struct amdgpu_device *adev) 841void amdgpu_bo_fini(struct amdgpu_device *adev)
850{ 842{
851 amdgpu_ttm_fini(adev); 843 amdgpu_ttm_fini(adev);
852 arch_phys_wc_del(adev->mc.vram_mtrr); 844 arch_phys_wc_del(adev->gmc.vram_mtrr);
853 arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size); 845 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
854} 846}
855 847
856int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, 848int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
@@ -980,7 +972,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
980 972
981 size = bo->mem.num_pages << PAGE_SHIFT; 973 size = bo->mem.num_pages << PAGE_SHIFT;
982 offset = bo->mem.start << PAGE_SHIFT; 974 offset = bo->mem.start << PAGE_SHIFT;
983 if ((offset + size) <= adev->mc.visible_vram_size) 975 if ((offset + size) <= adev->gmc.visible_vram_size)
984 return 0; 976 return 0;
985 977
986 /* Can't move a pinned BO to visible VRAM */ 978 /* Can't move a pinned BO to visible VRAM */
@@ -1003,7 +995,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1003 offset = bo->mem.start << PAGE_SHIFT; 995 offset = bo->mem.start << PAGE_SHIFT;
1004 /* this should never happen */ 996 /* this should never happen */
1005 if (bo->mem.mem_type == TTM_PL_VRAM && 997 if (bo->mem.mem_type == TTM_PL_VRAM &&
1006 (offset + size) > adev->mc.visible_vram_size) 998 (offset + size) > adev->gmc.visible_vram_size)
1007 return -EINVAL; 999 return -EINVAL;
1008 1000
1009 return 0; 1001 return 0;
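A minimal usage sketch for amdgpu_bo_create_reserved(), reflecting the note added in the kernel-doc above — not from this patch. my_alloc_scratch_bo is a hypothetical caller, and the gpu_addr/cpu_addr tail of the signature is taken from the documented parameters rather than shown in full in this hunk.

/* Hypothetical helper allocating a page-sized, pinned, reserved kernel BO. */
#include "amdgpu.h"
#include "amdgpu_object.h"

static int my_alloc_scratch_bo(struct amdgpu_device *adev,
			       struct amdgpu_bo **bo, u64 *gpu_addr,
			       void **cpu_ptr)
{
	int r;

	*bo = NULL;	/* per the note above, a new BO is created only then */
	r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      bo, gpu_addr, cpu_ptr);
	if (r)
		return r;

	/* ... initialize the buffer through *cpu_ptr ... */

	amdgpu_bo_unreserve(*bo);	/* the helper hands the BO back reserved */
	return 0;
}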
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 33615e2ea2e6..546f77cb7882 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -92,6 +92,8 @@ struct amdgpu_bo {
92 struct list_head mn_list; 92 struct list_head mn_list;
93 struct list_head shadow_list; 93 struct list_head shadow_list;
94 }; 94 };
95
96 struct kgd_mem *kfd_bo;
95}; 97};
96 98
97static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) 99static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
@@ -201,13 +203,11 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
201 return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC; 203 return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
202} 204}
203 205
204int amdgpu_bo_create(struct amdgpu_device *adev, 206int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
205 unsigned long size, int byte_align, 207 int byte_align, u32 domain,
206 bool kernel, u32 domain, u64 flags, 208 u64 flags, enum ttm_bo_type type,
207 struct sg_table *sg, 209 struct reservation_object *resv,
208 struct reservation_object *resv, 210 struct amdgpu_bo **bo_ptr);
209 uint64_t init_value,
210 struct amdgpu_bo **bo_ptr);
211int amdgpu_bo_create_reserved(struct amdgpu_device *adev, 211int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
212 unsigned long size, int align, 212 unsigned long size, int align,
213 u32 domain, struct amdgpu_bo **bo_ptr, 213 u32 domain, struct amdgpu_bo **bo_ptr,
@@ -282,8 +282,6 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
282 struct amdgpu_sa_manager *sa_manager); 282 struct amdgpu_sa_manager *sa_manager);
283int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, 283int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
284 struct amdgpu_sa_manager *sa_manager); 284 struct amdgpu_sa_manager *sa_manager);
285int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
286 struct amdgpu_sa_manager *sa_manager);
287int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, 285int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
288 struct amdgpu_sa_bo **sa_bo, 286 struct amdgpu_sa_bo **sa_bo,
289 unsigned size, unsigned align); 287 unsigned size, unsigned align);
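A hedged migration sketch for the reworked amdgpu_bo_create() prototype declared above — not from this patch. The old call shape is kept in a comment for comparison; my_create_gtt_bo and the chosen domain/flags are illustrative only.

/* Hypothetical caller ported to the new prototype. */
#include "amdgpu.h"
#include "amdgpu_object.h"

static int my_create_gtt_bo(struct amdgpu_device *adev, unsigned long size,
			    struct amdgpu_bo **bo)
{
	/* was: amdgpu_bo_create(adev, size, PAGE_SIZE, false, domain, flags,
	 *			 NULL, NULL, 0, bo);
	 */
	return amdgpu_bo_create(adev, size, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT,
				AMDGPU_GEM_CREATE_CPU_GTT_USWC,
				ttm_bo_type_device, NULL, bo);
}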
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 01a996c6b802..361975cf45a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -116,7 +116,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
116 } 116 }
117 117
118 if (adev->powerplay.pp_funcs->dispatch_tasks) { 118 if (adev->powerplay.pp_funcs->dispatch_tasks) {
119 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL); 119 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
120 } else { 120 } else {
121 mutex_lock(&adev->pm.mutex); 121 mutex_lock(&adev->pm.mutex);
122 adev->pm.dpm.user_state = state; 122 adev->pm.dpm.user_state = state;
@@ -316,7 +316,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
316 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 316 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
317 state != POWER_STATE_TYPE_DEFAULT) { 317 state != POWER_STATE_TYPE_DEFAULT) {
318 amdgpu_dpm_dispatch_task(adev, 318 amdgpu_dpm_dispatch_task(adev,
319 AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL); 319 AMD_PP_TASK_ENABLE_USER_STATE, &state);
320 adev->pp_force_state_enabled = true; 320 adev->pp_force_state_enabled = true;
321 } 321 }
322 } 322 }
@@ -360,6 +360,90 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
360 return count; 360 return count;
361} 361}
362 362
363static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
364 struct device_attribute *attr,
365 const char *buf,
366 size_t count)
367{
368 struct drm_device *ddev = dev_get_drvdata(dev);
369 struct amdgpu_device *adev = ddev->dev_private;
370 int ret;
371 uint32_t parameter_size = 0;
372 long parameter[64];
373 char buf_cpy[128];
374 char *tmp_str;
375 char *sub_str;
376 const char delimiter[3] = {' ', '\n', '\0'};
377 uint32_t type;
378
379 if (count > 127)
380 return -EINVAL;
381
382 if (*buf == 's')
383 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
384 else if (*buf == 'm')
385 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
 386 else if (*buf == 'r')
387 type = PP_OD_RESTORE_DEFAULT_TABLE;
388 else if (*buf == 'c')
389 type = PP_OD_COMMIT_DPM_TABLE;
390 else
391 return -EINVAL;
392
393 memcpy(buf_cpy, buf, count+1);
394
395 tmp_str = buf_cpy;
396
397 while (isspace(*++tmp_str));
398
399 while (tmp_str[0]) {
400 sub_str = strsep(&tmp_str, delimiter);
401 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
402 if (ret)
403 return -EINVAL;
404 parameter_size++;
405
406 while (isspace(*tmp_str))
407 tmp_str++;
408 }
409
410 if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
411 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
412 parameter, parameter_size);
413
414 if (ret)
415 return -EINVAL;
416
417 if (type == PP_OD_COMMIT_DPM_TABLE) {
418 if (adev->powerplay.pp_funcs->dispatch_tasks) {
419 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
420 return count;
421 } else {
422 return -EINVAL;
423 }
424 }
425
426 return count;
427}
428
429static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
430 struct device_attribute *attr,
431 char *buf)
432{
433 struct drm_device *ddev = dev_get_drvdata(dev);
434 struct amdgpu_device *adev = ddev->dev_private;
435 uint32_t size = 0;
436
437 if (adev->powerplay.pp_funcs->print_clock_levels) {
438 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
439 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
440 return size;
441 } else {
442 return snprintf(buf, PAGE_SIZE, "\n");
443 }
444
445}
446
363static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 447static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
364 struct device_attribute *attr, 448 struct device_attribute *attr,
365 char *buf) 449 char *buf)
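A userspace sketch — not from this patch — of driving the new pp_od_clk_voltage store handler above. The /sys/class/drm/card0 path and the "<level> <clock> <voltage>" argument meaning are assumptions; the handler itself only requires a leading 's', 'm', 'r' or 'c' followed by whitespace-separated numbers that it forwards to odn_edit_dpm_table.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int od_write(const char *cmd)
{
	const char *path = "/sys/class/drm/card0/device/pp_od_clk_voltage";
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return -1;
	}
	n = write(fd, cmd, strlen(cmd));	/* e.g. "s 0 600 800\n" */
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	od_write("s 0 600 800\n");	/* edit one sclk/vddc table entry */
	od_write("c\n");		/* commit the edited table */
	return 0;
}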
@@ -530,7 +614,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
530 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 614 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
531 615
532 if (adev->powerplay.pp_funcs->dispatch_tasks) { 616 if (adev->powerplay.pp_funcs->dispatch_tasks) {
533 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); 617 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
534 } else { 618 } else {
535 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 619 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
536 amdgpu_pm_compute_clocks(adev); 620 amdgpu_pm_compute_clocks(adev);
@@ -574,7 +658,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
574 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); 658 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
575 659
576 if (adev->powerplay.pp_funcs->dispatch_tasks) { 660 if (adev->powerplay.pp_funcs->dispatch_tasks) {
577 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); 661 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
578 } else { 662 } else {
579 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 663 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
580 amdgpu_pm_compute_clocks(adev); 664 amdgpu_pm_compute_clocks(adev);
@@ -584,159 +668,70 @@ fail:
584 return count; 668 return count;
585} 669}
586 670
587static ssize_t amdgpu_get_pp_power_profile(struct device *dev, 671static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
588 char *buf, struct amd_pp_profile *query) 672 struct device_attribute *attr,
673 char *buf)
589{ 674{
590 struct drm_device *ddev = dev_get_drvdata(dev); 675 struct drm_device *ddev = dev_get_drvdata(dev);
591 struct amdgpu_device *adev = ddev->dev_private; 676 struct amdgpu_device *adev = ddev->dev_private;
592 int ret = 0xff;
593 677
594 if (adev->powerplay.pp_funcs->get_power_profile_state) 678 if (adev->powerplay.pp_funcs->get_power_profile_mode)
595 ret = amdgpu_dpm_get_power_profile_state( 679 return amdgpu_dpm_get_power_profile_mode(adev, buf);
596 adev, query);
597 680
598 if (ret) 681 return snprintf(buf, PAGE_SIZE, "\n");
599 return ret;
600
601 return snprintf(buf, PAGE_SIZE,
602 "%d %d %d %d %d\n",
603 query->min_sclk / 100,
604 query->min_mclk / 100,
605 query->activity_threshold,
606 query->up_hyst,
607 query->down_hyst);
608} 682}
609 683
610static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev,
611 struct device_attribute *attr,
612 char *buf)
613{
614 struct amd_pp_profile query = {0};
615
616 query.type = AMD_PP_GFX_PROFILE;
617
618 return amdgpu_get_pp_power_profile(dev, buf, &query);
619}
620 684
621static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev, 685static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
622 struct device_attribute *attr, 686 struct device_attribute *attr,
623 char *buf)
624{
625 struct amd_pp_profile query = {0};
626
627 query.type = AMD_PP_COMPUTE_PROFILE;
628
629 return amdgpu_get_pp_power_profile(dev, buf, &query);
630}
631
632static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
633 const char *buf, 687 const char *buf,
634 size_t count, 688 size_t count)
635 struct amd_pp_profile *request)
636{ 689{
690 int ret = 0xff;
637 struct drm_device *ddev = dev_get_drvdata(dev); 691 struct drm_device *ddev = dev_get_drvdata(dev);
638 struct amdgpu_device *adev = ddev->dev_private; 692 struct amdgpu_device *adev = ddev->dev_private;
639 uint32_t loop = 0; 693 uint32_t parameter_size = 0;
640 char *sub_str, buf_cpy[128], *tmp_str; 694 long parameter[64];
695 char *sub_str, buf_cpy[128];
696 char *tmp_str;
697 uint32_t i = 0;
698 char tmp[2];
699 long int profile_mode = 0;
641 const char delimiter[3] = {' ', '\n', '\0'}; 700 const char delimiter[3] = {' ', '\n', '\0'};
642 long int value;
643 int ret = 0xff;
644
645 if (strncmp("reset", buf, strlen("reset")) == 0) {
646 if (adev->powerplay.pp_funcs->reset_power_profile_state)
647 ret = amdgpu_dpm_reset_power_profile_state(
648 adev, request);
649 if (ret) {
650 count = -EINVAL;
651 goto fail;
652 }
653 return count;
654 }
655
656 if (strncmp("set", buf, strlen("set")) == 0) {
657 if (adev->powerplay.pp_funcs->set_power_profile_state)
658 ret = amdgpu_dpm_set_power_profile_state(
659 adev, request);
660
661 if (ret) {
662 count = -EINVAL;
663 goto fail;
664 }
665 return count;
666 }
667 701
668 if (count + 1 >= 128) { 702 tmp[0] = *(buf);
669 count = -EINVAL; 703 tmp[1] = '\0';
704 ret = kstrtol(tmp, 0, &profile_mode);
705 if (ret)
670 goto fail; 706 goto fail;
671 }
672
673 memcpy(buf_cpy, buf, count + 1);
674 tmp_str = buf_cpy;
675
676 while (tmp_str[0]) {
677 sub_str = strsep(&tmp_str, delimiter);
678 ret = kstrtol(sub_str, 0, &value);
679 if (ret) {
680 count = -EINVAL;
681 goto fail;
682 }
683 707
684 switch (loop) { 708 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
685 case 0: 709 if (count < 2 || count > 127)
686 /* input unit MHz convert to dpm table unit 10KHz*/ 710 return -EINVAL;
687 request->min_sclk = (uint32_t)value * 100; 711 while (isspace(*++buf))
688 break; 712 i++;
689 case 1: 713 memcpy(buf_cpy, buf, count-i);
690 /* input unit MHz convert to dpm table unit 10KHz*/ 714 tmp_str = buf_cpy;
691 request->min_mclk = (uint32_t)value * 100; 715 while (tmp_str[0]) {
692 break; 716 sub_str = strsep(&tmp_str, delimiter);
693 case 2: 717 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
694 request->activity_threshold = (uint16_t)value; 718 if (ret) {
695 break; 719 count = -EINVAL;
696 case 3: 720 goto fail;
697 request->up_hyst = (uint8_t)value; 721 }
698 break; 722 parameter_size++;
699 case 4: 723 while (isspace(*tmp_str))
700 request->down_hyst = (uint8_t)value; 724 tmp_str++;
701 break;
702 default:
703 break;
704 } 725 }
705
706 loop++;
707 } 726 }
708 if (adev->powerplay.pp_funcs->set_power_profile_state) 727 parameter[parameter_size] = profile_mode;
709 ret = amdgpu_dpm_set_power_profile_state(adev, request); 728 if (adev->powerplay.pp_funcs->set_power_profile_mode)
710 729 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
711 if (ret)
712 count = -EINVAL;
713 730
731 if (!ret)
732 return count;
714fail: 733fail:
715 return count; 734 return -EINVAL;
716}
717
718static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev,
719 struct device_attribute *attr,
720 const char *buf,
721 size_t count)
722{
723 struct amd_pp_profile request = {0};
724
725 request.type = AMD_PP_GFX_PROFILE;
726
727 return amdgpu_set_pp_power_profile(dev, buf, count, &request);
728}
729
730static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev,
731 struct device_attribute *attr,
732 const char *buf,
733 size_t count)
734{
735 struct amd_pp_profile request = {0};
736
737 request.type = AMD_PP_COMPUTE_PROFILE;
738
739 return amdgpu_set_pp_power_profile(dev, buf, count, &request);
740} 735}
741 736
742static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); 737static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
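Likewise, a hedged userspace sketch for the new pp_power_profile_mode attribute: reading dumps the table the backend reports through get_power_profile_mode, and writing a mode index selects it (the custom mode additionally takes parameters). The card0 path is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/pp_power_profile_mode";
	char table[4096];
	ssize_t n;
	int fd;

	/* Reading lists the modes provided by the powerplay backend */
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, table, sizeof(table) - 1);
	if (n > 0) {
		table[n] = '\0';
		fputs(table, stdout);
	}
	close(fd);

	/* Writing a single digit selects that mode */
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, "1\n", 2);
	close(fd);
	return 0;
}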
@@ -766,12 +761,12 @@ static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
766static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, 761static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
767 amdgpu_get_pp_mclk_od, 762 amdgpu_get_pp_mclk_od,
768 amdgpu_set_pp_mclk_od); 763 amdgpu_set_pp_mclk_od);
769static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR, 764static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
770 amdgpu_get_pp_gfx_power_profile, 765 amdgpu_get_pp_power_profile_mode,
771 amdgpu_set_pp_gfx_power_profile); 766 amdgpu_set_pp_power_profile_mode);
772static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR, 767static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
773 amdgpu_get_pp_compute_power_profile, 768 amdgpu_get_pp_od_clk_voltage,
774 amdgpu_set_pp_compute_power_profile); 769 amdgpu_set_pp_od_clk_voltage);
775 770
776static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 771static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
777 struct device_attribute *attr, 772 struct device_attribute *attr,
@@ -779,17 +774,23 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
779{ 774{
780 struct amdgpu_device *adev = dev_get_drvdata(dev); 775 struct amdgpu_device *adev = dev_get_drvdata(dev);
781 struct drm_device *ddev = adev->ddev; 776 struct drm_device *ddev = adev->ddev;
782 int temp; 777 int r, temp, size = sizeof(temp);
783 778
784 /* Can't get temperature when the card is off */ 779 /* Can't get temperature when the card is off */
785 if ((adev->flags & AMD_IS_PX) && 780 if ((adev->flags & AMD_IS_PX) &&
786 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 781 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
787 return -EINVAL; 782 return -EINVAL;
788 783
789 if (!adev->powerplay.pp_funcs->get_temperature) 784 /* sanity check PP is enabled */
790 temp = 0; 785 if (!(adev->powerplay.pp_funcs &&
791 else 786 adev->powerplay.pp_funcs->read_sensor))
792 temp = amdgpu_dpm_get_temperature(adev); 787 return -EINVAL;
788
789 /* get the temperature */
790 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
791 (void *)&temp, &size);
792 if (r)
793 return r;
793 794
794 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 795 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
795} 796}
@@ -834,6 +835,11 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
834 int err; 835 int err;
835 int value; 836 int value;
836 837
838 /* Can't adjust fan when the card is off */
839 if ((adev->flags & AMD_IS_PX) &&
840 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
841 return -EINVAL;
842
837 if (!adev->powerplay.pp_funcs->set_fan_control_mode) 843 if (!adev->powerplay.pp_funcs->set_fan_control_mode)
838 return -EINVAL; 844 return -EINVAL;
839 845
@@ -868,6 +874,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
868 int err; 874 int err;
869 u32 value; 875 u32 value;
870 876
877 /* Can't adjust fan when the card is off */
878 if ((adev->flags & AMD_IS_PX) &&
879 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
880 return -EINVAL;
881
871 err = kstrtou32(buf, 10, &value); 882 err = kstrtou32(buf, 10, &value);
872 if (err) 883 if (err)
873 return err; 884 return err;
@@ -891,6 +902,11 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
891 int err; 902 int err;
892 u32 speed = 0; 903 u32 speed = 0;
893 904
905 /* Can't adjust fan when the card is off */
906 if ((adev->flags & AMD_IS_PX) &&
907 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
908 return -EINVAL;
909
894 if (adev->powerplay.pp_funcs->get_fan_speed_percent) { 910 if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
895 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); 911 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
896 if (err) 912 if (err)
@@ -910,6 +926,11 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
910 int err; 926 int err;
911 u32 speed = 0; 927 u32 speed = 0;
912 928
929 /* Can't adjust fan when the card is off */
930 if ((adev->flags & AMD_IS_PX) &&
931 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
932 return -EINVAL;
933
913 if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { 934 if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
914 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 935 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
915 if (err) 936 if (err)
@@ -919,6 +940,175 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
919 return sprintf(buf, "%i\n", speed); 940 return sprintf(buf, "%i\n", speed);
920} 941}
921 942
943static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
944 struct device_attribute *attr,
945 char *buf)
946{
947 struct amdgpu_device *adev = dev_get_drvdata(dev);
948 struct drm_device *ddev = adev->ddev;
949 u32 vddgfx;
950 int r, size = sizeof(vddgfx);
951
952 /* Can't get voltage when the card is off */
953 if ((adev->flags & AMD_IS_PX) &&
954 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
955 return -EINVAL;
956
957 /* sanity check PP is enabled */
958 if (!(adev->powerplay.pp_funcs &&
959 adev->powerplay.pp_funcs->read_sensor))
960 return -EINVAL;
961
962 /* get the voltage */
963 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
964 (void *)&vddgfx, &size);
965 if (r)
966 return r;
967
968 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
969}
970
971static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
972 struct device_attribute *attr,
973 char *buf)
974{
975 return snprintf(buf, PAGE_SIZE, "vddgfx\n");
976}
977
978static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
979 struct device_attribute *attr,
980 char *buf)
981{
982 struct amdgpu_device *adev = dev_get_drvdata(dev);
983 struct drm_device *ddev = adev->ddev;
984 u32 vddnb;
985 int r, size = sizeof(vddnb);
986
987 /* only APUs have vddnb */
 988 if (!(adev->flags & AMD_IS_APU))
989 return -EINVAL;
990
991 /* Can't get voltage when the card is off */
992 if ((adev->flags & AMD_IS_PX) &&
993 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
994 return -EINVAL;
995
996 /* sanity check PP is enabled */
997 if (!(adev->powerplay.pp_funcs &&
998 adev->powerplay.pp_funcs->read_sensor))
999 return -EINVAL;
1000
1001 /* get the voltage */
1002 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
1003 (void *)&vddnb, &size);
1004 if (r)
1005 return r;
1006
1007 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
1008}
1009
1010static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
1011 struct device_attribute *attr,
1012 char *buf)
1013{
1014 return snprintf(buf, PAGE_SIZE, "vddnb\n");
1015}
1016
1017static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
1018 struct device_attribute *attr,
1019 char *buf)
1020{
1021 struct amdgpu_device *adev = dev_get_drvdata(dev);
1022 struct drm_device *ddev = adev->ddev;
1023 struct pp_gpu_power query = {0};
1024 int r, size = sizeof(query);
1025 unsigned uw;
1026
1027 /* Can't get power when the card is off */
1028 if ((adev->flags & AMD_IS_PX) &&
1029 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1030 return -EINVAL;
1031
1032 /* sanity check PP is enabled */
1033 if (!(adev->powerplay.pp_funcs &&
1034 adev->powerplay.pp_funcs->read_sensor))
1035 return -EINVAL;
1036
 1037 /* get the power */
1038 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
1039 (void *)&query, &size);
1040 if (r)
1041 return r;
1042
1043 /* convert to microwatts */
1044 uw = (query.average_gpu_power >> 8) * 1000000;
1045
1046 return snprintf(buf, PAGE_SIZE, "%u\n", uw);
1047}
1048
1049static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
1050 struct device_attribute *attr,
1051 char *buf)
1052{
1053 return sprintf(buf, "%i\n", 0);
1054}
1055
1056static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
1057 struct device_attribute *attr,
1058 char *buf)
1059{
1060 struct amdgpu_device *adev = dev_get_drvdata(dev);
1061 uint32_t limit = 0;
1062
1063 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
1064 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
1065 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
1066 } else {
1067 return snprintf(buf, PAGE_SIZE, "\n");
1068 }
1069}
1070
1071static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
1072 struct device_attribute *attr,
1073 char *buf)
1074{
1075 struct amdgpu_device *adev = dev_get_drvdata(dev);
1076 uint32_t limit = 0;
1077
1078 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
1079 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
1080 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
1081 } else {
1082 return snprintf(buf, PAGE_SIZE, "\n");
1083 }
1084}
1085
1086
1087static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
1088 struct device_attribute *attr,
1089 const char *buf,
1090 size_t count)
1091{
1092 struct amdgpu_device *adev = dev_get_drvdata(dev);
1093 int err;
1094 u32 value;
1095
1096 err = kstrtou32(buf, 10, &value);
1097 if (err)
1098 return err;
1099
1100 value = value / 1000000; /* convert to Watt */
1101 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
1102 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
1103 if (err)
1104 return err;
1105 } else {
1106 return -EINVAL;
1107 }
1108
1109 return count;
1110}
1111
922static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0); 1112static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
923static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 1113static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
924static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 1114static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
@@ -927,6 +1117,14 @@ static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_
927static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); 1117static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
928static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); 1118static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
929static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); 1119static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
1120static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
1121static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
1122static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
1123static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
1124static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
1125static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
1126static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
1127static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
930 1128
931static struct attribute *hwmon_attributes[] = { 1129static struct attribute *hwmon_attributes[] = {
932 &sensor_dev_attr_temp1_input.dev_attr.attr, 1130 &sensor_dev_attr_temp1_input.dev_attr.attr,
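A small read-side sketch — not from this patch — for the hwmon channels registered above. The hwmon0 index and card0 path are assumptions; the unit comment follows the conversions shown earlier in this file.

#include <stdio.h>

int main(void)
{
	const char *base = "/sys/class/drm/card0/device/hwmon/hwmon0";
	const char *files[] = { "in0_input", "in1_input", "power1_average",
				"power1_cap" };
	char path[256], buf[32];
	size_t i;

	for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s", base, files[i]);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			/* power1_* are reported in microwatts (see conversions above) */
			printf("%s: %s", files[i], buf);
		fclose(f);
	}
	return 0;
}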
@@ -937,6 +1135,14 @@ static struct attribute *hwmon_attributes[] = {
937 &sensor_dev_attr_pwm1_min.dev_attr.attr, 1135 &sensor_dev_attr_pwm1_min.dev_attr.attr,
938 &sensor_dev_attr_pwm1_max.dev_attr.attr, 1136 &sensor_dev_attr_pwm1_max.dev_attr.attr,
939 &sensor_dev_attr_fan1_input.dev_attr.attr, 1137 &sensor_dev_attr_fan1_input.dev_attr.attr,
1138 &sensor_dev_attr_in0_input.dev_attr.attr,
1139 &sensor_dev_attr_in0_label.dev_attr.attr,
1140 &sensor_dev_attr_in1_input.dev_attr.attr,
1141 &sensor_dev_attr_in1_label.dev_attr.attr,
1142 &sensor_dev_attr_power1_average.dev_attr.attr,
1143 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
1144 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
1145 &sensor_dev_attr_power1_cap.dev_attr.attr,
940 NULL 1146 NULL
941}; 1147};
942 1148
@@ -947,9 +1153,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
947 struct amdgpu_device *adev = dev_get_drvdata(dev); 1153 struct amdgpu_device *adev = dev_get_drvdata(dev);
948 umode_t effective_mode = attr->mode; 1154 umode_t effective_mode = attr->mode;
949 1155
950 /* no skipping for powerplay */ 1156 /* handle non-powerplay limitations */
951 if (adev->powerplay.cgs_device) 1157 if (!adev->powerplay.pp_handle) {
952 return effective_mode; 1158 /* Skip fan attributes if fan is not present */
1159 if (adev->pm.no_fan &&
1160 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
1161 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
1162 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
1163 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
1164 return 0;
1165 /* requires powerplay */
1166 if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
1167 return 0;
1168 }
953 1169
954 /* Skip limit attributes if DPM is not enabled */ 1170 /* Skip limit attributes if DPM is not enabled */
955 if (!adev->pm.dpm_enabled && 1171 if (!adev->pm.dpm_enabled &&
@@ -961,14 +1177,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
961 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 1177 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
962 return 0; 1178 return 0;
963 1179
964 /* Skip fan attributes if fan is not present */
965 if (adev->pm.no_fan &&
966 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
967 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
968 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
969 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
970 return 0;
971
972 /* mask fan attributes if we have no bindings for this asic to expose */ 1180 /* mask fan attributes if we have no bindings for this asic to expose */
973 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent && 1181 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
974 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 1182 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
@@ -982,6 +1190,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
982 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 1190 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
983 effective_mode &= ~S_IWUSR; 1191 effective_mode &= ~S_IWUSR;
984 1192
1193 if ((adev->flags & AMD_IS_APU) &&
1194 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 1195 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
1196 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
1197 return 0;
1198
985 /* hide max/min values if we can't both query and manage the fan */ 1199 /* hide max/min values if we can't both query and manage the fan */
986 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 1200 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
987 !adev->powerplay.pp_funcs->get_fan_speed_percent) && 1201 !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
@@ -989,8 +1203,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
989 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 1203 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
990 return 0; 1204 return 0;
991 1205
992 /* requires powerplay */ 1206 /* only APUs have vddnb */
993 if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr) 1207 if (!(adev->flags & AMD_IS_APU) &&
1208 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
1209 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
994 return 0; 1210 return 0;
995 1211
996 return effective_mode; 1212 return effective_mode;
@@ -1013,13 +1229,15 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1013 pm.dpm.thermal.work); 1229 pm.dpm.thermal.work);
1014 /* switch to the thermal state */ 1230 /* switch to the thermal state */
1015 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 1231 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1232 int temp, size = sizeof(temp);
1016 1233
1017 if (!adev->pm.dpm_enabled) 1234 if (!adev->pm.dpm_enabled)
1018 return; 1235 return;
1019 1236
1020 if (adev->powerplay.pp_funcs->get_temperature) { 1237 if (adev->powerplay.pp_funcs &&
1021 int temp = amdgpu_dpm_get_temperature(adev); 1238 adev->powerplay.pp_funcs->read_sensor &&
1022 1239 !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1240 (void *)&temp, &size)) {
1023 if (temp < adev->pm.dpm.thermal.min_temp) 1241 if (temp < adev->pm.dpm.thermal.min_temp)
1024 /* switch back the user state */ 1242 /* switch back the user state */
1025 dpm_state = adev->pm.dpm.user_state; 1243 dpm_state = adev->pm.dpm.user_state;
@@ -1319,9 +1537,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
1319 if (adev->pm.dpm_enabled == 0) 1537 if (adev->pm.dpm_enabled == 0)
1320 return 0; 1538 return 0;
1321 1539
1322 if (adev->powerplay.pp_funcs->get_temperature == NULL)
1323 return 0;
1324
1325 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 1540 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
1326 DRIVER_NAME, adev, 1541 DRIVER_NAME, adev,
1327 hwmon_groups); 1542 hwmon_groups);
@@ -1391,20 +1606,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
1391 return ret; 1606 return ret;
1392 } 1607 }
1393 ret = device_create_file(adev->dev, 1608 ret = device_create_file(adev->dev,
1394 &dev_attr_pp_gfx_power_profile); 1609 &dev_attr_pp_power_profile_mode);
1395 if (ret) { 1610 if (ret) {
1396 DRM_ERROR("failed to create device file " 1611 DRM_ERROR("failed to create device file "
1397 "pp_gfx_power_profile\n"); 1612 "pp_power_profile_mode\n");
1398 return ret; 1613 return ret;
1399 } 1614 }
1400 ret = device_create_file(adev->dev, 1615 ret = device_create_file(adev->dev,
1401 &dev_attr_pp_compute_power_profile); 1616 &dev_attr_pp_od_clk_voltage);
1402 if (ret) { 1617 if (ret) {
1403 DRM_ERROR("failed to create device file " 1618 DRM_ERROR("failed to create device file "
1404 "pp_compute_power_profile\n"); 1619 "pp_od_clk_voltage\n");
1405 return ret; 1620 return ret;
1406 } 1621 }
1407
1408 ret = amdgpu_debugfs_pm_init(adev); 1622 ret = amdgpu_debugfs_pm_init(adev);
1409 if (ret) { 1623 if (ret) {
1410 DRM_ERROR("Failed to register debugfs file for dpm!\n"); 1624 DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1437,9 +1651,9 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
1437 device_remove_file(adev->dev, &dev_attr_pp_sclk_od); 1651 device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
1438 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 1652 device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
1439 device_remove_file(adev->dev, 1653 device_remove_file(adev->dev,
1440 &dev_attr_pp_gfx_power_profile); 1654 &dev_attr_pp_power_profile_mode);
1441 device_remove_file(adev->dev, 1655 device_remove_file(adev->dev,
1442 &dev_attr_pp_compute_power_profile); 1656 &dev_attr_pp_od_clk_voltage);
1443} 1657}
1444 1658
1445void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) 1659void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -1462,7 +1676,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1462 } 1676 }
1463 1677
1464 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1678 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1465 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL); 1679 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
1466 } else { 1680 } else {
1467 mutex_lock(&adev->pm.mutex); 1681 mutex_lock(&adev->pm.mutex);
1468 adev->pm.dpm.new_active_crtcs = 0; 1682 adev->pm.dpm.new_active_crtcs = 0;
@@ -1512,6 +1726,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
1512 seq_printf(m, "\t%u MHz (MCLK)\n", value/100); 1726 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
1513 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) 1727 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
1514 seq_printf(m, "\t%u MHz (SCLK)\n", value/100); 1728 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
1729 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
1730 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
1731 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
1732 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
1515 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) 1733 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
1516 seq_printf(m, "\t%u mV (VDDGFX)\n", value); 1734 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
1517 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 1735 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
deleted file mode 100644
index 5f5aa5fddc16..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ /dev/null
@@ -1,290 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "atom.h"
26#include "amdgpu.h"
27#include "amd_shared.h"
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include "amdgpu_pm.h"
31#include <drm/amdgpu_drm.h>
32#include "amdgpu_powerplay.h"
33#include "si_dpm.h"
34#include "cik_dpm.h"
35#include "vi_dpm.h"
36
37static int amdgpu_pp_early_init(void *handle)
38{
39 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
40 struct amd_powerplay *amd_pp;
41 int ret = 0;
42
43 amd_pp = &(adev->powerplay);
44 amd_pp->pp_handle = (void *)adev;
45
46 switch (adev->asic_type) {
47 case CHIP_POLARIS11:
48 case CHIP_POLARIS10:
49 case CHIP_POLARIS12:
50 case CHIP_TONGA:
51 case CHIP_FIJI:
52 case CHIP_TOPAZ:
53 case CHIP_CARRIZO:
54 case CHIP_STONEY:
55 case CHIP_VEGA10:
56 case CHIP_RAVEN:
57 amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
58 amd_pp->ip_funcs = &pp_ip_funcs;
59 amd_pp->pp_funcs = &pp_dpm_funcs;
60 break;
61 /* These chips don't have powerplay implementations */
62#ifdef CONFIG_DRM_AMDGPU_SI
63 case CHIP_TAHITI:
64 case CHIP_PITCAIRN:
65 case CHIP_VERDE:
66 case CHIP_OLAND:
67 case CHIP_HAINAN:
68 amd_pp->ip_funcs = &si_dpm_ip_funcs;
69 amd_pp->pp_funcs = &si_dpm_funcs;
70 break;
71#endif
72#ifdef CONFIG_DRM_AMDGPU_CIK
73 case CHIP_BONAIRE:
74 case CHIP_HAWAII:
75 if (amdgpu_dpm == -1) {
76 amd_pp->ip_funcs = &ci_dpm_ip_funcs;
77 amd_pp->pp_funcs = &ci_dpm_funcs;
78 } else {
79 amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
80 amd_pp->ip_funcs = &pp_ip_funcs;
81 amd_pp->pp_funcs = &pp_dpm_funcs;
82 }
83 break;
84 case CHIP_KABINI:
85 case CHIP_MULLINS:
86 case CHIP_KAVERI:
87 amd_pp->ip_funcs = &kv_dpm_ip_funcs;
88 amd_pp->pp_funcs = &kv_dpm_funcs;
89 break;
90#endif
91 default:
92 ret = -EINVAL;
93 break;
94 }
95
96 if (adev->powerplay.ip_funcs->early_init)
97 ret = adev->powerplay.ip_funcs->early_init(
98 amd_pp->cgs_device ? amd_pp->cgs_device :
99 amd_pp->pp_handle);
100
101 return ret;
102}
103
104
105static int amdgpu_pp_late_init(void *handle)
106{
107 int ret = 0;
108 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
109
110 if (adev->powerplay.ip_funcs->late_init)
111 ret = adev->powerplay.ip_funcs->late_init(
112 adev->powerplay.pp_handle);
113
114 return ret;
115}
116
117static int amdgpu_pp_sw_init(void *handle)
118{
119 int ret = 0;
120 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
121
122 if (adev->powerplay.ip_funcs->sw_init)
123 ret = adev->powerplay.ip_funcs->sw_init(
124 adev->powerplay.pp_handle);
125
126 return ret;
127}
128
129static int amdgpu_pp_sw_fini(void *handle)
130{
131 int ret = 0;
132 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
133
134 if (adev->powerplay.ip_funcs->sw_fini)
135 ret = adev->powerplay.ip_funcs->sw_fini(
136 adev->powerplay.pp_handle);
137 if (ret)
138 return ret;
139
140 return ret;
141}
142
143static int amdgpu_pp_hw_init(void *handle)
144{
145 int ret = 0;
146 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
147
148 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
149 amdgpu_ucode_init_bo(adev);
150
151 if (adev->powerplay.ip_funcs->hw_init)
152 ret = adev->powerplay.ip_funcs->hw_init(
153 adev->powerplay.pp_handle);
154
155 return ret;
156}
157
158static int amdgpu_pp_hw_fini(void *handle)
159{
160 int ret = 0;
161 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
162
163 if (adev->powerplay.ip_funcs->hw_fini)
164 ret = adev->powerplay.ip_funcs->hw_fini(
165 adev->powerplay.pp_handle);
166
167 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
168 amdgpu_ucode_fini_bo(adev);
169
170 return ret;
171}
172
173static void amdgpu_pp_late_fini(void *handle)
174{
175 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
176
177 if (adev->powerplay.ip_funcs->late_fini)
178 adev->powerplay.ip_funcs->late_fini(
179 adev->powerplay.pp_handle);
180
181 if (adev->powerplay.cgs_device)
182 amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
183}
184
185static int amdgpu_pp_suspend(void *handle)
186{
187 int ret = 0;
188 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
189
190 if (adev->powerplay.ip_funcs->suspend)
191 ret = adev->powerplay.ip_funcs->suspend(
192 adev->powerplay.pp_handle);
193 return ret;
194}
195
196static int amdgpu_pp_resume(void *handle)
197{
198 int ret = 0;
199 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
200
201 if (adev->powerplay.ip_funcs->resume)
202 ret = adev->powerplay.ip_funcs->resume(
203 adev->powerplay.pp_handle);
204 return ret;
205}
206
207static int amdgpu_pp_set_clockgating_state(void *handle,
208 enum amd_clockgating_state state)
209{
210 int ret = 0;
211 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
212
213 if (adev->powerplay.ip_funcs->set_clockgating_state)
214 ret = adev->powerplay.ip_funcs->set_clockgating_state(
215 adev->powerplay.pp_handle, state);
216 return ret;
217}
218
219static int amdgpu_pp_set_powergating_state(void *handle,
220 enum amd_powergating_state state)
221{
222 int ret = 0;
223 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
224
225 if (adev->powerplay.ip_funcs->set_powergating_state)
226 ret = adev->powerplay.ip_funcs->set_powergating_state(
227 adev->powerplay.pp_handle, state);
228 return ret;
229}
230
231
232static bool amdgpu_pp_is_idle(void *handle)
233{
234 bool ret = true;
235 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
236
237 if (adev->powerplay.ip_funcs->is_idle)
238 ret = adev->powerplay.ip_funcs->is_idle(
239 adev->powerplay.pp_handle);
240 return ret;
241}
242
243static int amdgpu_pp_wait_for_idle(void *handle)
244{
245 int ret = 0;
246 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
247
248 if (adev->powerplay.ip_funcs->wait_for_idle)
249 ret = adev->powerplay.ip_funcs->wait_for_idle(
250 adev->powerplay.pp_handle);
251 return ret;
252}
253
254static int amdgpu_pp_soft_reset(void *handle)
255{
256 int ret = 0;
257 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
258
259 if (adev->powerplay.ip_funcs->soft_reset)
260 ret = adev->powerplay.ip_funcs->soft_reset(
261 adev->powerplay.pp_handle);
262 return ret;
263}
264
265static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
266 .name = "amdgpu_powerplay",
267 .early_init = amdgpu_pp_early_init,
268 .late_init = amdgpu_pp_late_init,
269 .sw_init = amdgpu_pp_sw_init,
270 .sw_fini = amdgpu_pp_sw_fini,
271 .hw_init = amdgpu_pp_hw_init,
272 .hw_fini = amdgpu_pp_hw_fini,
273 .late_fini = amdgpu_pp_late_fini,
274 .suspend = amdgpu_pp_suspend,
275 .resume = amdgpu_pp_resume,
276 .is_idle = amdgpu_pp_is_idle,
277 .wait_for_idle = amdgpu_pp_wait_for_idle,
278 .soft_reset = amdgpu_pp_soft_reset,
279 .set_clockgating_state = amdgpu_pp_set_clockgating_state,
280 .set_powergating_state = amdgpu_pp_set_powergating_state,
281};
282
283const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
284{
285 .type = AMD_IP_BLOCK_TYPE_SMC,
286 .major = 1,
287 .minor = 0,
288 .rev = 0,
289 .funcs = &amdgpu_pp_ip_funcs,
290};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index ae9c106979d7..1c9991738477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -26,9 +26,12 @@
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27 27
28#include "amdgpu.h" 28#include "amdgpu.h"
29#include "amdgpu_display.h"
29#include <drm/amdgpu_drm.h> 30#include <drm/amdgpu_drm.h>
30#include <linux/dma-buf.h> 31#include <linux/dma-buf.h>
31 32
33static const struct dma_buf_ops amdgpu_dmabuf_ops;
34
32struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) 35struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
33{ 36{
34 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 37 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -102,59 +105,93 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
102 int ret; 105 int ret;
103 106
104 ww_mutex_lock(&resv->lock, NULL); 107 ww_mutex_lock(&resv->lock, NULL);
105 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, 108 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
106 AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo); 109 AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
107 ww_mutex_unlock(&resv->lock); 110 resv, &bo);
108 if (ret) 111 if (ret)
109 return ERR_PTR(ret); 112 goto error;
110 113
111 bo->prime_shared_count = 1; 114 bo->tbo.sg = sg;
115 bo->tbo.ttm->sg = sg;
116 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
117 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
118 if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
119 bo->prime_shared_count = 1;
120
121 ww_mutex_unlock(&resv->lock);
112 return &bo->gem_base; 122 return &bo->gem_base;
123
124error:
125 ww_mutex_unlock(&resv->lock);
126 return ERR_PTR(ret);
113} 127}
114 128
115int amdgpu_gem_prime_pin(struct drm_gem_object *obj) 129static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
130 struct device *target_dev,
131 struct dma_buf_attachment *attach)
116{ 132{
133 struct drm_gem_object *obj = dma_buf->priv;
117 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 134 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
118 long ret = 0; 135 long r;
119 136
120 ret = amdgpu_bo_reserve(bo, false); 137 r = drm_gem_map_attach(dma_buf, target_dev, attach);
121 if (unlikely(ret != 0)) 138 if (r)
122 return ret; 139 return r;
123 140
124 /* 141 r = amdgpu_bo_reserve(bo, false);
125 * Wait for all shared fences to complete before we switch to future 142 if (unlikely(r != 0))
126 * use of exclusive fence on this prime shared bo. 143 goto error_detach;
127 */ 144
128 ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, 145
129 MAX_SCHEDULE_TIMEOUT); 146 if (dma_buf->ops != &amdgpu_dmabuf_ops) {
130 if (unlikely(ret < 0)) { 147 /*
131 DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret); 148 * Wait for all shared fences to complete before we switch to future
132 amdgpu_bo_unreserve(bo); 149 * use of exclusive fence on this prime shared bo.
133 return ret; 150 */
151 r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
152 true, false,
153 MAX_SCHEDULE_TIMEOUT);
154 if (unlikely(r < 0)) {
155 DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
156 goto error_unreserve;
157 }
134 } 158 }
135 159
136 /* pin buffer into GTT */ 160 /* pin buffer into GTT */
137 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); 161 r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
138 if (likely(ret == 0)) 162 if (r)
163 goto error_unreserve;
164
165 if (dma_buf->ops != &amdgpu_dmabuf_ops)
139 bo->prime_shared_count++; 166 bo->prime_shared_count++;
140 167
168error_unreserve:
141 amdgpu_bo_unreserve(bo); 169 amdgpu_bo_unreserve(bo);
142 return ret; 170
171error_detach:
172 if (r)
173 drm_gem_map_detach(dma_buf, attach);
174 return r;
143} 175}
144 176
145void amdgpu_gem_prime_unpin(struct drm_gem_object *obj) 177static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
178 struct dma_buf_attachment *attach)
146{ 179{
180 struct drm_gem_object *obj = dma_buf->priv;
147 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 181 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
148 int ret = 0; 182 int ret = 0;
149 183
150 ret = amdgpu_bo_reserve(bo, true); 184 ret = amdgpu_bo_reserve(bo, true);
151 if (unlikely(ret != 0)) 185 if (unlikely(ret != 0))
152 return; 186 goto error;
153 187
154 amdgpu_bo_unpin(bo); 188 amdgpu_bo_unpin(bo);
155 if (bo->prime_shared_count) 189 if (dma_buf->ops != &amdgpu_dmabuf_ops && bo->prime_shared_count)
156 bo->prime_shared_count--; 190 bo->prime_shared_count--;
157 amdgpu_bo_unreserve(bo); 191 amdgpu_bo_unreserve(bo);
192
193error:
194 drm_gem_map_detach(dma_buf, attach);
158} 195}
159 196
160struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) 197struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
@@ -164,6 +201,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
164 return bo->tbo.resv; 201 return bo->tbo.resv;
165} 202}
166 203
204static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
205 enum dma_data_direction direction)
206{
207 struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
208 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
209 struct ttm_operation_ctx ctx = { true, false };
210 u32 domain = amdgpu_display_framebuffer_domains(adev);
211 int ret;
212 bool reads = (direction == DMA_BIDIRECTIONAL ||
213 direction == DMA_FROM_DEVICE);
214
215 if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
216 return 0;
217
218 /* move to gtt */
219 ret = amdgpu_bo_reserve(bo, false);
220 if (unlikely(ret != 0))
221 return ret;
222
223 if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
224 amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
225 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
226 }
227
228 amdgpu_bo_unreserve(bo);
229 return ret;
230}
231
232static const struct dma_buf_ops amdgpu_dmabuf_ops = {
233 .attach = amdgpu_gem_map_attach,
234 .detach = amdgpu_gem_map_detach,
235 .map_dma_buf = drm_gem_map_dma_buf,
236 .unmap_dma_buf = drm_gem_unmap_dma_buf,
237 .release = drm_gem_dmabuf_release,
238 .begin_cpu_access = amdgpu_gem_begin_cpu_access,
239 .map = drm_gem_dmabuf_kmap,
240 .map_atomic = drm_gem_dmabuf_kmap_atomic,
241 .unmap = drm_gem_dmabuf_kunmap,
242 .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
243 .mmap = drm_gem_dmabuf_mmap,
244 .vmap = drm_gem_dmabuf_vmap,
245 .vunmap = drm_gem_dmabuf_vunmap,
246};
247
167struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, 248struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
168 struct drm_gem_object *gobj, 249 struct drm_gem_object *gobj,
169 int flags) 250 int flags)
@@ -176,7 +257,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
176 return ERR_PTR(-EPERM); 257 return ERR_PTR(-EPERM);
177 258
178 buf = drm_gem_prime_export(dev, gobj, flags); 259 buf = drm_gem_prime_export(dev, gobj, flags);
179 if (!IS_ERR(buf)) 260 if (!IS_ERR(buf)) {
180 buf->file->f_mapping = dev->anon_inode->i_mapping; 261 buf->file->f_mapping = dev->anon_inode->i_mapping;
262 buf->ops = &amdgpu_dmabuf_ops;
263 }
264
181 return buf; 265 return buf;
182} 266}
267
268struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
269 struct dma_buf *dma_buf)
270{
271 struct drm_gem_object *obj;
272
273 if (dma_buf->ops == &amdgpu_dmabuf_ops) {
274 obj = dma_buf->priv;
275 if (obj->dev == dev) {
276 /*
277 * Importing dmabuf exported from our own gem increases
278 * refcount on gem itself instead of f_count of dmabuf.
279 */
280 drm_gem_object_get(obj);
281 return obj;
282 }
283 }
284
285 return drm_gem_prime_import(dev, dma_buf);
286}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 2157d4509e84..9a75410cd576 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -51,29 +51,10 @@ static int psp_sw_init(void *handle)
51 51
52 switch (adev->asic_type) { 52 switch (adev->asic_type) {
53 case CHIP_VEGA10: 53 case CHIP_VEGA10:
54 psp->init_microcode = psp_v3_1_init_microcode; 54 psp_v3_1_set_psp_funcs(psp);
55 psp->bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv;
56 psp->bootloader_load_sos = psp_v3_1_bootloader_load_sos;
57 psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
58 psp->ring_init = psp_v3_1_ring_init;
59 psp->ring_create = psp_v3_1_ring_create;
60 psp->ring_stop = psp_v3_1_ring_stop;
61 psp->ring_destroy = psp_v3_1_ring_destroy;
62 psp->cmd_submit = psp_v3_1_cmd_submit;
63 psp->compare_sram_data = psp_v3_1_compare_sram_data;
64 psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
65 psp->mode1_reset = psp_v3_1_mode1_reset;
66 break; 55 break;
67 case CHIP_RAVEN: 56 case CHIP_RAVEN:
68 psp->init_microcode = psp_v10_0_init_microcode; 57 psp_v10_0_set_psp_funcs(psp);
69 psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
70 psp->ring_init = psp_v10_0_ring_init;
71 psp->ring_create = psp_v10_0_ring_create;
72 psp->ring_stop = psp_v10_0_ring_stop;
73 psp->ring_destroy = psp_v10_0_ring_destroy;
74 psp->cmd_submit = psp_v10_0_cmd_submit;
75 psp->compare_sram_data = psp_v10_0_compare_sram_data;
76 psp->mode1_reset = psp_v10_0_mode1_reset;
77 break; 58 break;
78 default: 59 default:
79 return -EINVAL; 60 return -EINVAL;
@@ -81,6 +62,9 @@ static int psp_sw_init(void *handle)
81 62
82 psp->adev = adev; 63 psp->adev = adev;
83 64
65 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
66 return 0;
67
84 ret = psp_init_microcode(psp); 68 ret = psp_init_microcode(psp);
85 if (ret) { 69 if (ret) {
86 DRM_ERROR("Failed to load psp firmware!\n"); 70 DRM_ERROR("Failed to load psp firmware!\n");
@@ -94,6 +78,9 @@ static int psp_sw_fini(void *handle)
94{ 78{
95 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 79 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
96 80
81 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
82 return 0;
83
97 release_firmware(adev->psp.sos_fw); 84 release_firmware(adev->psp.sos_fw);
98 adev->psp.sos_fw = NULL; 85 adev->psp.sos_fw = NULL;
99 release_firmware(adev->psp.asd_fw); 86 release_firmware(adev->psp.asd_fw);
@@ -472,6 +459,9 @@ static int psp_suspend(void *handle)
472 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 459 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
473 struct psp_context *psp = &adev->psp; 460 struct psp_context *psp = &adev->psp;
474 461
462 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
463 return 0;
464
475 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 465 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
476 if (ret) { 466 if (ret) {
477 DRM_ERROR("PSP ring stop failed\n"); 467 DRM_ERROR("PSP ring stop failed\n");
@@ -512,19 +502,8 @@ failed:
512 return ret; 502 return ret;
513} 503}
514 504
515static bool psp_check_reset(void* handle) 505int psp_gpu_reset(struct amdgpu_device *adev)
516{ 506{
517 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
518
519 if (adev->flags & AMD_IS_APU)
520 return true;
521
522 return false;
523}
524
525static int psp_reset(void* handle)
526{
527 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
528 return psp_mode1_reset(&adev->psp); 507 return psp_mode1_reset(&adev->psp);
529} 508}
530 509
@@ -571,9 +550,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
571 .suspend = psp_suspend, 550 .suspend = psp_suspend,
572 .resume = psp_resume, 551 .resume = psp_resume,
573 .is_idle = NULL, 552 .is_idle = NULL,
574 .check_soft_reset = psp_check_reset, 553 .check_soft_reset = NULL,
575 .wait_for_idle = NULL, 554 .wait_for_idle = NULL,
576 .soft_reset = psp_reset, 555 .soft_reset = NULL,
577 .set_clockgating_state = psp_set_clockgating_state, 556 .set_clockgating_state = psp_set_clockgating_state,
578 .set_powergating_state = psp_set_powergating_state, 557 .set_powergating_state = psp_set_powergating_state,
579}; 558};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index ce4654550416..129209686848 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -33,6 +33,8 @@
33#define PSP_ASD_SHARED_MEM_SIZE 0x4000 33#define PSP_ASD_SHARED_MEM_SIZE 0x4000
34#define PSP_1_MEG 0x100000 34#define PSP_1_MEG 0x100000
35 35
36struct psp_context;
37
36enum psp_ring_type 38enum psp_ring_type
37{ 39{
38 PSP_RING_TYPE__INVALID = 0, 40 PSP_RING_TYPE__INVALID = 0,
@@ -53,12 +55,8 @@ struct psp_ring
53 uint32_t ring_size; 55 uint32_t ring_size;
54}; 56};
55 57
56struct psp_context 58struct psp_funcs
57{ 59{
58 struct amdgpu_device *adev;
59 struct psp_ring km_ring;
60 struct psp_gfx_cmd_resp *cmd;
61
62 int (*init_microcode)(struct psp_context *psp); 60 int (*init_microcode)(struct psp_context *psp);
63 int (*bootloader_load_sysdrv)(struct psp_context *psp); 61 int (*bootloader_load_sysdrv)(struct psp_context *psp);
64 int (*bootloader_load_sos)(struct psp_context *psp); 62 int (*bootloader_load_sos)(struct psp_context *psp);
@@ -77,6 +75,15 @@ struct psp_context
77 enum AMDGPU_UCODE_ID ucode_type); 75 enum AMDGPU_UCODE_ID ucode_type);
78 bool (*smu_reload_quirk)(struct psp_context *psp); 76 bool (*smu_reload_quirk)(struct psp_context *psp);
79 int (*mode1_reset)(struct psp_context *psp); 77 int (*mode1_reset)(struct psp_context *psp);
78};
79
80struct psp_context
81{
82 struct amdgpu_device *adev;
83 struct psp_ring km_ring;
84 struct psp_gfx_cmd_resp *cmd;
85
86 const struct psp_funcs *funcs;
80 87
81 /* fence buffer */ 88 /* fence buffer */
82 struct amdgpu_bo *fw_pri_bo; 89 struct amdgpu_bo *fw_pri_bo;
@@ -123,25 +130,25 @@ struct amdgpu_psp_funcs {
123 enum AMDGPU_UCODE_ID); 130 enum AMDGPU_UCODE_ID);
124}; 131};
125 132
126#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type)) 133#define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
127#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type)) 134#define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type))
128#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type)) 135#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
129#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type)) 136#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
130#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type))) 137#define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
131#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \ 138#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
132 (psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index)) 139 (psp)->funcs->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
133#define psp_compare_sram_data(psp, ucode, type) \ 140#define psp_compare_sram_data(psp, ucode, type) \
134 (psp)->compare_sram_data((psp), (ucode), (type)) 141 (psp)->funcs->compare_sram_data((psp), (ucode), (type))
135#define psp_init_microcode(psp) \ 142#define psp_init_microcode(psp) \
136 ((psp)->init_microcode ? (psp)->init_microcode((psp)) : 0) 143 ((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
137#define psp_bootloader_load_sysdrv(psp) \ 144#define psp_bootloader_load_sysdrv(psp) \
138 ((psp)->bootloader_load_sysdrv ? (psp)->bootloader_load_sysdrv((psp)) : 0) 145 ((psp)->funcs->bootloader_load_sysdrv ? (psp)->funcs->bootloader_load_sysdrv((psp)) : 0)
139#define psp_bootloader_load_sos(psp) \ 146#define psp_bootloader_load_sos(psp) \
140 ((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0) 147 ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
141#define psp_smu_reload_quirk(psp) \ 148#define psp_smu_reload_quirk(psp) \
142 ((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false) 149 ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
143#define psp_mode1_reset(psp) \ 150#define psp_mode1_reset(psp) \
144 ((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false) 151 ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
145 152
146extern const struct amd_ip_funcs psp_ip_funcs; 153extern const struct amd_ip_funcs psp_ip_funcs;
147 154
@@ -151,4 +158,6 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
151 158
152extern const struct amdgpu_ip_block_version psp_v10_0_ip_block; 159extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
153 160
161int psp_gpu_reset(struct amdgpu_device *adev);
162
154#endif 163#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 13044e66dcaf..d5f526f38e50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -360,6 +360,9 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
360 360
361 amdgpu_debugfs_ring_fini(ring); 361 amdgpu_debugfs_ring_fini(ring);
362 362
363 dma_fence_put(ring->vmid_wait);
364 ring->vmid_wait = NULL;
365
363 ring->adev->rings[ring->idx] = NULL; 366 ring->adev->rings[ring->idx] = NULL;
364} 367}
365 368
@@ -481,7 +484,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
481 result = 0; 484 result = 0;
482 485
483 if (*pos < 12) { 486 if (*pos < 12) {
484 early[0] = amdgpu_ring_get_rptr(ring); 487 early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
485 early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask; 488 early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
486 early[2] = ring->wptr & ring->buf_mask; 489 early[2] = ring->wptr & ring->buf_mask;
487 for (i = *pos / 4; i < 3 && size; i++) { 490 for (i = *pos / 4; i < 3 && size; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 102dad3edf6a..1a5911882657 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -26,6 +26,7 @@
26 26
27#include <drm/amdgpu_drm.h> 27#include <drm/amdgpu_drm.h>
28#include <drm/gpu_scheduler.h> 28#include <drm/gpu_scheduler.h>
29#include <drm/drm_print.h>
29 30
30/* max number of rings */ 31/* max number of rings */
31#define AMDGPU_MAX_RINGS 18 32#define AMDGPU_MAX_RINGS 18
@@ -35,8 +36,9 @@
35#define AMDGPU_MAX_UVD_ENC_RINGS 2 36#define AMDGPU_MAX_UVD_ENC_RINGS 2
36 37
37/* some special values for the owner field */ 38/* some special values for the owner field */
38#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul) 39#define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul)
39#define AMDGPU_FENCE_OWNER_VM ((void*)1ul) 40#define AMDGPU_FENCE_OWNER_VM ((void *)1ul)
41#define AMDGPU_FENCE_OWNER_KFD ((void *)2ul)
40 42
41#define AMDGPU_FENCE_FLAG_64BIT (1 << 0) 43#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
42#define AMDGPU_FENCE_FLAG_INT (1 << 1) 44#define AMDGPU_FENCE_FLAG_INT (1 << 1)
@@ -128,7 +130,6 @@ struct amdgpu_ring_funcs {
128 void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid, 130 void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
129 uint64_t pd_addr); 131 uint64_t pd_addr);
130 void (*emit_hdp_flush)(struct amdgpu_ring *ring); 132 void (*emit_hdp_flush)(struct amdgpu_ring *ring);
131 void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
132 void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, 133 void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
133 uint32_t gds_base, uint32_t gds_size, 134 uint32_t gds_base, uint32_t gds_size,
134 uint32_t gws_base, uint32_t gws_size, 135 uint32_t gws_base, uint32_t gws_size,
@@ -151,6 +152,8 @@ struct amdgpu_ring_funcs {
151 void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); 152 void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
152 void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg); 153 void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
153 void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); 154 void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
155 void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
156 uint32_t val, uint32_t mask);
154 void (*emit_tmz)(struct amdgpu_ring *ring, bool start); 157 void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
155 /* priority functions */ 158 /* priority functions */
156 void (*set_priority) (struct amdgpu_ring *ring, 159 void (*set_priority) (struct amdgpu_ring *ring,
@@ -195,6 +198,7 @@ struct amdgpu_ring {
195 u64 cond_exe_gpu_addr; 198 u64 cond_exe_gpu_addr;
196 volatile u32 *cond_exe_cpu_addr; 199 volatile u32 *cond_exe_cpu_addr;
197 unsigned vm_inv_eng; 200 unsigned vm_inv_eng;
201 struct dma_fence *vmid_wait;
198 bool has_compute_vm_bug; 202 bool has_compute_vm_bug;
199 203
200 atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; 204 atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 3144400435b7..fb1667b35daa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -63,21 +63,27 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
63 for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) 63 for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
64 INIT_LIST_HEAD(&sa_manager->flist[i]); 64 INIT_LIST_HEAD(&sa_manager->flist[i]);
65 65
66 r = amdgpu_bo_create(adev, size, align, true, domain, 66 r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
67 0, NULL, NULL, 0, &sa_manager->bo); 67 &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
68 if (r) { 68 if (r) {
69 dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); 69 dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
70 return r; 70 return r;
71 } 71 }
72 72
73 memset(sa_manager->cpu_ptr, 0, sa_manager->size);
73 return r; 74 return r;
74} 75}
75 76
76void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev, 77void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
77 struct amdgpu_sa_manager *sa_manager) 78 struct amdgpu_sa_manager *sa_manager)
78{ 79{
79 struct amdgpu_sa_bo *sa_bo, *tmp; 80 struct amdgpu_sa_bo *sa_bo, *tmp;
80 81
82 if (sa_manager->bo == NULL) {
83 dev_err(adev->dev, "no bo for sa manager\n");
84 return;
85 }
86
81 if (!list_empty(&sa_manager->olist)) { 87 if (!list_empty(&sa_manager->olist)) {
82 sa_manager->hole = &sa_manager->olist, 88 sa_manager->hole = &sa_manager->olist,
83 amdgpu_sa_bo_try_free(sa_manager); 89 amdgpu_sa_bo_try_free(sa_manager);
@@ -88,55 +94,9 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
88 list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) { 94 list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
89 amdgpu_sa_bo_remove_locked(sa_bo); 95 amdgpu_sa_bo_remove_locked(sa_bo);
90 } 96 }
91 amdgpu_bo_unref(&sa_manager->bo);
92 sa_manager->size = 0;
93}
94
95int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
96 struct amdgpu_sa_manager *sa_manager)
97{
98 int r;
99
100 if (sa_manager->bo == NULL) {
101 dev_err(adev->dev, "no bo for sa manager\n");
102 return -EINVAL;
103 }
104 97
105 /* map the buffer */ 98 amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
106 r = amdgpu_bo_reserve(sa_manager->bo, false); 99 sa_manager->size = 0;
107 if (r) {
108 dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
109 return r;
110 }
111 r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
112 if (r) {
113 amdgpu_bo_unreserve(sa_manager->bo);
114 dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
115 return r;
116 }
117 r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
118 memset(sa_manager->cpu_ptr, 0, sa_manager->size);
119 amdgpu_bo_unreserve(sa_manager->bo);
120 return r;
121}
122
123int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
124 struct amdgpu_sa_manager *sa_manager)
125{
126 int r;
127
128 if (sa_manager->bo == NULL) {
129 dev_err(adev->dev, "no bo for sa manager\n");
130 return -EINVAL;
131 }
132
133 r = amdgpu_bo_reserve(sa_manager->bo, true);
134 if (!r) {
135 amdgpu_bo_kunmap(sa_manager->bo);
136 amdgpu_bo_unpin(sa_manager->bo);
137 amdgpu_bo_unreserve(sa_manager->bo);
138 }
139 return r;
140} 100}
141 101
142static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) 102static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index df65c66dc956..2d6f5ec77a68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -31,6 +31,7 @@
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32#include "amdgpu.h" 32#include "amdgpu.h"
33#include "amdgpu_trace.h" 33#include "amdgpu_trace.h"
34#include "amdgpu_amdkfd.h"
34 35
35struct amdgpu_sync_entry { 36struct amdgpu_sync_entry {
36 struct hlist_node node; 37 struct hlist_node node;
@@ -85,11 +86,20 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
85 */ 86 */
86static void *amdgpu_sync_get_owner(struct dma_fence *f) 87static void *amdgpu_sync_get_owner(struct dma_fence *f)
87{ 88{
88 struct drm_sched_fence *s_fence = to_drm_sched_fence(f); 89 struct drm_sched_fence *s_fence;
90 struct amdgpu_amdkfd_fence *kfd_fence;
91
92 if (!f)
93 return AMDGPU_FENCE_OWNER_UNDEFINED;
89 94
95 s_fence = to_drm_sched_fence(f);
90 if (s_fence) 96 if (s_fence)
91 return s_fence->owner; 97 return s_fence->owner;
92 98
99 kfd_fence = to_amdgpu_amdkfd_fence(f);
100 if (kfd_fence)
101 return AMDGPU_FENCE_OWNER_KFD;
102
93 return AMDGPU_FENCE_OWNER_UNDEFINED; 103 return AMDGPU_FENCE_OWNER_UNDEFINED;
94} 104}
95 105
@@ -204,11 +214,18 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
204 for (i = 0; i < flist->shared_count; ++i) { 214 for (i = 0; i < flist->shared_count; ++i) {
205 f = rcu_dereference_protected(flist->shared[i], 215 f = rcu_dereference_protected(flist->shared[i],
206 reservation_object_held(resv)); 216 reservation_object_held(resv));
217 /* We only want to trigger KFD eviction fences on
218 * evict or move jobs. Skip KFD fences otherwise.
219 */
220 fence_owner = amdgpu_sync_get_owner(f);
221 if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
222 owner != AMDGPU_FENCE_OWNER_UNDEFINED)
223 continue;
224
207 if (amdgpu_sync_same_dev(adev, f)) { 225 if (amdgpu_sync_same_dev(adev, f)) {
208 /* VM updates are only interesting 226 /* VM updates are only interesting
209 * for other VM updates and moves. 227 * for other VM updates and moves.
210 */ 228 */
211 fence_owner = amdgpu_sync_get_owner(f);
212 if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) && 229 if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
213 (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) && 230 (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
214 ((owner == AMDGPU_FENCE_OWNER_VM) != 231 ((owner == AMDGPU_FENCE_OWNER_VM) !=
@@ -305,6 +322,41 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit
305 return NULL; 322 return NULL;
306} 323}
307 324
325/**
326 * amdgpu_sync_clone - clone a sync object
327 *
328 * @source: sync object to clone
329 * @clone: pointer to destination sync object
330 *
331 * Adds references to all unsignaled fences in @source to @clone. Also
332 * removes signaled fences from @source while at it.
333 */
334int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
335{
336 struct amdgpu_sync_entry *e;
337 struct hlist_node *tmp;
338 struct dma_fence *f;
339 int i, r;
340
341 hash_for_each_safe(source->fences, i, tmp, e, node) {
342 f = e->fence;
343 if (!dma_fence_is_signaled(f)) {
344 r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
345 if (r)
346 return r;
347 } else {
348 hash_del(&e->node);
349 dma_fence_put(f);
350 kmem_cache_free(amdgpu_sync_slab, e);
351 }
352 }
353
354 dma_fence_put(clone->last_vm_update);
355 clone->last_vm_update = dma_fence_get(source->last_vm_update);
356
357 return 0;
358}
359
308int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr) 360int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
309{ 361{
310 struct amdgpu_sync_entry *e; 362 struct amdgpu_sync_entry *e;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 7aba38d5c9df..10cf23a57f17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -50,6 +50,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
50struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, 50struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
51 struct amdgpu_ring *ring); 51 struct amdgpu_ring *ring);
52struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit); 52struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit);
53int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
53int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr); 54int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
54void amdgpu_sync_free(struct amdgpu_sync *sync); 55void amdgpu_sync_free(struct amdgpu_sync *sync);
55int amdgpu_sync_init(void); 56int amdgpu_sync_init(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index ed8c3739015b..2dbe87591f81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
42 /* Number of tests = 42 /* Number of tests =
43 * (Total GTT - IB pool - writeback page - ring buffers) / test size 43 * (Total GTT - IB pool - writeback page - ring buffers) / test size
44 */ 44 */
45 n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024; 45 n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
46 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 46 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
47 if (adev->rings[i]) 47 if (adev->rings[i])
48 n -= adev->rings[i]->ring_size; 48 n -= adev->rings[i]->ring_size;
@@ -59,9 +59,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
59 goto out_cleanup; 59 goto out_cleanup;
60 } 60 }
61 61
62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
63 AMDGPU_GEM_DOMAIN_VRAM, 0, 63 ttm_bo_type_kernel, NULL, &vram_obj);
64 NULL, NULL, 0, &vram_obj);
65 if (r) { 64 if (r) {
66 DRM_ERROR("Failed to create VRAM object\n"); 65 DRM_ERROR("Failed to create VRAM object\n");
67 goto out_cleanup; 66 goto out_cleanup;
@@ -80,9 +79,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
80 void **vram_start, **vram_end; 79 void **vram_start, **vram_end;
81 struct dma_fence *fence = NULL; 80 struct dma_fence *fence = NULL;
82 81
83 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 82 r = amdgpu_bo_create(adev, size, PAGE_SIZE,
84 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 83 AMDGPU_GEM_DOMAIN_GTT, 0,
85 NULL, 0, gtt_obj + i); 84 ttm_bo_type_kernel, NULL, gtt_obj + i);
86 if (r) { 85 if (r) {
87 DRM_ERROR("Failed to create GTT object %d\n", i); 86 DRM_ERROR("Failed to create GTT object %d\n", i);
88 goto out_lclean; 87 goto out_lclean;
@@ -142,10 +141,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
142 "0x%16llx/0x%16llx)\n", 141 "0x%16llx/0x%16llx)\n",
143 i, *vram_start, gart_start, 142 i, *vram_start, gart_start,
144 (unsigned long long) 143 (unsigned long long)
145 (gart_addr - adev->mc.gart_start + 144 (gart_addr - adev->gmc.gart_start +
146 (void*)gart_start - gtt_map), 145 (void*)gart_start - gtt_map),
147 (unsigned long long) 146 (unsigned long long)
148 (vram_addr - adev->mc.vram_start + 147 (vram_addr - adev->gmc.vram_start +
149 (void*)gart_start - gtt_map)); 148 (void*)gart_start - gtt_map));
150 amdgpu_bo_kunmap(vram_obj); 149 amdgpu_bo_kunmap(vram_obj);
151 goto out_lclean_unpin; 150 goto out_lclean_unpin;
@@ -187,10 +186,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
187 "0x%16llx/0x%16llx)\n", 186 "0x%16llx/0x%16llx)\n",
188 i, *gart_start, vram_start, 187 i, *gart_start, vram_start,
189 (unsigned long long) 188 (unsigned long long)
190 (vram_addr - adev->mc.vram_start + 189 (vram_addr - adev->gmc.vram_start +
191 (void*)vram_start - vram_map), 190 (void*)vram_start - vram_map),
192 (unsigned long long) 191 (unsigned long long)
193 (gart_addr - adev->mc.gart_start + 192 (gart_addr - adev->gmc.gart_start +
194 (void*)vram_start - vram_map)); 193 (void*)vram_start - vram_map));
195 amdgpu_bo_kunmap(gtt_obj[i]); 194 amdgpu_bo_kunmap(gtt_obj[i]);
196 goto out_lclean_unpin; 195 goto out_lclean_unpin;
@@ -200,7 +199,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
200 amdgpu_bo_kunmap(gtt_obj[i]); 199 amdgpu_bo_kunmap(gtt_obj[i]);
201 200
202 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", 201 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
203 gart_addr - adev->mc.gart_start); 202 gart_addr - adev->gmc.gart_start);
204 continue; 203 continue;
205 204
206out_lclean_unpin: 205out_lclean_unpin:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index cace7a93fc94..532263ab6e16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -86,7 +86,7 @@ TRACE_EVENT(amdgpu_iv,
86 __field(unsigned, vmid_src) 86 __field(unsigned, vmid_src)
87 __field(uint64_t, timestamp) 87 __field(uint64_t, timestamp)
88 __field(unsigned, timestamp_src) 88 __field(unsigned, timestamp_src)
89 __field(unsigned, pas_id) 89 __field(unsigned, pasid)
90 __array(unsigned, src_data, 4) 90 __array(unsigned, src_data, 4)
91 ), 91 ),
92 TP_fast_assign( 92 TP_fast_assign(
@@ -97,16 +97,16 @@ TRACE_EVENT(amdgpu_iv,
97 __entry->vmid_src = iv->vmid_src; 97 __entry->vmid_src = iv->vmid_src;
98 __entry->timestamp = iv->timestamp; 98 __entry->timestamp = iv->timestamp;
99 __entry->timestamp_src = iv->timestamp_src; 99 __entry->timestamp_src = iv->timestamp_src;
100 __entry->pas_id = iv->pas_id; 100 __entry->pasid = iv->pasid;
101 __entry->src_data[0] = iv->src_data[0]; 101 __entry->src_data[0] = iv->src_data[0];
102 __entry->src_data[1] = iv->src_data[1]; 102 __entry->src_data[1] = iv->src_data[1];
103 __entry->src_data[2] = iv->src_data[2]; 103 __entry->src_data[2] = iv->src_data[2];
104 __entry->src_data[3] = iv->src_data[3]; 104 __entry->src_data[3] = iv->src_data[3];
105 ), 105 ),
106 TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n", 106 TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x\n",
107 __entry->client_id, __entry->src_id, 107 __entry->client_id, __entry->src_id,
108 __entry->ring_id, __entry->vmid, 108 __entry->ring_id, __entry->vmid,
109 __entry->timestamp, __entry->pas_id, 109 __entry->timestamp, __entry->pasid,
110 __entry->src_data[0], __entry->src_data[1], 110 __entry->src_data[0], __entry->src_data[1],
111 __entry->src_data[2], __entry->src_data[3]) 111 __entry->src_data[2], __entry->src_data[3])
112); 112);
@@ -217,7 +217,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
217 struct amdgpu_job *job), 217 struct amdgpu_job *job),
218 TP_ARGS(vm, ring, job), 218 TP_ARGS(vm, ring, job),
219 TP_STRUCT__entry( 219 TP_STRUCT__entry(
220 __field(struct amdgpu_vm *, vm) 220 __field(u32, pasid)
221 __field(u32, ring) 221 __field(u32, ring)
222 __field(u32, vmid) 222 __field(u32, vmid)
223 __field(u32, vm_hub) 223 __field(u32, vm_hub)
@@ -226,15 +226,15 @@ TRACE_EVENT(amdgpu_vm_grab_id,
226 ), 226 ),
227 227
228 TP_fast_assign( 228 TP_fast_assign(
229 __entry->vm = vm; 229 __entry->pasid = vm->pasid;
230 __entry->ring = ring->idx; 230 __entry->ring = ring->idx;
231 __entry->vmid = job->vmid; 231 __entry->vmid = job->vmid;
232 __entry->vm_hub = ring->funcs->vmhub, 232 __entry->vm_hub = ring->funcs->vmhub,
233 __entry->pd_addr = job->vm_pd_addr; 233 __entry->pd_addr = job->vm_pd_addr;
234 __entry->needs_flush = job->vm_needs_flush; 234 __entry->needs_flush = job->vm_needs_flush;
235 ), 235 ),
236 TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u", 236 TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
237 __entry->vm, __entry->ring, __entry->vmid, 237 __entry->pasid, __entry->ring, __entry->vmid,
238 __entry->vm_hub, __entry->pd_addr, __entry->needs_flush) 238 __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
239); 239);
240 240
@@ -378,6 +378,28 @@ TRACE_EVENT(amdgpu_vm_flush,
378 __entry->vm_hub,__entry->pd_addr) 378 __entry->vm_hub,__entry->pd_addr)
379); 379);
380 380
381DECLARE_EVENT_CLASS(amdgpu_pasid,
382 TP_PROTO(unsigned pasid),
383 TP_ARGS(pasid),
384 TP_STRUCT__entry(
385 __field(unsigned, pasid)
386 ),
387 TP_fast_assign(
388 __entry->pasid = pasid;
389 ),
390 TP_printk("pasid=%u", __entry->pasid)
391);
392
393DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_allocated,
394 TP_PROTO(unsigned pasid),
395 TP_ARGS(pasid)
396);
397
398DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
399 TP_PROTO(unsigned pasid),
400 TP_ARGS(pasid)
401);
402
381TRACE_EVENT(amdgpu_bo_list_set, 403TRACE_EVENT(amdgpu_bo_list_set,
382 TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo), 404 TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
383 TP_ARGS(list, bo), 405 TP_ARGS(list, bo),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d897c4c61a01..e28b73609fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -46,6 +46,7 @@
46#include "amdgpu.h" 46#include "amdgpu.h"
47#include "amdgpu_object.h" 47#include "amdgpu_object.h"
48#include "amdgpu_trace.h" 48#include "amdgpu_trace.h"
49#include "amdgpu_amdkfd.h"
49#include "bif/bif_4_1_d.h" 50#include "bif/bif_4_1_d.h"
50 51
51#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) 52#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
@@ -161,7 +162,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
161 break; 162 break;
162 case TTM_PL_TT: 163 case TTM_PL_TT:
163 man->func = &amdgpu_gtt_mgr_func; 164 man->func = &amdgpu_gtt_mgr_func;
164 man->gpu_offset = adev->mc.gart_start; 165 man->gpu_offset = adev->gmc.gart_start;
165 man->available_caching = TTM_PL_MASK_CACHING; 166 man->available_caching = TTM_PL_MASK_CACHING;
166 man->default_caching = TTM_PL_FLAG_CACHED; 167 man->default_caching = TTM_PL_FLAG_CACHED;
167 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; 168 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -169,7 +170,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
169 case TTM_PL_VRAM: 170 case TTM_PL_VRAM:
170 /* "On-card" video ram */ 171 /* "On-card" video ram */
171 man->func = &amdgpu_vram_mgr_func; 172 man->func = &amdgpu_vram_mgr_func;
172 man->gpu_offset = adev->mc.vram_start; 173 man->gpu_offset = adev->gmc.vram_start;
173 man->flags = TTM_MEMTYPE_FLAG_FIXED | 174 man->flags = TTM_MEMTYPE_FLAG_FIXED |
174 TTM_MEMTYPE_FLAG_MAPPABLE; 175 TTM_MEMTYPE_FLAG_MAPPABLE;
175 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; 176 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
@@ -203,6 +204,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
203 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM 204 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
204 }; 205 };
205 206
207 if (bo->type == ttm_bo_type_sg) {
208 placement->num_placement = 0;
209 placement->num_busy_placement = 0;
210 return;
211 }
212
206 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { 213 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
207 placement->placement = &placements; 214 placement->placement = &placements;
208 placement->busy_placement = &placements; 215 placement->busy_placement = &placements;
@@ -213,13 +220,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
213 abo = ttm_to_amdgpu_bo(bo); 220 abo = ttm_to_amdgpu_bo(bo);
214 switch (bo->mem.mem_type) { 221 switch (bo->mem.mem_type) {
215 case TTM_PL_VRAM: 222 case TTM_PL_VRAM:
216 if (adev->mman.buffer_funcs && 223 if (!adev->mman.buffer_funcs_enabled) {
217 adev->mman.buffer_funcs_ring &&
218 adev->mman.buffer_funcs_ring->ready == false) {
219 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); 224 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
220 } else if (adev->mc.visible_vram_size < adev->mc.real_vram_size && 225 } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
221 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { 226 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
222 unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; 227 unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
223 struct drm_mm_node *node = bo->mem.mm_node; 228 struct drm_mm_node *node = bo->mem.mm_node;
224 unsigned long pages_left; 229 unsigned long pages_left;
225 230
@@ -260,6 +265,13 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
260{ 265{
261 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); 266 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
262 267
268 /*
269 * Don't verify access for KFD BOs. They don't have a GEM
270 * object associated with them.
271 */
272 if (abo->kfd_bo)
273 return 0;
274
263 if (amdgpu_ttm_tt_get_usermm(bo->ttm)) 275 if (amdgpu_ttm_tt_get_usermm(bo->ttm))
264 return -EPERM; 276 return -EPERM;
265 return drm_vma_node_verify_access(&abo->gem_base.vma_node, 277 return drm_vma_node_verify_access(&abo->gem_base.vma_node,
@@ -331,7 +343,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
331 const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE * 343 const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
332 AMDGPU_GPU_PAGE_SIZE); 344 AMDGPU_GPU_PAGE_SIZE);
333 345
334 if (!ring->ready) { 346 if (!adev->mman.buffer_funcs_enabled) {
335 DRM_ERROR("Trying to move memory with ring turned off.\n"); 347 DRM_ERROR("Trying to move memory with ring turned off.\n");
336 return -EINVAL; 348 return -EINVAL;
337 } 349 }
@@ -577,12 +589,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
577 amdgpu_move_null(bo, new_mem); 589 amdgpu_move_null(bo, new_mem);
578 return 0; 590 return 0;
579 } 591 }
580 if (adev->mman.buffer_funcs == NULL || 592
581 adev->mman.buffer_funcs_ring == NULL || 593 if (!adev->mman.buffer_funcs_enabled)
582 !adev->mman.buffer_funcs_ring->ready) {
583 /* use memcpy */
584 goto memcpy; 594 goto memcpy;
585 }
586 595
587 if (old_mem->mem_type == TTM_PL_VRAM && 596 if (old_mem->mem_type == TTM_PL_VRAM &&
588 new_mem->mem_type == TTM_PL_SYSTEM) { 597 new_mem->mem_type == TTM_PL_SYSTEM) {
@@ -621,6 +630,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
621{ 630{
622 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 631 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
623 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); 632 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
633 struct drm_mm_node *mm_node = mem->mm_node;
624 634
625 mem->bus.addr = NULL; 635 mem->bus.addr = NULL;
626 mem->bus.offset = 0; 636 mem->bus.offset = 0;
@@ -638,9 +648,18 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
638 case TTM_PL_VRAM: 648 case TTM_PL_VRAM:
639 mem->bus.offset = mem->start << PAGE_SHIFT; 649 mem->bus.offset = mem->start << PAGE_SHIFT;
640 /* check if it's visible */ 650 /* check if it's visible */
641 if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size) 651 if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
642 return -EINVAL; 652 return -EINVAL;
643 mem->bus.base = adev->mc.aper_base; 653 /* Only physically contiguous buffers apply. In a contiguous
654 * buffer, the size of the first mm_node would match the number of
655 * pages in ttm_mem_reg.
656 */
657 if (adev->mman.aper_base_kaddr &&
658 (mm_node->size == mem->num_pages))
659 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
660 mem->bus.offset;
661
662 mem->bus.base = adev->gmc.aper_base;
644 mem->bus.is_iomem = true; 663 mem->bus.is_iomem = true;
645 break; 664 break;
646 default: 665 default:
@@ -674,7 +693,6 @@ struct amdgpu_ttm_gup_task_list {
674 693
675struct amdgpu_ttm_tt { 694struct amdgpu_ttm_tt {
676 struct ttm_dma_tt ttm; 695 struct ttm_dma_tt ttm;
677 struct amdgpu_device *adev;
678 u64 offset; 696 u64 offset;
679 uint64_t userptr; 697 uint64_t userptr;
680 struct mm_struct *usermm; 698 struct mm_struct *usermm;
@@ -832,6 +850,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
832static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, 850static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
833 struct ttm_mem_reg *bo_mem) 851 struct ttm_mem_reg *bo_mem)
834{ 852{
853 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
835 struct amdgpu_ttm_tt *gtt = (void*)ttm; 854 struct amdgpu_ttm_tt *gtt = (void*)ttm;
836 uint64_t flags; 855 uint64_t flags;
837 int r = 0; 856 int r = 0;
@@ -858,9 +877,9 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
858 return 0; 877 return 0;
859 } 878 }
860 879
861 flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); 880 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
862 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; 881 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
863 r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, 882 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
864 ttm->pages, gtt->ttm.dma_address, flags); 883 ttm->pages, gtt->ttm.dma_address, flags);
865 884
866 if (r) 885 if (r)
@@ -891,7 +910,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
891 placement.num_busy_placement = 1; 910 placement.num_busy_placement = 1;
892 placement.busy_placement = &placements; 911 placement.busy_placement = &placements;
893 placements.fpfn = 0; 912 placements.fpfn = 0;
894 placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; 913 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
895 placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | 914 placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
896 TTM_PL_FLAG_TT; 915 TTM_PL_FLAG_TT;
897 916
@@ -937,6 +956,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
937 956
938static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) 957static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
939{ 958{
959 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
940 struct amdgpu_ttm_tt *gtt = (void *)ttm; 960 struct amdgpu_ttm_tt *gtt = (void *)ttm;
941 int r; 961 int r;
942 962
@@ -947,7 +967,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
947 return 0; 967 return 0;
948 968
949 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ 969 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
950 r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages); 970 r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
951 if (r) 971 if (r)
952 DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", 972 DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
953 gtt->ttm.ttm.num_pages, gtt->offset); 973 gtt->ttm.ttm.num_pages, gtt->offset);
@@ -968,22 +988,20 @@ static struct ttm_backend_func amdgpu_backend_func = {
968 .destroy = &amdgpu_ttm_backend_destroy, 988 .destroy = &amdgpu_ttm_backend_destroy,
969}; 989};
970 990
971static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, 991static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
972 unsigned long size, uint32_t page_flags, 992 uint32_t page_flags)
973 struct page *dummy_read_page)
974{ 993{
975 struct amdgpu_device *adev; 994 struct amdgpu_device *adev;
976 struct amdgpu_ttm_tt *gtt; 995 struct amdgpu_ttm_tt *gtt;
977 996
978 adev = amdgpu_ttm_adev(bdev); 997 adev = amdgpu_ttm_adev(bo->bdev);
979 998
980 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); 999 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
981 if (gtt == NULL) { 1000 if (gtt == NULL) {
982 return NULL; 1001 return NULL;
983 } 1002 }
984 gtt->ttm.ttm.func = &amdgpu_backend_func; 1003 gtt->ttm.ttm.func = &amdgpu_backend_func;
985 gtt->adev = adev; 1004 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
986 if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
987 kfree(gtt); 1005 kfree(gtt);
988 return NULL; 1006 return NULL;
989 } 1007 }
@@ -997,9 +1015,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
997 struct amdgpu_ttm_tt *gtt = (void *)ttm; 1015 struct amdgpu_ttm_tt *gtt = (void *)ttm;
998 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1016 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
999 1017
1000 if (ttm->state != tt_unpopulated)
1001 return 0;
1002
1003 if (gtt && gtt->userptr) { 1018 if (gtt && gtt->userptr) {
1004 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); 1019 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1005 if (!ttm->sg) 1020 if (!ttm->sg)
@@ -1012,7 +1027,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
1012 1027
1013 if (slave && ttm->sg) { 1028 if (slave && ttm->sg) {
1014 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, 1029 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1015 gtt->ttm.dma_address, ttm->num_pages); 1030 gtt->ttm.dma_address,
1031 ttm->num_pages);
1016 ttm->state = tt_unbound; 1032 ttm->state = tt_unbound;
1017 return 0; 1033 return 0;
1018 } 1034 }
@@ -1170,6 +1186,23 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1170{ 1186{
1171 unsigned long num_pages = bo->mem.num_pages; 1187 unsigned long num_pages = bo->mem.num_pages;
1172 struct drm_mm_node *node = bo->mem.mm_node; 1188 struct drm_mm_node *node = bo->mem.mm_node;
1189 struct reservation_object_list *flist;
1190 struct dma_fence *f;
1191 int i;
1192
1193 /* If bo is a KFD BO, check whether it belongs to the current process.
1194 * If so, return false, since any KFD process needs all of its BOs to
1195 * be resident in order to run successfully.
1196 */
1197 flist = reservation_object_get_list(bo->resv);
1198 if (flist) {
1199 for (i = 0; i < flist->shared_count; ++i) {
1200 f = rcu_dereference_protected(flist->shared[i],
1201 reservation_object_held(bo->resv));
1202 if (amdkfd_fence_check_mm(f, current->mm))
1203 return false;
1204 }
1205 }
1173 1206
1174 switch (bo->mem.mem_type) { 1207 switch (bo->mem.mem_type) {
1175 case TTM_PL_TT: 1208 case TTM_PL_TT:
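The check added above walks the reservation object's shared-fence list and refuses eviction as soon as one fence belongs to the calling process, because a KFD process needs all of its BOs resident to run. A minimal stand-alone sketch of that decision, with a fence reduced to the mm pointer of its owner (names and types here are illustrative, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* A "fence" is reduced to the address-space pointer of the process that
 * owns it; NULL stands for a fence that is not a KFD eviction fence. */
static bool eviction_valuable(void **fence_owner_mm, size_t count,
                              void *current_mm)
{
    for (size_t i = 0; i < count; i++)
        if (fence_owner_mm[i] && fence_owner_mm[i] == current_mm)
            return false;   /* KFD needs all of its BOs to stay resident */
    return true;            /* nothing objects, the BO may be evicted */
}

int main(void)
{
    int mm_a, mm_b;                       /* stand-ins for two processes */
    void *owners[] = { NULL, &mm_a };

    printf("%d\n", eviction_valuable(owners, 2, &mm_b));  /* 1: evictable */
    printf("%d\n", eviction_valuable(owners, 2, &mm_a));  /* 0: keep resident */
    return 0;
}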
@@ -1212,7 +1245,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1212 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset); 1245 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
1213 pos = (nodes->start << PAGE_SHIFT) + offset; 1246 pos = (nodes->start << PAGE_SHIFT) + offset;
1214 1247
1215 while (len && pos < adev->mc.mc_vram_size) { 1248 while (len && pos < adev->gmc.mc_vram_size) {
1216 uint64_t aligned_pos = pos & ~(uint64_t)3; 1249 uint64_t aligned_pos = pos & ~(uint64_t)3;
1217 uint32_t bytes = 4 - (pos & 3); 1250 uint32_t bytes = 4 - (pos & 3);
1218 uint32_t shift = (pos & 3) * 8; 1251 uint32_t shift = (pos & 3) * 8;
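amdgpu_ttm_access_memory reaches VRAM through a window that can only be read and written as aligned 32-bit words, so the loop above rounds the position down to a dword boundary and derives how many bytes of the request live in that dword and at which bit offset they start. The same arithmetic in a self-contained user-space form (the fake VRAM array and the little-endian assumption are mine, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pretend "VRAM" that can only be accessed as aligned 32-bit words. */
static uint32_t vram_words[4] = { 0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c };

static uint32_t read_word(uint64_t aligned_pos)   /* aligned_pos % 4 == 0 */
{
    return vram_words[aligned_pos / 4];
}

/* Copy 'len' bytes starting at an arbitrary byte offset 'pos'.
 * Assumes a little-endian host for the memcpy of the shifted word. */
static void read_bytes(uint64_t pos, uint8_t *dst, size_t len)
{
    while (len) {
        uint64_t aligned_pos = pos & ~(uint64_t)3;   /* round down to a dword */
        uint32_t bytes = 4 - (pos & 3);              /* bytes left in this dword */
        uint32_t shift = (pos & 3) * 8;              /* bit offset of first byte */
        uint32_t value;

        if (bytes > len)
            bytes = len;

        value = read_word(aligned_pos) >> shift;
        memcpy(dst, &value, bytes);

        pos += bytes;
        dst += bytes;
        len -= bytes;
    }
}

int main(void)
{
    uint8_t out[6];

    read_bytes(3, out, sizeof(out));     /* crosses two dword boundaries */
    for (size_t i = 0; i < sizeof(out); i++)
        printf("%02x ", (unsigned)out[i]);
    printf("\n");                        /* expected: 03 04 05 06 07 08 */
    return 0;
}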
@@ -1298,7 +1331,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1298 struct ttm_operation_ctx ctx = { false, false }; 1331 struct ttm_operation_ctx ctx = { false, false };
1299 int r = 0; 1332 int r = 0;
1300 int i; 1333 int i;
1301 u64 vram_size = adev->mc.visible_vram_size; 1334 u64 vram_size = adev->gmc.visible_vram_size;
1302 u64 offset = adev->fw_vram_usage.start_offset; 1335 u64 offset = adev->fw_vram_usage.start_offset;
1303 u64 size = adev->fw_vram_usage.size; 1336 u64 size = adev->fw_vram_usage.size;
1304 struct amdgpu_bo *bo; 1337 struct amdgpu_bo *bo;
@@ -1309,11 +1342,12 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1309 if (adev->fw_vram_usage.size > 0 && 1342 if (adev->fw_vram_usage.size > 0 &&
1310 adev->fw_vram_usage.size <= vram_size) { 1343 adev->fw_vram_usage.size <= vram_size) {
1311 1344
1312 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, 1345 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
1313 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 1346 AMDGPU_GEM_DOMAIN_VRAM,
1314 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 1347 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1315 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0, 1348 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1316 &adev->fw_vram_usage.reserved_bo); 1349 ttm_bo_type_kernel, NULL,
1350 &adev->fw_vram_usage.reserved_bo);
1317 if (r) 1351 if (r)
1318 goto error_create; 1352 goto error_create;
1319 1353
@@ -1387,8 +1421,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
1387 return r; 1421 return r;
1388 } 1422 }
1389 adev->mman.initialized = true; 1423 adev->mman.initialized = true;
1424
1425 /* We opt to avoid OOM on system page allocations */
1426 adev->mman.bdev.no_retry = true;
1427
1390 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, 1428 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
1391 adev->mc.real_vram_size >> PAGE_SHIFT); 1429 adev->gmc.real_vram_size >> PAGE_SHIFT);
1392 if (r) { 1430 if (r) {
1393 DRM_ERROR("Failed initializing VRAM heap.\n"); 1431 DRM_ERROR("Failed initializing VRAM heap.\n");
1394 return r; 1432 return r;
@@ -1397,11 +1435,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
1397 /* Reduce size of CPU-visible VRAM if requested */ 1435 /* Reduce size of CPU-visible VRAM if requested */
1398 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; 1436 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1399 if (amdgpu_vis_vram_limit > 0 && 1437 if (amdgpu_vis_vram_limit > 0 &&
1400 vis_vram_limit <= adev->mc.visible_vram_size) 1438 vis_vram_limit <= adev->gmc.visible_vram_size)
1401 adev->mc.visible_vram_size = vis_vram_limit; 1439 adev->gmc.visible_vram_size = vis_vram_limit;
1402 1440
1403 /* Change the size here instead of the init above so only lpfn is affected */ 1441 /* Change the size here instead of the init above so only lpfn is affected */
1404 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 1442 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1443#ifdef CONFIG_64BIT
1444 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1445 adev->gmc.visible_vram_size);
1446#endif
1405 1447
1406 /* 1448 /*
1407 *The reserved vram for firmware must be pinned to the specified 1449 *The reserved vram for firmware must be pinned to the specified
@@ -1412,21 +1454,21 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
1412 return r; 1454 return r;
1413 } 1455 }
1414 1456
1415 r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE, 1457 r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1416 AMDGPU_GEM_DOMAIN_VRAM, 1458 AMDGPU_GEM_DOMAIN_VRAM,
1417 &adev->stolen_vga_memory, 1459 &adev->stolen_vga_memory,
1418 NULL, NULL); 1460 NULL, NULL);
1419 if (r) 1461 if (r)
1420 return r; 1462 return r;
1421 DRM_INFO("amdgpu: %uM of VRAM memory ready\n", 1463 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1422 (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); 1464 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1423 1465
1424 if (amdgpu_gtt_size == -1) { 1466 if (amdgpu_gtt_size == -1) {
1425 struct sysinfo si; 1467 struct sysinfo si;
1426 1468
1427 si_meminfo(&si); 1469 si_meminfo(&si);
1428 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), 1470 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1429 adev->mc.mc_vram_size), 1471 adev->gmc.mc_vram_size),
1430 ((uint64_t)si.totalram * si.mem_unit * 3/4)); 1472 ((uint64_t)si.totalram * si.mem_unit * 3/4));
1431 } 1473 }
1432 else 1474 else
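When amdgpu_gtt_size is left at -1, the block above sizes the GTT domain to at least the built-in default and at least the VRAM size, but never more than three quarters of system memory. A compact sketch of that clamp (the default value and the helpers are made up for the example):

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_GTT_SIZE_MB 3072ULL   /* illustrative default, in MiB */

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* Pick a GTT size: >= default, >= VRAM size, but <= 3/4 of system RAM. */
static uint64_t default_gtt_size(uint64_t vram_bytes, uint64_t sysram_bytes)
{
    return min_u64(max_u64(DEFAULT_GTT_SIZE_MB << 20, vram_bytes),
                   sysram_bytes * 3 / 4);
}

int main(void)
{
    /* 8 GiB of VRAM, 16 GiB of system RAM -> GTT capped at 8 GiB here. */
    uint64_t gtt = default_gtt_size(8ULL << 30, 16ULL << 30);

    printf("GTT size: %llu MiB\n", (unsigned long long)(gtt >> 20));
    return 0;
}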
@@ -1494,6 +1536,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
1494 amdgpu_ttm_debugfs_fini(adev); 1536 amdgpu_ttm_debugfs_fini(adev);
1495 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); 1537 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
1496 amdgpu_ttm_fw_reserve_vram_fini(adev); 1538 amdgpu_ttm_fw_reserve_vram_fini(adev);
1539 if (adev->mman.aper_base_kaddr)
1540 iounmap(adev->mman.aper_base_kaddr);
1541 adev->mman.aper_base_kaddr = NULL;
1497 1542
1498 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); 1543 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
1499 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); 1544 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
@@ -1509,18 +1554,30 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
1509 DRM_INFO("amdgpu: ttm finalized\n"); 1554 DRM_INFO("amdgpu: ttm finalized\n");
1510} 1555}
1511 1556
1512/* this should only be called at bootup or when userspace 1557/**
1513 * isn't running */ 1558 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1514void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size) 1559 *
1560 * @adev: amdgpu_device pointer
1561 * @enable: true when we can use buffer functions.
1562 *
1563 * Enable/disable use of buffer functions during suspend/resume. This should
1564 * only be called at bootup or when userspace isn't running.
1565 */
1566void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1515{ 1567{
1516 struct ttm_mem_type_manager *man; 1568 struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
1569 uint64_t size;
1517 1570
1518 if (!adev->mman.initialized) 1571 if (!adev->mman.initialized || adev->in_gpu_reset)
1519 return; 1572 return;
1520 1573
1521 man = &adev->mman.bdev.man[TTM_PL_VRAM];
1522 /* this just adjusts TTM size idea, which sets lpfn to the correct value */ 1574 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
1575 if (enable)
1576 size = adev->gmc.real_vram_size;
1577 else
1578 size = adev->gmc.visible_vram_size;
1523 man->size = size >> PAGE_SHIFT; 1579 man->size = size >> PAGE_SHIFT;
1580 adev->mman.buffer_funcs_enabled = enable;
1524} 1581}
1525 1582
1526int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) 1583int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1559,7 +1616,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
1559 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < 1616 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
1560 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); 1617 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
1561 1618
1562 *addr = adev->mc.gart_start; 1619 *addr = adev->gmc.gart_start;
1563 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 1620 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
1564 AMDGPU_GPU_PAGE_SIZE; 1621 AMDGPU_GPU_PAGE_SIZE;
1565 1622
@@ -1619,6 +1676,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1619 unsigned i; 1676 unsigned i;
1620 int r; 1677 int r;
1621 1678
1679 if (direct_submit && !ring->ready) {
1680 DRM_ERROR("Trying to move memory with ring turned off.\n");
1681 return -EINVAL;
1682 }
1683
1622 max_bytes = adev->mman.buffer_funcs->copy_max_bytes; 1684 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
1623 num_loops = DIV_ROUND_UP(byte_count, max_bytes); 1685 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
1624 num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw; 1686 num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
@@ -1677,13 +1739,12 @@ error_free:
1677} 1739}
1678 1740
1679int amdgpu_fill_buffer(struct amdgpu_bo *bo, 1741int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1680 uint64_t src_data, 1742 uint32_t src_data,
1681 struct reservation_object *resv, 1743 struct reservation_object *resv,
1682 struct dma_fence **fence) 1744 struct dma_fence **fence)
1683{ 1745{
1684 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 1746 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1685 uint32_t max_bytes = 8 * 1747 uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
1686 adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
1687 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; 1748 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
1688 1749
1689 struct drm_mm_node *mm_node; 1750 struct drm_mm_node *mm_node;
@@ -1693,7 +1754,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1693 struct amdgpu_job *job; 1754 struct amdgpu_job *job;
1694 int r; 1755 int r;
1695 1756
1696 if (!ring->ready) { 1757 if (!adev->mman.buffer_funcs_enabled) {
1697 DRM_ERROR("Trying to clear memory with ring turned off.\n"); 1758 DRM_ERROR("Trying to clear memory with ring turned off.\n");
1698 return -EINVAL; 1759 return -EINVAL;
1699 } 1760 }
@@ -1714,9 +1775,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1714 num_pages -= mm_node->size; 1775 num_pages -= mm_node->size;
1715 ++mm_node; 1776 ++mm_node;
1716 } 1777 }
1717 1778 num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
1718 /* num of dwords for each SDMA_OP_PTEPDE cmd */
1719 num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
1720 1779
1721 /* for IB padding */ 1780 /* for IB padding */
1722 num_dw += 64; 1781 num_dw += 64;
@@ -1741,16 +1800,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1741 uint32_t byte_count = mm_node->size << PAGE_SHIFT; 1800 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1742 uint64_t dst_addr; 1801 uint64_t dst_addr;
1743 1802
1744 WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");
1745
1746 dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); 1803 dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
1747 while (byte_count) { 1804 while (byte_count) {
1748 uint32_t cur_size_in_bytes = min(byte_count, max_bytes); 1805 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1749 1806
1750 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], 1807 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
1751 dst_addr, 0, 1808 dst_addr, cur_size_in_bytes);
1752 cur_size_in_bytes >> 3, 0,
1753 src_data);
1754 1809
1755 dst_addr += cur_size_in_bytes; 1810 dst_addr += cur_size_in_bytes;
1756 byte_count -= cur_size_in_bytes; 1811 byte_count -= cur_size_in_bytes;
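amdgpu_fill_buffer first counts one SDMA command per fill_max_bytes chunk of every drm_mm node backing the BO and then walks the nodes again emitting those chunks, as the hunks above show. A stripped-down sketch of the chunking, with the node list reduced to an array of byte sizes and "emitting" replaced by printing (all names and values invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    const uint64_t node_bytes[] = { 5 << 20, 1 << 20 };  /* two VRAM nodes */
    const uint32_t max_bytes = 2 << 20;                  /* per-command limit */
    unsigned num_loops = 0;

    /* First pass: how many fill commands will be needed? */
    for (unsigned i = 0; i < 2; i++)
        num_loops += DIV_ROUND_UP(node_bytes[i], max_bytes);
    printf("%u fill commands\n", num_loops);

    /* Second pass: emit one command per chunk of each node. */
    for (unsigned i = 0; i < 2; i++) {
        uint64_t byte_count = node_bytes[i];
        uint64_t dst = i * (16ULL << 20);    /* fake per-node GPU address */

        while (byte_count) {
            uint32_t cur = byte_count < max_bytes ? byte_count : max_bytes;

            printf("fill dst=0x%llx size=%u\n", (unsigned long long)dst, cur);
            dst += cur;
            byte_count -= cur;
        }
    }
    return 0;
}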
@@ -1811,14 +1866,14 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
1811 if (size & 0x3 || *pos & 0x3) 1866 if (size & 0x3 || *pos & 0x3)
1812 return -EINVAL; 1867 return -EINVAL;
1813 1868
1814 if (*pos >= adev->mc.mc_vram_size) 1869 if (*pos >= adev->gmc.mc_vram_size)
1815 return -ENXIO; 1870 return -ENXIO;
1816 1871
1817 while (size) { 1872 while (size) {
1818 unsigned long flags; 1873 unsigned long flags;
1819 uint32_t value; 1874 uint32_t value;
1820 1875
1821 if (*pos >= adev->mc.mc_vram_size) 1876 if (*pos >= adev->gmc.mc_vram_size)
1822 return result; 1877 return result;
1823 1878
1824 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 1879 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
@@ -1850,14 +1905,14 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
1850 if (size & 0x3 || *pos & 0x3) 1905 if (size & 0x3 || *pos & 0x3)
1851 return -EINVAL; 1906 return -EINVAL;
1852 1907
1853 if (*pos >= adev->mc.mc_vram_size) 1908 if (*pos >= adev->gmc.mc_vram_size)
1854 return -ENXIO; 1909 return -ENXIO;
1855 1910
1856 while (size) { 1911 while (size) {
1857 unsigned long flags; 1912 unsigned long flags;
1858 uint32_t value; 1913 uint32_t value;
1859 1914
1860 if (*pos >= adev->mc.mc_vram_size) 1915 if (*pos >= adev->gmc.mc_vram_size)
1861 return result; 1916 return result;
1862 1917
1863 r = get_user(value, (uint32_t *)buf); 1918 r = get_user(value, (uint32_t *)buf);
@@ -1935,38 +1990,98 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
1935 1990
1936#endif 1991#endif
1937 1992
1938static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf, 1993static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
1939 size_t size, loff_t *pos) 1994 size_t size, loff_t *pos)
1940{ 1995{
1941 struct amdgpu_device *adev = file_inode(f)->i_private; 1996 struct amdgpu_device *adev = file_inode(f)->i_private;
1942 int r;
1943 uint64_t phys;
1944 struct iommu_domain *dom; 1997 struct iommu_domain *dom;
1998 ssize_t result = 0;
1999 int r;
1945 2000
1946 // always return 8 bytes 2001 dom = iommu_get_domain_for_dev(adev->dev);
1947 if (size != 8)
1948 return -EINVAL;
1949 2002
1950 // only accept page addresses 2003 while (size) {
1951 if (*pos & 0xFFF) 2004 phys_addr_t addr = *pos & PAGE_MASK;
1952 return -EINVAL; 2005 loff_t off = *pos & ~PAGE_MASK;
2006 size_t bytes = PAGE_SIZE - off;
2007 unsigned long pfn;
2008 struct page *p;
2009 void *ptr;
2010
2011 bytes = bytes < size ? bytes : size;
2012
2013 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2014
2015 pfn = addr >> PAGE_SHIFT;
2016 if (!pfn_valid(pfn))
2017 return -EPERM;
2018
2019 p = pfn_to_page(pfn);
2020 if (p->mapping != adev->mman.bdev.dev_mapping)
2021 return -EPERM;
2022
2023 ptr = kmap(p);
2024 r = copy_to_user(buf, ptr, bytes);
2025 kunmap(p);
2026 if (r)
2027 return -EFAULT;
2028
2029 size -= bytes;
2030 *pos += bytes;
2031 result += bytes;
2032 }
2033
2034 return result;
2035}
2036
2037static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2038 size_t size, loff_t *pos)
2039{
2040 struct amdgpu_device *adev = file_inode(f)->i_private;
2041 struct iommu_domain *dom;
2042 ssize_t result = 0;
2043 int r;
1953 2044
1954 dom = iommu_get_domain_for_dev(adev->dev); 2045 dom = iommu_get_domain_for_dev(adev->dev);
1955 if (dom)
1956 phys = iommu_iova_to_phys(dom, *pos);
1957 else
1958 phys = *pos;
1959 2046
1960 r = copy_to_user(buf, &phys, 8); 2047 while (size) {
1961 if (r) 2048 phys_addr_t addr = *pos & PAGE_MASK;
1962 return -EFAULT; 2049 loff_t off = *pos & ~PAGE_MASK;
2050 size_t bytes = PAGE_SIZE - off;
2051 unsigned long pfn;
2052 struct page *p;
2053 void *ptr;
2054
2055 bytes = bytes < size ? bytes : size;
2056
2057 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
1963 2058
1964 return 8; 2059 pfn = addr >> PAGE_SHIFT;
2060 if (!pfn_valid(pfn))
2061 return -EPERM;
2062
2063 p = pfn_to_page(pfn);
2064 if (p->mapping != adev->mman.bdev.dev_mapping)
2065 return -EPERM;
2066
2067 ptr = kmap(p);
2068 r = copy_from_user(ptr, buf, bytes);
2069 kunmap(p);
2070 if (r)
2071 return -EFAULT;
2072
2073 size -= bytes;
2074 *pos += bytes;
2075 result += bytes;
2076 }
2077
2078 return result;
1965} 2079}
1966 2080
1967static const struct file_operations amdgpu_ttm_iova_fops = { 2081static const struct file_operations amdgpu_ttm_iomem_fops = {
1968 .owner = THIS_MODULE, 2082 .owner = THIS_MODULE,
1969 .read = amdgpu_iova_to_phys_read, 2083 .read = amdgpu_iomem_read,
2084 .write = amdgpu_iomem_write,
1970 .llseek = default_llseek 2085 .llseek = default_llseek
1971}; 2086};
1972 2087
@@ -1979,7 +2094,7 @@ static const struct {
1979#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS 2094#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
1980 { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT }, 2095 { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
1981#endif 2096#endif
1982 { "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM }, 2097 { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
1983}; 2098};
1984 2099
1985#endif 2100#endif
@@ -2001,9 +2116,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2001 if (IS_ERR(ent)) 2116 if (IS_ERR(ent))
2002 return PTR_ERR(ent); 2117 return PTR_ERR(ent);
2003 if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM) 2118 if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
2004 i_size_write(ent->d_inode, adev->mc.mc_vram_size); 2119 i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
2005 else if (ttm_debugfs_entries[count].domain == TTM_PL_TT) 2120 else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
2006 i_size_write(ent->d_inode, adev->mc.gart_size); 2121 i_size_write(ent->d_inode, adev->gmc.gart_size);
2007 adev->mman.debugfs_entries[count] = ent; 2122 adev->mman.debugfs_entries[count] = ent;
2008 } 2123 }
2009 2124
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 167856f6080f..6ea7de863041 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -44,6 +44,7 @@ struct amdgpu_mman {
44 struct ttm_bo_device bdev; 44 struct ttm_bo_device bdev;
45 bool mem_global_referenced; 45 bool mem_global_referenced;
46 bool initialized; 46 bool initialized;
47 void __iomem *aper_base_kaddr;
47 48
48#if defined(CONFIG_DEBUG_FS) 49#if defined(CONFIG_DEBUG_FS)
49 struct dentry *debugfs_entries[8]; 50 struct dentry *debugfs_entries[8];
@@ -52,6 +53,7 @@ struct amdgpu_mman {
52 /* buffer handling */ 53 /* buffer handling */
53 const struct amdgpu_buffer_funcs *buffer_funcs; 54 const struct amdgpu_buffer_funcs *buffer_funcs;
54 struct amdgpu_ring *buffer_funcs_ring; 55 struct amdgpu_ring *buffer_funcs_ring;
56 bool buffer_funcs_enabled;
55 57
56 struct mutex gtt_window_lock; 58 struct mutex gtt_window_lock;
57 /* Scheduler entity for buffer moves */ 59 /* Scheduler entity for buffer moves */
@@ -74,6 +76,11 @@ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
74uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); 76uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
75uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); 77uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
76 78
79int amdgpu_ttm_init(struct amdgpu_device *adev);
80void amdgpu_ttm_fini(struct amdgpu_device *adev);
81void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
82 bool enable);
83
77int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, 84int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
78 uint64_t dst_offset, uint32_t byte_count, 85 uint64_t dst_offset, uint32_t byte_count,
79 struct reservation_object *resv, 86 struct reservation_object *resv,
@@ -86,7 +93,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
86 struct reservation_object *resv, 93 struct reservation_object *resv,
87 struct dma_fence **f); 94 struct dma_fence **f);
88int amdgpu_fill_buffer(struct amdgpu_bo *bo, 95int amdgpu_fill_buffer(struct amdgpu_bo *bo,
89 uint64_t src_data, 96 uint32_t src_data,
90 struct reservation_object *resv, 97 struct reservation_object *resv,
91 struct dma_fence **fence); 98 struct dma_fence **fence);
92 99
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b2eae86bf906..f3c459b7c0bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
299 299
300 cancel_delayed_work_sync(&adev->uvd.idle_work); 300 cancel_delayed_work_sync(&adev->uvd.idle_work);
301 301
302 for (i = 0; i < adev->uvd.max_handles; ++i) 302 /* only valid for physical mode */
303 if (atomic_read(&adev->uvd.handles[i])) 303 if (adev->asic_type < CHIP_POLARIS10) {
304 break; 304 for (i = 0; i < adev->uvd.max_handles; ++i)
305 if (atomic_read(&adev->uvd.handles[i]))
306 break;
305 307
306 if (i == AMDGPU_MAX_UVD_HANDLES) 308 if (i == adev->uvd.max_handles)
307 return 0; 309 return 0;
310 }
308 311
309 size = amdgpu_bo_size(adev->uvd.vcpu_bo); 312 size = amdgpu_bo_size(adev->uvd.vcpu_bo);
310 ptr = adev->uvd.cpu_addr; 313 ptr = adev->uvd.cpu_addr;
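The hunk above restricts the handle scan to pre-Polaris parts (physical mode) and, more importantly, compares the loop cursor against the same runtime limit used as the loop bound, adev->uvd.max_handles, instead of the larger compile-time AMDGPU_MAX_UVD_HANDLES. The scan-and-early-return idiom in isolation (array and limit invented for the example):

#include <stdbool.h>
#include <stdio.h>

/* Return true if any of the first 'max_handles' session handles is in use.
 * The point of the fix is that the final comparison uses the same limit as
 * the loop bound, so "no handle found" is detected reliably. */
static bool any_handle_busy(const unsigned *handles, unsigned max_handles)
{
    unsigned i;

    for (i = 0; i < max_handles; ++i)
        if (handles[i])
            break;

    return i != max_handles;
}

int main(void)
{
    unsigned handles[10] = { 0 };

    printf("busy=%d\n", any_handle_busy(handles, 10));   /* 0 */
    handles[7] = 1;
    printf("busy=%d\n", any_handle_busy(handles, 10));   /* 1 */
    return 0;
}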
@@ -952,37 +955,28 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
952static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, 955static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
953 bool direct, struct dma_fence **fence) 956 bool direct, struct dma_fence **fence)
954{ 957{
955 struct ttm_operation_ctx ctx = { true, false }; 958 struct amdgpu_device *adev = ring->adev;
956 struct ttm_validate_buffer tv; 959 struct dma_fence *f = NULL;
957 struct ww_acquire_ctx ticket;
958 struct list_head head;
959 struct amdgpu_job *job; 960 struct amdgpu_job *job;
960 struct amdgpu_ib *ib; 961 struct amdgpu_ib *ib;
961 struct dma_fence *f = NULL;
962 struct amdgpu_device *adev = ring->adev;
963 uint64_t addr;
964 uint32_t data[4]; 962 uint32_t data[4];
965 int i, r; 963 uint64_t addr;
966 964 long r;
967 memset(&tv, 0, sizeof(tv)); 965 int i;
968 tv.bo = &bo->tbo;
969
970 INIT_LIST_HEAD(&head);
971 list_add(&tv.head, &head);
972 966
973 r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL); 967 amdgpu_bo_kunmap(bo);
974 if (r) 968 amdgpu_bo_unpin(bo);
975 return r;
976 969
977 if (!ring->adev->uvd.address_64_bit) { 970 if (!ring->adev->uvd.address_64_bit) {
971 struct ttm_operation_ctx ctx = { true, false };
972
978 amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); 973 amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
979 amdgpu_uvd_force_into_uvd_segment(bo); 974 amdgpu_uvd_force_into_uvd_segment(bo);
975 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
976 if (r)
977 goto err;
980 } 978 }
981 979
982 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
983 if (r)
984 goto err;
985
986 r = amdgpu_job_alloc_with_ib(adev, 64, &job); 980 r = amdgpu_job_alloc_with_ib(adev, 64, &job);
987 if (r) 981 if (r)
988 goto err; 982 goto err;
@@ -1014,6 +1008,14 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
1014 ib->length_dw = 16; 1008 ib->length_dw = 16;
1015 1009
1016 if (direct) { 1010 if (direct) {
1011 r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
1012 true, false,
1013 msecs_to_jiffies(10));
1014 if (r == 0)
1015 r = -ETIMEDOUT;
1016 if (r < 0)
1017 goto err_free;
1018
1017 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 1019 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
1018 job->fence = dma_fence_get(f); 1020 job->fence = dma_fence_get(f);
1019 if (r) 1021 if (r)
@@ -1021,17 +1023,23 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
1021 1023
1022 amdgpu_job_free(job); 1024 amdgpu_job_free(job);
1023 } else { 1025 } else {
1026 r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
1027 AMDGPU_FENCE_OWNER_UNDEFINED, false);
1028 if (r)
1029 goto err_free;
1030
1024 r = amdgpu_job_submit(job, ring, &adev->uvd.entity, 1031 r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
1025 AMDGPU_FENCE_OWNER_UNDEFINED, &f); 1032 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
1026 if (r) 1033 if (r)
1027 goto err_free; 1034 goto err_free;
1028 } 1035 }
1029 1036
1030 ttm_eu_fence_buffer_objects(&ticket, &head, f); 1037 amdgpu_bo_fence(bo, f, false);
1038 amdgpu_bo_unreserve(bo);
1039 amdgpu_bo_unref(&bo);
1031 1040
1032 if (fence) 1041 if (fence)
1033 *fence = dma_fence_get(f); 1042 *fence = dma_fence_get(f);
1034 amdgpu_bo_unref(&bo);
1035 dma_fence_put(f); 1043 dma_fence_put(f);
1036 1044
1037 return 0; 1045 return 0;
@@ -1040,7 +1048,8 @@ err_free:
1040 amdgpu_job_free(job); 1048 amdgpu_job_free(job);
1041 1049
1042err: 1050err:
1043 ttm_eu_backoff_reservation(&ticket, &head); 1051 amdgpu_bo_unreserve(bo);
1052 amdgpu_bo_unref(&bo);
1044 return r; 1053 return r;
1045} 1054}
1046 1055
@@ -1051,31 +1060,16 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1051 struct dma_fence **fence) 1060 struct dma_fence **fence)
1052{ 1061{
1053 struct amdgpu_device *adev = ring->adev; 1062 struct amdgpu_device *adev = ring->adev;
1054 struct amdgpu_bo *bo; 1063 struct amdgpu_bo *bo = NULL;
1055 uint32_t *msg; 1064 uint32_t *msg;
1056 int r, i; 1065 int r, i;
1057 1066
1058 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, 1067 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
1059 AMDGPU_GEM_DOMAIN_VRAM, 1068 AMDGPU_GEM_DOMAIN_VRAM,
1060 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 1069 &bo, NULL, (void **)&msg);
1061 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1062 NULL, NULL, 0, &bo);
1063 if (r) 1070 if (r)
1064 return r; 1071 return r;
1065 1072
1066 r = amdgpu_bo_reserve(bo, false);
1067 if (r) {
1068 amdgpu_bo_unref(&bo);
1069 return r;
1070 }
1071
1072 r = amdgpu_bo_kmap(bo, (void **)&msg);
1073 if (r) {
1074 amdgpu_bo_unreserve(bo);
1075 amdgpu_bo_unref(&bo);
1076 return r;
1077 }
1078
1079 /* stitch together an UVD create msg */ 1073 /* stitch together an UVD create msg */
1080 msg[0] = cpu_to_le32(0x00000de4); 1074 msg[0] = cpu_to_le32(0x00000de4);
1081 msg[1] = cpu_to_le32(0x00000000); 1075 msg[1] = cpu_to_le32(0x00000000);
@@ -1091,9 +1085,6 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1091 for (i = 11; i < 1024; ++i) 1085 for (i = 11; i < 1024; ++i)
1092 msg[i] = cpu_to_le32(0x0); 1086 msg[i] = cpu_to_le32(0x0);
1093 1087
1094 amdgpu_bo_kunmap(bo);
1095 amdgpu_bo_unreserve(bo);
1096
1097 return amdgpu_uvd_send_msg(ring, bo, true, fence); 1088 return amdgpu_uvd_send_msg(ring, bo, true, fence);
1098} 1089}
1099 1090
@@ -1101,31 +1092,16 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
1101 bool direct, struct dma_fence **fence) 1092 bool direct, struct dma_fence **fence)
1102{ 1093{
1103 struct amdgpu_device *adev = ring->adev; 1094 struct amdgpu_device *adev = ring->adev;
1104 struct amdgpu_bo *bo; 1095 struct amdgpu_bo *bo = NULL;
1105 uint32_t *msg; 1096 uint32_t *msg;
1106 int r, i; 1097 int r, i;
1107 1098
1108 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, 1099 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
1109 AMDGPU_GEM_DOMAIN_VRAM, 1100 AMDGPU_GEM_DOMAIN_VRAM,
1110 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 1101 &bo, NULL, (void **)&msg);
1111 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1112 NULL, NULL, 0, &bo);
1113 if (r) 1102 if (r)
1114 return r; 1103 return r;
1115 1104
1116 r = amdgpu_bo_reserve(bo, false);
1117 if (r) {
1118 amdgpu_bo_unref(&bo);
1119 return r;
1120 }
1121
1122 r = amdgpu_bo_kmap(bo, (void **)&msg);
1123 if (r) {
1124 amdgpu_bo_unreserve(bo);
1125 amdgpu_bo_unref(&bo);
1126 return r;
1127 }
1128
1129 /* stitch together an UVD destroy msg */ 1105 /* stitch together an UVD destroy msg */
1130 msg[0] = cpu_to_le32(0x00000de4); 1106 msg[0] = cpu_to_le32(0x00000de4);
1131 msg[1] = cpu_to_le32(0x00000002); 1107 msg[1] = cpu_to_le32(0x00000002);
@@ -1134,9 +1110,6 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
1134 for (i = 4; i < 1024; ++i) 1110 for (i = 4; i < 1024; ++i)
1135 msg[i] = cpu_to_le32(0x0); 1111 msg[i] = cpu_to_le32(0x0);
1136 1112
1137 amdgpu_bo_kunmap(bo);
1138 amdgpu_bo_unreserve(bo);
1139
1140 return amdgpu_uvd_send_msg(ring, bo, direct, fence); 1113 return amdgpu_uvd_send_msg(ring, bo, direct, fence);
1141} 1114}
1142 1115
@@ -1146,9 +1119,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
1146 container_of(work, struct amdgpu_device, uvd.idle_work.work); 1119 container_of(work, struct amdgpu_device, uvd.idle_work.work);
1147 unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); 1120 unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
1148 1121
1149 if (amdgpu_sriov_vf(adev))
1150 return;
1151
1152 if (fences == 0) { 1122 if (fences == 0) {
1153 if (adev->pm.dpm_enabled) { 1123 if (adev->pm.dpm_enabled) {
1154 amdgpu_dpm_enable_uvd(adev, false); 1124 amdgpu_dpm_enable_uvd(adev, false);
@@ -1168,11 +1138,12 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
1168void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) 1138void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
1169{ 1139{
1170 struct amdgpu_device *adev = ring->adev; 1140 struct amdgpu_device *adev = ring->adev;
1171 bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); 1141 bool set_clocks;
1172 1142
1173 if (amdgpu_sriov_vf(adev)) 1143 if (amdgpu_sriov_vf(adev))
1174 return; 1144 return;
1175 1145
1146 set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
1176 if (set_clocks) { 1147 if (set_clocks) {
1177 if (adev->pm.dpm_enabled) { 1148 if (adev->pm.dpm_enabled) {
1178 amdgpu_dpm_enable_uvd(adev, true); 1149 amdgpu_dpm_enable_uvd(adev, true);
@@ -1188,7 +1159,8 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
1188 1159
1189void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) 1160void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
1190{ 1161{
1191 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); 1162 if (!amdgpu_sriov_vf(ring->adev))
1163 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1192} 1164}
1193 1165
1194/** 1166/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index d274ae535530..9152478d7528 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -300,9 +300,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
300 container_of(work, struct amdgpu_device, vce.idle_work.work); 300 container_of(work, struct amdgpu_device, vce.idle_work.work);
301 unsigned i, count = 0; 301 unsigned i, count = 0;
302 302
303 if (amdgpu_sriov_vf(adev))
304 return;
305
306 for (i = 0; i < adev->vce.num_rings; i++) 303 for (i = 0; i < adev->vce.num_rings; i++)
307 count += amdgpu_fence_count_emitted(&adev->vce.ring[i]); 304 count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
308 305
@@ -362,7 +359,8 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
362 */ 359 */
363void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring) 360void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
364{ 361{
365 schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT); 362 if (!amdgpu_sriov_vf(ring->adev))
363 schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
366} 364}
367 365
368/** 366/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 0fd378ae92c3..71781267ee4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -30,6 +30,8 @@
30#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0) 30#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
31#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1) 31#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
32 32
33#define AMDGPU_VCE_FW_53_45 ((53 << 24) | (45 << 16))
34
33struct amdgpu_vce { 35struct amdgpu_vce {
34 struct amdgpu_bo *vcpu_bo; 36 struct amdgpu_bo *vcpu_bo;
35 uint64_t gpu_addr; 37 uint64_t gpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 837962118dbc..58e495330b38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -270,34 +270,17 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
270 return r; 270 return r;
271} 271}
272 272
273static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, 273static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
274 bool direct, struct dma_fence **fence) 274 struct amdgpu_bo *bo, bool direct,
275 struct dma_fence **fence)
275{ 276{
276 struct ttm_operation_ctx ctx = { true, false }; 277 struct amdgpu_device *adev = ring->adev;
277 struct ttm_validate_buffer tv; 278 struct dma_fence *f = NULL;
278 struct ww_acquire_ctx ticket;
279 struct list_head head;
280 struct amdgpu_job *job; 279 struct amdgpu_job *job;
281 struct amdgpu_ib *ib; 280 struct amdgpu_ib *ib;
282 struct dma_fence *f = NULL;
283 struct amdgpu_device *adev = ring->adev;
284 uint64_t addr; 281 uint64_t addr;
285 int i, r; 282 int i, r;
286 283
287 memset(&tv, 0, sizeof(tv));
288 tv.bo = &bo->tbo;
289
290 INIT_LIST_HEAD(&head);
291 list_add(&tv.head, &head);
292
293 r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
294 if (r)
295 return r;
296
297 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
298 if (r)
299 goto err;
300
301 r = amdgpu_job_alloc_with_ib(adev, 64, &job); 284 r = amdgpu_job_alloc_with_ib(adev, 64, &job);
302 if (r) 285 if (r)
303 goto err; 286 goto err;
@@ -330,11 +313,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
330 goto err_free; 313 goto err_free;
331 } 314 }
332 315
333 ttm_eu_fence_buffer_objects(&ticket, &head, f); 316 amdgpu_bo_fence(bo, f, false);
317 amdgpu_bo_unreserve(bo);
318 amdgpu_bo_unref(&bo);
334 319
335 if (fence) 320 if (fence)
336 *fence = dma_fence_get(f); 321 *fence = dma_fence_get(f);
337 amdgpu_bo_unref(&bo);
338 dma_fence_put(f); 322 dma_fence_put(f);
339 323
340 return 0; 324 return 0;
@@ -343,7 +327,8 @@ err_free:
343 amdgpu_job_free(job); 327 amdgpu_job_free(job);
344 328
345err: 329err:
346 ttm_eu_backoff_reservation(&ticket, &head); 330 amdgpu_bo_unreserve(bo);
331 amdgpu_bo_unref(&bo);
347 return r; 332 return r;
348} 333}
349 334
@@ -351,31 +336,16 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
351 struct dma_fence **fence) 336 struct dma_fence **fence)
352{ 337{
353 struct amdgpu_device *adev = ring->adev; 338 struct amdgpu_device *adev = ring->adev;
354 struct amdgpu_bo *bo; 339 struct amdgpu_bo *bo = NULL;
355 uint32_t *msg; 340 uint32_t *msg;
356 int r, i; 341 int r, i;
357 342
358 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, 343 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
359 AMDGPU_GEM_DOMAIN_VRAM, 344 AMDGPU_GEM_DOMAIN_VRAM,
360 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 345 &bo, NULL, (void **)&msg);
361 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
362 NULL, NULL, 0, &bo);
363 if (r) 346 if (r)
364 return r; 347 return r;
365 348
366 r = amdgpu_bo_reserve(bo, false);
367 if (r) {
368 amdgpu_bo_unref(&bo);
369 return r;
370 }
371
372 r = amdgpu_bo_kmap(bo, (void **)&msg);
373 if (r) {
374 amdgpu_bo_unreserve(bo);
375 amdgpu_bo_unref(&bo);
376 return r;
377 }
378
379 msg[0] = cpu_to_le32(0x00000028); 349 msg[0] = cpu_to_le32(0x00000028);
380 msg[1] = cpu_to_le32(0x00000038); 350 msg[1] = cpu_to_le32(0x00000038);
381 msg[2] = cpu_to_le32(0x00000001); 351 msg[2] = cpu_to_le32(0x00000001);
@@ -393,9 +363,6 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
393 for (i = 14; i < 1024; ++i) 363 for (i = 14; i < 1024; ++i)
394 msg[i] = cpu_to_le32(0x0); 364 msg[i] = cpu_to_le32(0x0);
395 365
396 amdgpu_bo_kunmap(bo);
397 amdgpu_bo_unreserve(bo);
398
399 return amdgpu_vcn_dec_send_msg(ring, bo, true, fence); 366 return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
400} 367}
401 368
@@ -403,31 +370,16 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
403 bool direct, struct dma_fence **fence) 370 bool direct, struct dma_fence **fence)
404{ 371{
405 struct amdgpu_device *adev = ring->adev; 372 struct amdgpu_device *adev = ring->adev;
406 struct amdgpu_bo *bo; 373 struct amdgpu_bo *bo = NULL;
407 uint32_t *msg; 374 uint32_t *msg;
408 int r, i; 375 int r, i;
409 376
410 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, 377 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
411 AMDGPU_GEM_DOMAIN_VRAM, 378 AMDGPU_GEM_DOMAIN_VRAM,
412 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 379 &bo, NULL, (void **)&msg);
413 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
414 NULL, NULL, 0, &bo);
415 if (r) 380 if (r)
416 return r; 381 return r;
417 382
418 r = amdgpu_bo_reserve(bo, false);
419 if (r) {
420 amdgpu_bo_unref(&bo);
421 return r;
422 }
423
424 r = amdgpu_bo_kmap(bo, (void **)&msg);
425 if (r) {
426 amdgpu_bo_unreserve(bo);
427 amdgpu_bo_unref(&bo);
428 return r;
429 }
430
431 msg[0] = cpu_to_le32(0x00000028); 383 msg[0] = cpu_to_le32(0x00000028);
432 msg[1] = cpu_to_le32(0x00000018); 384 msg[1] = cpu_to_le32(0x00000018);
433 msg[2] = cpu_to_le32(0x00000000); 385 msg[2] = cpu_to_le32(0x00000000);
@@ -437,9 +389,6 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
437 for (i = 6; i < 1024; ++i) 389 for (i = 6; i < 1024; ++i)
438 msg[i] = cpu_to_le32(0x0); 390 msg[i] = cpu_to_le32(0x0);
439 391
440 amdgpu_bo_kunmap(bo);
441 amdgpu_bo_unreserve(bo);
442
443 return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence); 392 return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
444} 393}
445 394
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e7dfb7b44b4b..21adb1b6e5cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,7 +22,21 @@
22 */ 22 */
23 23
24#include "amdgpu.h" 24#include "amdgpu.h"
25#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */ 25#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
26#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
27#define MAX_KIQ_REG_TRY 20
28
29uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
30{
31 uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
32
33 addr -= AMDGPU_VA_RESERVED_SIZE;
34
35 if (addr >= AMDGPU_VA_HOLE_START)
36 addr |= AMDGPU_VA_HOLE_END;
37
38 return addr;
39}
26 40
27bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) 41bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
28{ 42{
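amdgpu_csa_vaddr, added above, places the CSA just below the top of the VM space and, when that address falls at or above the start of the VA hole, folds it into the upper canonical range by OR-ing in the hole end. The same computation with stand-in constants (the reserved size and hole boundaries below are illustrative, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real ones depend on the ASIC's VM config. */
#define GPU_PAGE_SHIFT   12
#define VA_RESERVED_SIZE (1ULL << 20)          /* reserved at the top of the VA */
#define VA_HOLE_START    0x0000800000000000ULL /* start of the unusable hole */
#define VA_HOLE_END      0xffff800000000000ULL /* first valid "high" address */

/* Top-of-address-space CSA placement, with the hole adjustment. */
static uint64_t csa_vaddr(uint64_t max_pfn)
{
    uint64_t addr = max_pfn << GPU_PAGE_SHIFT;

    addr -= VA_RESERVED_SIZE;
    if (addr >= VA_HOLE_START)
        addr |= VA_HOLE_END;   /* sign-extend into the upper half */
    return addr;
}

int main(void)
{
    /* A 48-bit VM space: 1ULL << (48 - 12) pages. */
    printf("CSA at 0x%llx\n", (unsigned long long)csa_vaddr(1ULL << 36));
    return 0;
}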
@@ -55,14 +69,14 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
55 69
56/* 70/*
57 * amdgpu_map_static_csa should be called during amdgpu_vm_init 71 * amdgpu_map_static_csa should be called during amdgpu_vm_init
58 * it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE" 72 * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
59 * to this VM, and each command submission of GFX should use this virtual 73 * submission of GFX should use this virtual address within META_DATA init
60 * address within META_DATA init package to support SRIOV gfx preemption. 74 * package to support SRIOV gfx preemption.
61 */ 75 */
62
63int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, 76int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
64 struct amdgpu_bo_va **bo_va) 77 struct amdgpu_bo_va **bo_va)
65{ 78{
79 uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
66 struct ww_acquire_ctx ticket; 80 struct ww_acquire_ctx ticket;
67 struct list_head list; 81 struct list_head list;
68 struct amdgpu_bo_list_entry pd; 82 struct amdgpu_bo_list_entry pd;
@@ -90,7 +104,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
90 return -ENOMEM; 104 return -ENOMEM;
91 } 105 }
92 106
93 r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR, 107 r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
94 AMDGPU_CSA_SIZE); 108 AMDGPU_CSA_SIZE);
95 if (r) { 109 if (r) {
96 DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); 110 DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
@@ -99,7 +113,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
99 return r; 113 return r;
100 } 114 }
101 115
102 r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE, 116 r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
103 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | 117 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
104 AMDGPU_PTE_EXECUTABLE); 118 AMDGPU_PTE_EXECUTABLE);
105 119
@@ -125,9 +139,9 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
125 139
126uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) 140uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
127{ 141{
128 signed long r; 142 signed long r, cnt = 0;
129 unsigned long flags; 143 unsigned long flags;
130 uint32_t val, seq; 144 uint32_t seq;
131 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 145 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
132 struct amdgpu_ring *ring = &kiq->ring; 146 struct amdgpu_ring *ring = &kiq->ring;
133 147
@@ -141,18 +155,39 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
141 spin_unlock_irqrestore(&kiq->ring_lock, flags); 155 spin_unlock_irqrestore(&kiq->ring_lock, flags);
142 156
143 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 157 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
144 if (r < 1) { 158
145 DRM_ERROR("wait for kiq fence error: %ld\n", r); 159 /* don't wait anymore for gpu reset case because this way may
146 return ~0; 160 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
161 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
162 * never return if we keep waiting in virt_kiq_rreg, which causes
163 * gpu_recover() to hang there.
164 *
165 * also don't wait anymore for IRQ context
166 */
167 if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
168 goto failed_kiq_read;
169
170 if (in_interrupt())
171 might_sleep();
172
173 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
174 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
175 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
147 } 176 }
148 val = adev->wb.wb[adev->virt.reg_val_offs];
149 177
150 return val; 178 if (cnt > MAX_KIQ_REG_TRY)
179 goto failed_kiq_read;
180
181 return adev->wb.wb[adev->virt.reg_val_offs];
182
183failed_kiq_read:
184 pr_err("failed to read reg:%x\n", reg);
185 return ~0;
151} 186}
152 187
153void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) 188void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
154{ 189{
155 signed long r; 190 signed long r, cnt = 0;
156 unsigned long flags; 191 unsigned long flags;
157 uint32_t seq; 192 uint32_t seq;
158 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 193 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -168,8 +203,34 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
168 spin_unlock_irqrestore(&kiq->ring_lock, flags); 203 spin_unlock_irqrestore(&kiq->ring_lock, flags);
169 204
170 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 205 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
171 if (r < 1) 206
172 DRM_ERROR("wait for kiq fence error: %ld\n", r); 207 /* don't wait anymore for gpu reset case because this way may
208 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
209 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
210 * never return if we keep waiting in virt_kiq_rreg, which causes
211 * gpu_recover() to hang there.
212 *
213 * also don't wait anymore for IRQ context
214 */
215 if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
216 goto failed_kiq_write;
217
218 if (in_interrupt())
219 might_sleep();
220
221 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
222
223 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
224 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
225 }
226
227 if (cnt > MAX_KIQ_REG_TRY)
228 goto failed_kiq_write;
229
230 return;
231
232failed_kiq_write:
233 pr_err("failed to write reg:%x\n", reg);
173} 234}
174 235
175/** 236/**
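Both reworked KIQ accessors poll the fence once, bail out immediately when a GPU reset is in progress or when called from interrupt context, and otherwise retry up to MAX_KIQ_REG_TRY times with a short sleep between attempts instead of waiting indefinitely. A user-space sketch of that bounded-retry shape (the poll function is a stand-in that succeeds on its third call):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_TRY             20
#define BAILOUT_INTERVAL_MS 5

/* Stand-in for amdgpu_fence_wait_polling(): succeeds on the 3rd call. */
static long poll_once(void)
{
    static int calls;

    return ++calls >= 3 ? 1 : 0;
}

/* Bounded retry: poll, and if that fails keep retrying with a short sleep,
 * giving up after MAX_TRY attempts instead of blocking forever. */
static bool wait_with_retries(bool may_sleep)
{
    long r = poll_once();
    long cnt = 0;

    if (r < 1 && !may_sleep)    /* e.g. interrupt context or GPU reset */
        return false;

    while (r < 1 && cnt++ < MAX_TRY) {
        usleep(BAILOUT_INTERVAL_MS * 1000);
        r = poll_once();
    }
    return r >= 1;
}

int main(void)
{
    printf("register access %s\n",
           wait_with_retries(true) ? "succeeded" : "failed");
    return 0;
}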
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 6a83425aa9ed..880ac113a3a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -251,8 +251,7 @@ struct amdgpu_virt {
251 uint32_t gim_feature; 251 uint32_t gim_feature;
252}; 252};
253 253
254#define AMDGPU_CSA_SIZE (8 * 1024) 254#define AMDGPU_CSA_SIZE (8 * 1024)
255#define AMDGPU_CSA_VADDR (AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE)
256 255
257#define amdgpu_sriov_enabled(adev) \ 256#define amdgpu_sriov_enabled(adev) \
258((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) 257((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
@@ -279,6 +278,8 @@ static inline bool is_virtual_machine(void)
279} 278}
280 279
281struct amdgpu_vm; 280struct amdgpu_vm;
281
282uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
282bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); 283bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
283int amdgpu_allocate_static_csa(struct amdgpu_device *adev); 284int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
284int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, 285int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5afbc5e714d0..24474294c92a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -75,7 +75,8 @@ struct amdgpu_pte_update_params {
75 /* indirect buffer to fill with commands */ 75 /* indirect buffer to fill with commands */
76 struct amdgpu_ib *ib; 76 struct amdgpu_ib *ib;
77 /* Function which actually does the update */ 77 /* Function which actually does the update */
78 void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, 78 void (*func)(struct amdgpu_pte_update_params *params,
79 struct amdgpu_bo *bo, uint64_t pe,
79 uint64_t addr, unsigned count, uint32_t incr, 80 uint64_t addr, unsigned count, uint32_t incr,
80 uint64_t flags); 81 uint64_t flags);
81 /* The next two are used during VM update by CPU 82 /* The next two are used during VM update by CPU
@@ -257,6 +258,104 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
257} 258}
258 259
259/** 260/**
261 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
262 *
263 * @adev: amdgpu_device pointer
264 * @bo: BO to clear
265 * @level: level this BO is at
266 *
267 * Root PD needs to be reserved when calling this.
268 */
269static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
270 struct amdgpu_vm *vm, struct amdgpu_bo *bo,
271 unsigned level, bool pte_support_ats)
272{
273 struct ttm_operation_ctx ctx = { true, false };
274 struct dma_fence *fence = NULL;
275 unsigned entries, ats_entries;
276 struct amdgpu_ring *ring;
277 struct amdgpu_job *job;
278 uint64_t addr;
279 int r;
280
281 addr = amdgpu_bo_gpu_offset(bo);
282 entries = amdgpu_bo_size(bo) / 8;
283
284 if (pte_support_ats) {
285 if (level == adev->vm_manager.root_level) {
286 ats_entries = amdgpu_vm_level_shift(adev, level);
287 ats_entries += AMDGPU_GPU_PAGE_SHIFT;
288 ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
289 ats_entries = min(ats_entries, entries);
290 entries -= ats_entries;
291 } else {
292 ats_entries = entries;
293 entries = 0;
294 }
295 } else {
296 ats_entries = 0;
297 }
298
299 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
300
301 r = reservation_object_reserve_shared(bo->tbo.resv);
302 if (r)
303 return r;
304
305 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
306 if (r)
307 goto error;
308
309 r = amdgpu_job_alloc_with_ib(adev, 64, &job);
310 if (r)
311 goto error;
312
313 if (ats_entries) {
314 uint64_t ats_value;
315
316 ats_value = AMDGPU_PTE_DEFAULT_ATC;
317 if (level != AMDGPU_VM_PTB)
318 ats_value |= AMDGPU_PDE_PTE;
319
320 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
321 ats_entries, 0, ats_value);
322 addr += ats_entries * 8;
323 }
324
325 if (entries)
326 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
327 entries, 0, 0);
328
329 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
330
331 WARN_ON(job->ibs[0].length_dw > 64);
332 r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
333 AMDGPU_FENCE_OWNER_UNDEFINED, false);
334 if (r)
335 goto error_free;
336
337 r = amdgpu_job_submit(job, ring, &vm->entity,
338 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
339 if (r)
340 goto error_free;
341
342 amdgpu_bo_fence(bo, fence, true);
343 dma_fence_put(fence);
344
345 if (bo->shadow)
346 return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
347 level, pte_support_ats);
348
349 return 0;
350
351error_free:
352 amdgpu_job_free(job);
353
354error:
355 return r;
356}
357
358/**
260 * amdgpu_vm_alloc_levels - allocate the PD/PT levels 359 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
261 * 360 *
262 * @adev: amdgpu_device pointer 361 * @adev: amdgpu_device pointer
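On ATS-capable configurations, amdgpu_vm_clear_bo above fills the entries that cover the low half of the address space with the ATS default value and zeroes the rest; the split point is AMDGPU_VA_HOLE_START shifted down by the level shift plus the GPU page shift. A worked example of that split, with constants chosen to mimic a 48-bit VA space and a 512-entry root PD (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; the real values come from the VM configuration. */
#define GPU_PAGE_SHIFT 12
#define VA_HOLE_START  0x0000800000000000ULL

/* Split a PD/PT's entries into "ATS" entries (covering the low half of the
 * address space) and ordinary entries. 'level_shift' is the number of
 * address bits one entry covers beyond the page shift. */
static void split_entries(unsigned entries, unsigned level_shift,
                          unsigned *ats_entries, unsigned *rest)
{
    uint64_t ats = VA_HOLE_START >> (level_shift + GPU_PAGE_SHIFT);

    if (ats > entries)
        ats = entries;
    *ats_entries = (unsigned)ats;
    *rest = entries - *ats_entries;
}

int main(void)
{
    unsigned ats, rest;

    /* A root PD with 512 entries, each covering 2^39 bytes of a 48-bit VA:
     * the hole starts at 2^47, so half of the entries get the ATS value. */
    split_entries(512, 39 - GPU_PAGE_SHIFT, &ats, &rest);
    printf("ats=%u zeroed=%u\n", ats, rest);   /* ats=256 zeroed=256 */
    return 0;
}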
@@ -270,13 +369,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
270 struct amdgpu_vm *vm, 369 struct amdgpu_vm *vm,
271 struct amdgpu_vm_pt *parent, 370 struct amdgpu_vm_pt *parent,
272 uint64_t saddr, uint64_t eaddr, 371 uint64_t saddr, uint64_t eaddr,
273 unsigned level) 372 unsigned level, bool ats)
274{ 373{
275 unsigned shift = amdgpu_vm_level_shift(adev, level); 374 unsigned shift = amdgpu_vm_level_shift(adev, level);
276 unsigned pt_idx, from, to; 375 unsigned pt_idx, from, to;
277 int r;
278 u64 flags; 376 u64 flags;
279 uint64_t init_value = 0; 377 int r;
280 378
281 if (!parent->entries) { 379 if (!parent->entries) {
282 unsigned num_entries = amdgpu_vm_num_entries(adev, level); 380 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
@@ -299,21 +397,13 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
299 saddr = saddr & ((1 << shift) - 1); 397 saddr = saddr & ((1 << shift) - 1);
300 eaddr = eaddr & ((1 << shift) - 1); 398 eaddr = eaddr & ((1 << shift) - 1);
301 399
302 flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | 400 flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
303 AMDGPU_GEM_CREATE_VRAM_CLEARED;
304 if (vm->use_cpu_for_update) 401 if (vm->use_cpu_for_update)
305 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 402 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
306 else 403 else
307 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | 404 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
308 AMDGPU_GEM_CREATE_SHADOW); 405 AMDGPU_GEM_CREATE_SHADOW);
309 406
310 if (vm->pte_support_ats) {
311 init_value = AMDGPU_PTE_DEFAULT_ATC;
312 if (level != AMDGPU_VM_PTB)
313 init_value |= AMDGPU_PDE_PTE;
314
315 }
316
317 /* walk over the address space and allocate the page tables */ 407 /* walk over the address space and allocate the page tables */
318 for (pt_idx = from; pt_idx <= to; ++pt_idx) { 408 for (pt_idx = from; pt_idx <= to; ++pt_idx) {
319 struct reservation_object *resv = vm->root.base.bo->tbo.resv; 409 struct reservation_object *resv = vm->root.base.bo->tbo.resv;
@@ -323,16 +413,23 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
323 if (!entry->base.bo) { 413 if (!entry->base.bo) {
324 r = amdgpu_bo_create(adev, 414 r = amdgpu_bo_create(adev,
325 amdgpu_vm_bo_size(adev, level), 415 amdgpu_vm_bo_size(adev, level),
326 AMDGPU_GPU_PAGE_SIZE, true, 416 AMDGPU_GPU_PAGE_SIZE,
327 AMDGPU_GEM_DOMAIN_VRAM, 417 AMDGPU_GEM_DOMAIN_VRAM, flags,
328 flags, 418 ttm_bo_type_kernel, resv, &pt);
329 NULL, resv, init_value, &pt);
330 if (r) 419 if (r)
331 return r; 420 return r;
332 421
422 r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
423 if (r) {
424 amdgpu_bo_unref(&pt->shadow);
425 amdgpu_bo_unref(&pt);
426 return r;
427 }
428
333 if (vm->use_cpu_for_update) { 429 if (vm->use_cpu_for_update) {
334 r = amdgpu_bo_kmap(pt, NULL); 430 r = amdgpu_bo_kmap(pt, NULL);
335 if (r) { 431 if (r) {
432 amdgpu_bo_unref(&pt->shadow);
336 amdgpu_bo_unref(&pt); 433 amdgpu_bo_unref(&pt);
337 return r; 434 return r;
338 } 435 }
@@ -356,7 +453,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
356 uint64_t sub_eaddr = (pt_idx == to) ? eaddr : 453 uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
357 ((1 << shift) - 1); 454 ((1 << shift) - 1);
358 r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr, 455 r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
359 sub_eaddr, level); 456 sub_eaddr, level, ats);
360 if (r) 457 if (r)
361 return r; 458 return r;
362 } 459 }
@@ -379,26 +476,29 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
379 struct amdgpu_vm *vm, 476 struct amdgpu_vm *vm,
380 uint64_t saddr, uint64_t size) 477 uint64_t saddr, uint64_t size)
381{ 478{
382 uint64_t last_pfn;
383 uint64_t eaddr; 479 uint64_t eaddr;
480 bool ats = false;
384 481
385 /* validate the parameters */ 482 /* validate the parameters */
386 if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK) 483 if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
387 return -EINVAL; 484 return -EINVAL;
388 485
389 eaddr = saddr + size - 1; 486 eaddr = saddr + size - 1;
390 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; 487
391 if (last_pfn >= adev->vm_manager.max_pfn) { 488 if (vm->pte_support_ats)
392 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n", 489 ats = saddr < AMDGPU_VA_HOLE_START;
393 last_pfn, adev->vm_manager.max_pfn);
394 return -EINVAL;
395 }
396 490
397 saddr /= AMDGPU_GPU_PAGE_SIZE; 491 saddr /= AMDGPU_GPU_PAGE_SIZE;
398 eaddr /= AMDGPU_GPU_PAGE_SIZE; 492 eaddr /= AMDGPU_GPU_PAGE_SIZE;
399 493
494 if (eaddr >= adev->vm_manager.max_pfn) {
495 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
496 eaddr, adev->vm_manager.max_pfn);
497 return -EINVAL;
498 }
499
400 return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 500 return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
401 adev->vm_manager.root_level); 501 adev->vm_manager.root_level, ats);
402} 502}
403 503
404/** 504/**
@@ -465,7 +565,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
465 565
466static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev) 566static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
467{ 567{
468 return (adev->mc.real_vram_size == adev->mc.visible_vram_size); 568 return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
469} 569}
470 570
471/** 571/**
@@ -491,14 +591,24 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
491 id->oa_base != job->oa_base || 591 id->oa_base != job->oa_base ||
492 id->oa_size != job->oa_size); 592 id->oa_size != job->oa_size);
493 bool vm_flush_needed = job->vm_needs_flush; 593 bool vm_flush_needed = job->vm_needs_flush;
594 bool pasid_mapping_needed = id->pasid != job->pasid ||
595 !id->pasid_mapping ||
596 !dma_fence_is_signaled(id->pasid_mapping);
597 struct dma_fence *fence = NULL;
494 unsigned patch_offset = 0; 598 unsigned patch_offset = 0;
495 int r; 599 int r;
496 600
497 if (amdgpu_vmid_had_gpu_reset(adev, id)) { 601 if (amdgpu_vmid_had_gpu_reset(adev, id)) {
498 gds_switch_needed = true; 602 gds_switch_needed = true;
499 vm_flush_needed = true; 603 vm_flush_needed = true;
604 pasid_mapping_needed = true;
500 } 605 }
501 606
607 gds_switch_needed &= !!ring->funcs->emit_gds_switch;
608 vm_flush_needed &= !!ring->funcs->emit_vm_flush;
609 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
610 ring->funcs->emit_wreg;
611
502 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) 612 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
503 return 0; 613 return 0;
504 614
@@ -508,23 +618,36 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
508 if (need_pipe_sync) 618 if (need_pipe_sync)
509 amdgpu_ring_emit_pipeline_sync(ring); 619 amdgpu_ring_emit_pipeline_sync(ring);
510 620
511 if (ring->funcs->emit_vm_flush && vm_flush_needed) { 621 if (vm_flush_needed) {
512 struct dma_fence *fence;
513
514 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); 622 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
515 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); 623 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
624 }
516 625
626 if (pasid_mapping_needed)
627 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
628
629 if (vm_flush_needed || pasid_mapping_needed) {
517 r = amdgpu_fence_emit(ring, &fence); 630 r = amdgpu_fence_emit(ring, &fence);
518 if (r) 631 if (r)
519 return r; 632 return r;
633 }
520 634
635 if (vm_flush_needed) {
521 mutex_lock(&id_mgr->lock); 636 mutex_lock(&id_mgr->lock);
522 dma_fence_put(id->last_flush); 637 dma_fence_put(id->last_flush);
523 id->last_flush = fence; 638 id->last_flush = dma_fence_get(fence);
524 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); 639 id->current_gpu_reset_count =
640 atomic_read(&adev->gpu_reset_counter);
525 mutex_unlock(&id_mgr->lock); 641 mutex_unlock(&id_mgr->lock);
526 } 642 }
527 643
644 if (pasid_mapping_needed) {
645 id->pasid = job->pasid;
646 dma_fence_put(id->pasid_mapping);
647 id->pasid_mapping = dma_fence_get(fence);
648 }
649 dma_fence_put(fence);
650
528 if (ring->funcs->emit_gds_switch && gds_switch_needed) { 651 if (ring->funcs->emit_gds_switch && gds_switch_needed) {
529 id->gds_base = job->gds_base; 652 id->gds_base = job->gds_base;
530 id->gds_size = job->gds_size; 653 id->gds_size = job->gds_size;
@@ -578,6 +701,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
578 * amdgpu_vm_do_set_ptes - helper to call the right asic function 701 * amdgpu_vm_do_set_ptes - helper to call the right asic function
579 * 702 *
580 * @params: see amdgpu_pte_update_params definition 703 * @params: see amdgpu_pte_update_params definition
704 * @bo: PD/PT to update
581 * @pe: addr of the page entry 705 * @pe: addr of the page entry
582 * @addr: dst addr to write into pe 706 * @addr: dst addr to write into pe
583 * @count: number of page entries to update 707 * @count: number of page entries to update
@@ -588,10 +712,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
588 * to setup the page table using the DMA. 712 * to setup the page table using the DMA.
589 */ 713 */
590static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params, 714static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
715 struct amdgpu_bo *bo,
591 uint64_t pe, uint64_t addr, 716 uint64_t pe, uint64_t addr,
592 unsigned count, uint32_t incr, 717 unsigned count, uint32_t incr,
593 uint64_t flags) 718 uint64_t flags)
594{ 719{
720 pe += amdgpu_bo_gpu_offset(bo);
595 trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); 721 trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
596 722
597 if (count < 3) { 723 if (count < 3) {
@@ -608,6 +734,7 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
608 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART 734 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
609 * 735 *
610 * @params: see amdgpu_pte_update_params definition 736 * @params: see amdgpu_pte_update_params definition
737 * @bo: PD/PT to update
611 * @pe: addr of the page entry 738 * @pe: addr of the page entry
612 * @addr: dst addr to write into pe 739 * @addr: dst addr to write into pe
613 * @count: number of page entries to update 740 * @count: number of page entries to update
@@ -617,13 +744,14 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
617 * Traces the parameters and calls the DMA function to copy the PTEs. 744 * Traces the parameters and calls the DMA function to copy the PTEs.
618 */ 745 */
619static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params, 746static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
747 struct amdgpu_bo *bo,
620 uint64_t pe, uint64_t addr, 748 uint64_t pe, uint64_t addr,
621 unsigned count, uint32_t incr, 749 unsigned count, uint32_t incr,
622 uint64_t flags) 750 uint64_t flags)
623{ 751{
624 uint64_t src = (params->src + (addr >> 12) * 8); 752 uint64_t src = (params->src + (addr >> 12) * 8);
625 753
626 754 pe += amdgpu_bo_gpu_offset(bo);
627 trace_amdgpu_vm_copy_ptes(pe, src, count); 755 trace_amdgpu_vm_copy_ptes(pe, src, count);
628 756
629 amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count); 757 amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
@@ -657,6 +785,7 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
657 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU 785 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
658 * 786 *
659 * @params: see amdgpu_pte_update_params definition 787 * @params: see amdgpu_pte_update_params definition
788 * @bo: PD/PT to update
660 * @pe: kmap addr of the page entry 789 * @pe: kmap addr of the page entry
661 * @addr: dst addr to write into pe 790 * @addr: dst addr to write into pe
662 * @count: number of page entries to update 791 * @count: number of page entries to update
@@ -666,6 +795,7 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
666 * Write count number of PT/PD entries directly. 795 * Write count number of PT/PD entries directly.
667 */ 796 */
668static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, 797static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
798 struct amdgpu_bo *bo,
669 uint64_t pe, uint64_t addr, 799 uint64_t pe, uint64_t addr,
670 unsigned count, uint32_t incr, 800 unsigned count, uint32_t incr,
671 uint64_t flags) 801 uint64_t flags)
@@ -673,14 +803,16 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
673 unsigned int i; 803 unsigned int i;
674 uint64_t value; 804 uint64_t value;
675 805
806 pe += (unsigned long)amdgpu_bo_kptr(bo);
807
676 trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); 808 trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
677 809
678 for (i = 0; i < count; i++) { 810 for (i = 0; i < count; i++) {
679 value = params->pages_addr ? 811 value = params->pages_addr ?
680 amdgpu_vm_map_gart(params->pages_addr, addr) : 812 amdgpu_vm_map_gart(params->pages_addr, addr) :
681 addr; 813 addr;
682 amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe, 814 amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
683 i, value, flags); 815 i, value, flags);
684 addr += incr; 816 addr += incr;
685 } 817 }
686} 818}
@@ -714,8 +846,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
714 struct amdgpu_vm_pt *parent, 846 struct amdgpu_vm_pt *parent,
715 struct amdgpu_vm_pt *entry) 847 struct amdgpu_vm_pt *entry)
716{ 848{
717 struct amdgpu_bo *bo = entry->base.bo, *shadow = NULL, *pbo; 849 struct amdgpu_bo *bo = parent->base.bo, *pbo;
718 uint64_t pd_addr, shadow_addr = 0;
719 uint64_t pde, pt, flags; 850 uint64_t pde, pt, flags;
720 unsigned level; 851 unsigned level;
721 852
@@ -723,29 +854,17 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
723 if (entry->huge) 854 if (entry->huge)
724 return; 855 return;
725 856
726 if (vm->use_cpu_for_update) { 857 for (level = 0, pbo = bo->parent; pbo; ++level)
727 pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
728 } else {
729 pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
730 shadow = parent->base.bo->shadow;
731 if (shadow)
732 shadow_addr = amdgpu_bo_gpu_offset(shadow);
733 }
734
735 for (level = 0, pbo = parent->base.bo->parent; pbo; ++level)
736 pbo = pbo->parent; 858 pbo = pbo->parent;
737 859
738 level += params->adev->vm_manager.root_level; 860 level += params->adev->vm_manager.root_level;
739 pt = amdgpu_bo_gpu_offset(bo); 861 pt = amdgpu_bo_gpu_offset(entry->base.bo);
740 flags = AMDGPU_PTE_VALID; 862 flags = AMDGPU_PTE_VALID;
741 amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags); 863 amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
742 if (shadow) { 864 pde = (entry - parent->entries) * 8;
743 pde = shadow_addr + (entry - parent->entries) * 8; 865 if (bo->shadow)
744 params->func(params, pde, pt, 1, 0, flags); 866 params->func(params, bo->shadow, pde, pt, 1, 0, flags);
745 } 867 params->func(params, bo, pde, pt, 1, 0, flags);
746
747 pde = pd_addr + (entry - parent->entries) * 8;
748 params->func(params, pde, pt, 1, 0, flags);
749} 868}
750 869
751/* 870/*
@@ -856,7 +975,7 @@ restart:
856 if (vm->use_cpu_for_update) { 975 if (vm->use_cpu_for_update) {
857 /* Flush HDP */ 976 /* Flush HDP */
858 mb(); 977 mb();
859 amdgpu_gart_flush_gpu_tlb(adev, 0); 978 amdgpu_asic_flush_hdp(adev, NULL);
860 } else if (params.ib->length_dw == 0) { 979 } else if (params.ib->length_dw == 0) {
861 amdgpu_job_free(job); 980 amdgpu_job_free(job);
862 } else { 981 } else {
@@ -870,11 +989,6 @@ restart:
870 amdgpu_ring_pad_ib(ring, params.ib); 989 amdgpu_ring_pad_ib(ring, params.ib);
871 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv, 990 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
872 AMDGPU_FENCE_OWNER_VM, false); 991 AMDGPU_FENCE_OWNER_VM, false);
873 if (root->shadow)
874 amdgpu_sync_resv(adev, &job->sync,
875 root->shadow->tbo.resv,
876 AMDGPU_FENCE_OWNER_VM, false);
877
878 WARN_ON(params.ib->length_dw > ndw); 992 WARN_ON(params.ib->length_dw > ndw);
879 r = amdgpu_job_submit(job, ring, &vm->entity, 993 r = amdgpu_job_submit(job, ring, &vm->entity,
880 AMDGPU_FENCE_OWNER_VM, &fence); 994 AMDGPU_FENCE_OWNER_VM, &fence);
@@ -946,7 +1060,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
946 unsigned nptes, uint64_t dst, 1060 unsigned nptes, uint64_t dst,
947 uint64_t flags) 1061 uint64_t flags)
948{ 1062{
949 uint64_t pd_addr, pde; 1063 uint64_t pde;
950 1064
951 /* In the case of a mixed PT the PDE must point to it*/ 1065 /* In the case of a mixed PT the PDE must point to it*/
952 if (p->adev->asic_type >= CHIP_VEGA10 && !p->src && 1066 if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
@@ -967,21 +1081,12 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
967 } 1081 }
968 1082
969 entry->huge = true; 1083 entry->huge = true;
970 amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0, 1084 amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
971 &dst, &flags);
972 1085
973 if (p->func == amdgpu_vm_cpu_set_ptes) { 1086 pde = (entry - parent->entries) * 8;
974 pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo); 1087 if (parent->base.bo->shadow)
975 } else { 1088 p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
976 if (parent->base.bo->shadow) { 1089 p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
977 pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
978 pde = pd_addr + (entry - parent->entries) * 8;
979 p->func(p, pde, dst, 1, 0, flags);
980 }
981 pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
982 }
983 pde = pd_addr + (entry - parent->entries) * 8;
984 p->func(p, pde, dst, 1, 0, flags);
985} 1090}
986 1091
987/** 1092/**
@@ -1007,7 +1112,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1007 uint64_t addr, pe_start; 1112 uint64_t addr, pe_start;
1008 struct amdgpu_bo *pt; 1113 struct amdgpu_bo *pt;
1009 unsigned nptes; 1114 unsigned nptes;
1010 bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
1011 1115
1012 /* walk over the address space and update the page tables */ 1116 /* walk over the address space and update the page tables */
1013 for (addr = start; addr < end; addr += nptes, 1117 for (addr = start; addr < end; addr += nptes,
@@ -1030,20 +1134,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1030 continue; 1134 continue;
1031 1135
1032 pt = entry->base.bo; 1136 pt = entry->base.bo;
1033 if (use_cpu_update) { 1137 pe_start = (addr & mask) * 8;
1034 pe_start = (unsigned long)amdgpu_bo_kptr(pt); 1138 if (pt->shadow)
1035 } else { 1139 params->func(params, pt->shadow, pe_start, dst, nptes,
1036 if (pt->shadow) { 1140 AMDGPU_GPU_PAGE_SIZE, flags);
1037 pe_start = amdgpu_bo_gpu_offset(pt->shadow); 1141 params->func(params, pt, pe_start, dst, nptes,
1038 pe_start += (addr & mask) * 8;
1039 params->func(params, pe_start, dst, nptes,
1040 AMDGPU_GPU_PAGE_SIZE, flags);
1041 }
1042 pe_start = amdgpu_bo_gpu_offset(pt);
1043 }
1044
1045 pe_start += (addr & mask) * 8;
1046 params->func(params, pe_start, dst, nptes,
1047 AMDGPU_GPU_PAGE_SIZE, flags); 1142 AMDGPU_GPU_PAGE_SIZE, flags);
1048 } 1143 }
1049 1144
@@ -1204,11 +1299,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1204 1299
1205 } else { 1300 } else {
1206 /* set page commands needed */ 1301 /* set page commands needed */
1207 ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw; 1302 ndw += ncmds * 10;
1208 1303
1209 /* extra commands for begin/end fragments */ 1304 /* extra commands for begin/end fragments */
1210 ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw 1305 ndw += 2 * 10 * adev->vm_manager.fragment_size;
1211 * adev->vm_manager.fragment_size;
1212 1306
1213 params.func = amdgpu_vm_do_set_ptes; 1307 params.func = amdgpu_vm_do_set_ptes;
1214 } 1308 }
@@ -1457,7 +1551,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1457 if (vm->use_cpu_for_update) { 1551 if (vm->use_cpu_for_update) {
1458 /* Flush HDP */ 1552 /* Flush HDP */
1459 mb(); 1553 mb();
1460 amdgpu_gart_flush_gpu_tlb(adev, 0); 1554 amdgpu_asic_flush_hdp(adev, NULL);
1461 } 1555 }
1462 1556
1463 spin_lock(&vm->status_lock); 1557 spin_lock(&vm->status_lock);
@@ -1485,7 +1579,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1485 1579
1486 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); 1580 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1487 enable = !!atomic_read(&adev->vm_manager.num_prt_users); 1581 enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1488 adev->gart.gart_funcs->set_prt(adev, enable); 1582 adev->gmc.gmc_funcs->set_prt(adev, enable);
1489 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); 1583 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1490} 1584}
1491 1585
@@ -1494,7 +1588,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1494 */ 1588 */
1495static void amdgpu_vm_prt_get(struct amdgpu_device *adev) 1589static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1496{ 1590{
1497 if (!adev->gart.gart_funcs->set_prt) 1591 if (!adev->gmc.gmc_funcs->set_prt)
1498 return; 1592 return;
1499 1593
1500 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1) 1594 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1529,7 +1623,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1529{ 1623{
1530 struct amdgpu_prt_cb *cb; 1624 struct amdgpu_prt_cb *cb;
1531 1625
1532 if (!adev->gart.gart_funcs->set_prt) 1626 if (!adev->gmc.gmc_funcs->set_prt)
1533 return; 1627 return;
1534 1628
1535 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL); 1629 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -1623,16 +1717,16 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1623 struct dma_fence **fence) 1717 struct dma_fence **fence)
1624{ 1718{
1625 struct amdgpu_bo_va_mapping *mapping; 1719 struct amdgpu_bo_va_mapping *mapping;
1720 uint64_t init_pte_value = 0;
1626 struct dma_fence *f = NULL; 1721 struct dma_fence *f = NULL;
1627 int r; 1722 int r;
1628 uint64_t init_pte_value = 0;
1629 1723
1630 while (!list_empty(&vm->freed)) { 1724 while (!list_empty(&vm->freed)) {
1631 mapping = list_first_entry(&vm->freed, 1725 mapping = list_first_entry(&vm->freed,
1632 struct amdgpu_bo_va_mapping, list); 1726 struct amdgpu_bo_va_mapping, list);
1633 list_del(&mapping->list); 1727 list_del(&mapping->list);
1634 1728
1635 if (vm->pte_support_ats) 1729 if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1636 init_pte_value = AMDGPU_PTE_DEFAULT_ATC; 1730 init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1637 1731
1638 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, 1732 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
@@ -2262,11 +2356,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2262{ 2356{
2263 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, 2357 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2264 AMDGPU_VM_PTE_COUNT(adev) * 8); 2358 AMDGPU_VM_PTE_COUNT(adev) * 8);
2265 uint64_t init_pde_value = 0, flags;
2266 unsigned ring_instance; 2359 unsigned ring_instance;
2267 struct amdgpu_ring *ring; 2360 struct amdgpu_ring *ring;
2268 struct drm_sched_rq *rq; 2361 struct drm_sched_rq *rq;
2269 unsigned long size; 2362 unsigned long size;
2363 uint64_t flags;
2270 int r, i; 2364 int r, i;
2271 2365
2272 vm->va = RB_ROOT_CACHED; 2366 vm->va = RB_ROOT_CACHED;
@@ -2295,23 +2389,19 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2295 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2389 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2296 AMDGPU_VM_USE_CPU_FOR_COMPUTE); 2390 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2297 2391
2298 if (adev->asic_type == CHIP_RAVEN) { 2392 if (adev->asic_type == CHIP_RAVEN)
2299 vm->pte_support_ats = true; 2393 vm->pte_support_ats = true;
2300 init_pde_value = AMDGPU_PTE_DEFAULT_ATC 2394 } else {
2301 | AMDGPU_PDE_PTE;
2302
2303 }
2304 } else
2305 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2395 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2306 AMDGPU_VM_USE_CPU_FOR_GFX); 2396 AMDGPU_VM_USE_CPU_FOR_GFX);
2397 }
2307 DRM_DEBUG_DRIVER("VM update mode is %s\n", 2398 DRM_DEBUG_DRIVER("VM update mode is %s\n",
2308 vm->use_cpu_for_update ? "CPU" : "SDMA"); 2399 vm->use_cpu_for_update ? "CPU" : "SDMA");
2309 WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)), 2400 WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
2310 "CPU update of VM recommended only for large BAR system\n"); 2401 "CPU update of VM recommended only for large BAR system\n");
2311 vm->last_update = NULL; 2402 vm->last_update = NULL;
2312 2403
2313 flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | 2404 flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2314 AMDGPU_GEM_CREATE_VRAM_CLEARED;
2315 if (vm->use_cpu_for_update) 2405 if (vm->use_cpu_for_update)
2316 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 2406 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2317 else 2407 else
@@ -2319,9 +2409,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2319 AMDGPU_GEM_CREATE_SHADOW); 2409 AMDGPU_GEM_CREATE_SHADOW);
2320 2410
2321 size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level); 2411 size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2322 r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM, 2412 r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
2323 flags, NULL, NULL, init_pde_value, 2413 ttm_bo_type_kernel, NULL, &vm->root.base.bo);
2324 &vm->root.base.bo);
2325 if (r) 2414 if (r)
2326 goto error_free_sched_entity; 2415 goto error_free_sched_entity;
2327 2416
@@ -2329,6 +2418,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2329 if (r) 2418 if (r)
2330 goto error_free_root; 2419 goto error_free_root;
2331 2420
2421 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2422 adev->vm_manager.root_level,
2423 vm->pte_support_ats);
2424 if (r)
2425 goto error_unreserve;
2426
2332 vm->root.base.vm = vm; 2427 vm->root.base.vm = vm;
2333 list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va); 2428 list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
2334 list_add_tail(&vm->root.base.vm_status, &vm->evicted); 2429 list_add_tail(&vm->root.base.vm_status, &vm->evicted);
@@ -2352,6 +2447,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2352 2447
2353 return 0; 2448 return 0;
2354 2449
2450error_unreserve:
2451 amdgpu_bo_unreserve(vm->root.base.bo);
2452
2355error_free_root: 2453error_free_root:
2356 amdgpu_bo_unref(&vm->root.base.bo->shadow); 2454 amdgpu_bo_unref(&vm->root.base.bo->shadow);
2357 amdgpu_bo_unref(&vm->root.base.bo); 2455 amdgpu_bo_unref(&vm->root.base.bo);
@@ -2405,7 +2503,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2405void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 2503void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2406{ 2504{
2407 struct amdgpu_bo_va_mapping *mapping, *tmp; 2505 struct amdgpu_bo_va_mapping *mapping, *tmp;
2408 bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; 2506 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2409 struct amdgpu_bo *root; 2507 struct amdgpu_bo *root;
2410 u64 fault; 2508 u64 fault;
2411 int i, r; 2509 int i, r;
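
Note on the amdgpu_vm_flush() hunks above: the rework emits at most one fence and shares it between id->last_flush and id->pasid_mapping, taking a dma_fence_get() reference for each consumer and dropping the local reference once at the end. Below is a minimal stand-alone sketch of that ownership pattern; the fence type and the get/put/emit helpers are simplified stand-ins for struct dma_fence, dma_fence_get(), dma_fence_put() and amdgpu_fence_emit(), not the driver code itself.

/*
 * Stand-alone sketch of the fence ownership pattern from the
 * amdgpu_vm_flush() hunk: one fence is emitted, each consumer
 * (last_flush, pasid_mapping) takes its own reference, and the
 * local reference is dropped once at the end.
 */
#include <stdio.h>
#include <stdlib.h>

struct fence { int refcount; };

static struct fence *fence_emit(void)
{
	struct fence *f = malloc(sizeof(*f));
	f->refcount = 1;			/* reference held by the caller */
	return f;
}

static struct fence *fence_get(struct fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void fence_put(struct fence *f)
{
	if (f && --f->refcount == 0)
		free(f);
}

struct vmid {
	struct fence *last_flush;
	struct fence *pasid_mapping;
};

static void flush(struct vmid *id, int vm_flush_needed, int pasid_mapping_needed)
{
	struct fence *fence = NULL;

	if (vm_flush_needed || pasid_mapping_needed)
		fence = fence_emit();

	if (vm_flush_needed) {
		fence_put(id->last_flush);		/* drop the previous flush fence */
		id->last_flush = fence_get(fence);	/* take our own reference */
	}

	if (pasid_mapping_needed) {
		fence_put(id->pasid_mapping);
		id->pasid_mapping = fence_get(fence);
	}

	fence_put(fence);	/* drop the local reference from fence_emit() */
}

int main(void)
{
	struct vmid id = { NULL, NULL };

	flush(&id, 1, 1);
	printf("refcount after flush: %d\n", id.last_flush->refcount); /* prints 2 */
	return 0;
}
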
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 21a80f1bb2b9..e9841518343e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -28,6 +28,7 @@
28#include <linux/kfifo.h> 28#include <linux/kfifo.h>
29#include <linux/rbtree.h> 29#include <linux/rbtree.h>
30#include <drm/gpu_scheduler.h> 30#include <drm/gpu_scheduler.h>
31#include <drm/drm_file.h>
31 32
32#include "amdgpu_sync.h" 33#include "amdgpu_sync.h"
33#include "amdgpu_ring.h" 34#include "amdgpu_ring.h"
@@ -99,7 +100,7 @@ struct amdgpu_bo_list_entry;
99#define AMDGPU_MMHUB 1 100#define AMDGPU_MMHUB 1
100 101
101/* hardcode that limit for now */ 102/* hardcode that limit for now */
102#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20) 103#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
103 104
104/* VA hole for 48bit addresses on Vega10 */ 105/* VA hole for 48bit addresses on Vega10 */
105#define AMDGPU_VA_HOLE_START 0x0000800000000000ULL 106#define AMDGPU_VA_HOLE_START 0x0000800000000000ULL
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 4acca92f6a52..9aca653bec07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -89,11 +89,11 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
89 uint64_t start = node->start << PAGE_SHIFT; 89 uint64_t start = node->start << PAGE_SHIFT;
90 uint64_t end = (node->size + node->start) << PAGE_SHIFT; 90 uint64_t end = (node->size + node->start) << PAGE_SHIFT;
91 91
92 if (start >= adev->mc.visible_vram_size) 92 if (start >= adev->gmc.visible_vram_size)
93 return 0; 93 return 0;
94 94
95 return (end > adev->mc.visible_vram_size ? 95 return (end > adev->gmc.visible_vram_size ?
96 adev->mc.visible_vram_size : end) - start; 96 adev->gmc.visible_vram_size : end) - start;
97} 97}
98 98
99/** 99/**
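
The amdgpu_vram_mgr_vis_size() hunk above only renames adev->mc to adev->gmc; the computation itself intersects a VRAM node with the CPU-visible aperture. A stand-alone sketch of that clamp follows; PAGE_SHIFT and the sizes used in main() are illustrative, not values from the driver.

/*
 * Sketch of the visible-VRAM intersection computed by
 * amdgpu_vram_mgr_vis_size(): a node occupying [start, end) contributes
 * min(end, visible_vram_size) - start bytes, or 0 if it starts past the
 * visible aperture.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* illustrative page size of 4 KiB */

static uint64_t vis_size(uint64_t node_start, uint64_t node_pages,
			 uint64_t visible_vram_size)
{
	uint64_t start = node_start << PAGE_SHIFT;
	uint64_t end = (node_start + node_pages) << PAGE_SHIFT;

	if (start >= visible_vram_size)
		return 0;

	return (end > visible_vram_size ? visible_vram_size : end) - start;
}

int main(void)
{
	/* 256 MiB visible aperture, a 16-page node straddling the boundary */
	uint64_t visible = 256ull << 20;
	uint64_t start_page = (visible >> PAGE_SHIFT) - 8;

	printf("visible bytes: %llu\n",
	       (unsigned long long)vis_size(start_page, 16, visible)); /* 32768 */
	return 0;
}
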
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 2af26d2da127..d702fb8e3427 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -34,7 +34,7 @@
34#include <linux/backlight.h> 34#include <linux/backlight.h>
35#include "bif/bif_4_1_d.h" 35#include "bif/bif_4_1_d.h"
36 36
37static u8 37u8
38amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev) 38amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
39{ 39{
40 u8 backlight_level; 40 u8 backlight_level;
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
48 return backlight_level; 48 return backlight_level;
49} 49}
50 50
51static void 51void
52amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev, 52amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
53 u8 backlight_level) 53 u8 backlight_level)
54{ 54{
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
index 2bdec40515ce..f77cbdef679e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
@@ -25,6 +25,11 @@
25#define __ATOMBIOS_ENCODER_H__ 25#define __ATOMBIOS_ENCODER_H__
26 26
27u8 27u8
28amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
29void
30amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
31 u8 backlight_level);
32u8
28amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder); 33amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
29void 34void
30amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, 35amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index a0943aa8d1d3..98d1dd253596 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -65,6 +65,8 @@ MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
65#define VOLTAGE_VID_OFFSET_SCALE1 625 65#define VOLTAGE_VID_OFFSET_SCALE1 625
66#define VOLTAGE_VID_OFFSET_SCALE2 100 66#define VOLTAGE_VID_OFFSET_SCALE2 100
67 67
68static const struct amd_pm_funcs ci_dpm_funcs;
69
68static const struct ci_pt_defaults defaults_hawaii_xt = 70static const struct ci_pt_defaults defaults_hawaii_xt =
69{ 71{
70 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, 72 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
@@ -905,7 +907,7 @@ static bool ci_dpm_vblank_too_short(void *handle)
905{ 907{
906 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 908 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
907 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); 909 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
908 u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; 910 u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
909 911
910 /* disable mclk switching if the refresh is >120Hz, even if the 912 /* disable mclk switching if the refresh is >120Hz, even if the
911 * blanking period would allow it 913 * blanking period would allow it
@@ -2954,7 +2956,7 @@ static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2954 mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK; 2956 mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2955 mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT); 2957 mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2956 2958
2957 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { 2959 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2958 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK | 2960 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2959 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK); 2961 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2960 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) | 2962 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
@@ -3077,7 +3079,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
3077 (memory_clock <= pi->mclk_strobe_mode_threshold)) 3079 (memory_clock <= pi->mclk_strobe_mode_threshold))
3078 memory_level->StrobeEnable = 1; 3080 memory_level->StrobeEnable = 1;
3079 3081
3080 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { 3082 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3081 memory_level->StrobeRatio = 3083 memory_level->StrobeRatio =
3082 ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable); 3084 ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3083 if (pi->mclk_edc_enable_threshold && 3085 if (pi->mclk_edc_enable_threshold &&
@@ -3695,40 +3697,6 @@ static int ci_find_boot_level(struct ci_single_dpm_table *table,
3695 return ret; 3697 return ret;
3696} 3698}
3697 3699
3698static void ci_save_default_power_profile(struct amdgpu_device *adev)
3699{
3700 struct ci_power_info *pi = ci_get_pi(adev);
3701 struct SMU7_Discrete_GraphicsLevel *levels =
3702 pi->smc_state_table.GraphicsLevel;
3703 uint32_t min_level = 0;
3704
3705 pi->default_gfx_power_profile.activity_threshold =
3706 be16_to_cpu(levels[0].ActivityLevel);
3707 pi->default_gfx_power_profile.up_hyst = levels[0].UpH;
3708 pi->default_gfx_power_profile.down_hyst = levels[0].DownH;
3709 pi->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
3710
3711 pi->default_compute_power_profile = pi->default_gfx_power_profile;
3712 pi->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
3713
3714 /* Optimize compute power profile: Use only highest
3715 * 2 power levels (if more than 2 are available), Hysteresis:
3716 * 0ms up, 5ms down
3717 */
3718 if (pi->smc_state_table.GraphicsDpmLevelCount > 2)
3719 min_level = pi->smc_state_table.GraphicsDpmLevelCount - 2;
3720 else if (pi->smc_state_table.GraphicsDpmLevelCount == 2)
3721 min_level = 1;
3722 pi->default_compute_power_profile.min_sclk =
3723 be32_to_cpu(levels[min_level].SclkFrequency);
3724
3725 pi->default_compute_power_profile.up_hyst = 0;
3726 pi->default_compute_power_profile.down_hyst = 5;
3727
3728 pi->gfx_power_profile = pi->default_gfx_power_profile;
3729 pi->compute_power_profile = pi->default_compute_power_profile;
3730}
3731
3732static int ci_init_smc_table(struct amdgpu_device *adev) 3700static int ci_init_smc_table(struct amdgpu_device *adev)
3733{ 3701{
3734 struct ci_power_info *pi = ci_get_pi(adev); 3702 struct ci_power_info *pi = ci_get_pi(adev);
@@ -3752,7 +3720,7 @@ static int ci_init_smc_table(struct amdgpu_device *adev)
3752 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) 3720 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3753 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; 3721 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3754 3722
3755 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) 3723 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3756 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 3724 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3757 3725
3758 if (ulv->supported) { 3726 if (ulv->supported) {
@@ -3874,8 +3842,6 @@ static int ci_init_smc_table(struct amdgpu_device *adev)
3874 if (ret) 3842 if (ret)
3875 return ret; 3843 return ret;
3876 3844
3877 ci_save_default_power_profile(adev);
3878
3879 return 0; 3845 return 0;
3880} 3846}
3881 3847
@@ -4549,12 +4515,12 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4549 for (k = 0; k < table->num_entries; k++) { 4515 for (k = 0; k < table->num_entries; k++) {
4550 table->mc_reg_table_entry[k].mc_data[j] = 4516 table->mc_reg_table_entry[k].mc_data[j] =
4551 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 4517 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4552 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) 4518 if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4553 table->mc_reg_table_entry[k].mc_data[j] |= 0x100; 4519 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4554 } 4520 }
4555 j++; 4521 j++;
4556 4522
4557 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) { 4523 if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4558 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4524 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4559 return -EINVAL; 4525 return -EINVAL;
4560 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; 4526 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
@@ -6277,6 +6243,7 @@ static int ci_dpm_early_init(void *handle)
6277{ 6243{
6278 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6244 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6279 6245
6246 adev->powerplay.pp_funcs = &ci_dpm_funcs;
6280 ci_dpm_set_irq_funcs(adev); 6247 ci_dpm_set_irq_funcs(adev);
6281 6248
6282 return 0; 6249 return 0;
@@ -6639,9 +6606,10 @@ static int ci_dpm_force_clock_level(void *handle,
6639 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6606 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6640 struct ci_power_info *pi = ci_get_pi(adev); 6607 struct ci_power_info *pi = ci_get_pi(adev);
6641 6608
6642 if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO | 6609 if (adev->pm.dpm.forced_level != AMD_DPM_FORCED_LEVEL_MANUAL)
6643 AMD_DPM_FORCED_LEVEL_LOW | 6610 return -EINVAL;
6644 AMD_DPM_FORCED_LEVEL_HIGH)) 6611
6612 if (mask == 0)
6645 return -EINVAL; 6613 return -EINVAL;
6646 6614
6647 switch (type) { 6615 switch (type) {
@@ -6662,15 +6630,15 @@ static int ci_dpm_force_clock_level(void *handle,
6662 case PP_PCIE: 6630 case PP_PCIE:
6663 { 6631 {
6664 uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask; 6632 uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
6665 uint32_t level = 0;
6666 6633
6667 while (tmp >>= 1) 6634 if (!pi->pcie_dpm_key_disabled) {
6668 level++; 6635 if (fls(tmp) != ffs(tmp))
6669 6636 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_UnForceLevel);
6670 if (!pi->pcie_dpm_key_disabled) 6637 else
6671 amdgpu_ci_send_msg_to_smc_with_parameter(adev, 6638 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6672 PPSMC_MSG_PCIeDPM_ForceLevel, 6639 PPSMC_MSG_PCIeDPM_ForceLevel,
6673 level); 6640 fls(tmp) - 1);
6641 }
6674 break; 6642 break;
6675 } 6643 }
6676 default: 6644 default:
@@ -6752,222 +6720,6 @@ static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
6752 return 0; 6720 return 0;
6753} 6721}
6754 6722
6755static int ci_dpm_get_power_profile_state(void *handle,
6756 struct amd_pp_profile *query)
6757{
6758 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6759 struct ci_power_info *pi = ci_get_pi(adev);
6760
6761 if (!pi || !query)
6762 return -EINVAL;
6763
6764 if (query->type == AMD_PP_GFX_PROFILE)
6765 memcpy(query, &pi->gfx_power_profile,
6766 sizeof(struct amd_pp_profile));
6767 else if (query->type == AMD_PP_COMPUTE_PROFILE)
6768 memcpy(query, &pi->compute_power_profile,
6769 sizeof(struct amd_pp_profile));
6770 else
6771 return -EINVAL;
6772
6773 return 0;
6774}
6775
6776static int ci_populate_requested_graphic_levels(struct amdgpu_device *adev,
6777 struct amd_pp_profile *request)
6778{
6779 struct ci_power_info *pi = ci_get_pi(adev);
6780 struct ci_dpm_table *dpm_table = &(pi->dpm_table);
6781 struct SMU7_Discrete_GraphicsLevel *levels =
6782 pi->smc_state_table.GraphicsLevel;
6783 uint32_t array = pi->dpm_table_start +
6784 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
6785 uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
6786 SMU7_MAX_LEVELS_GRAPHICS;
6787 uint32_t i;
6788
6789 for (i = 0; i < dpm_table->sclk_table.count; i++) {
6790 levels[i].ActivityLevel =
6791 cpu_to_be16(request->activity_threshold);
6792 levels[i].EnabledForActivity = 1;
6793 levels[i].UpH = request->up_hyst;
6794 levels[i].DownH = request->down_hyst;
6795 }
6796
6797 return amdgpu_ci_copy_bytes_to_smc(adev, array, (uint8_t *)levels,
6798 array_size, pi->sram_end);
6799}
6800
6801static void ci_find_min_clock_masks(struct amdgpu_device *adev,
6802 uint32_t *sclk_mask, uint32_t *mclk_mask,
6803 uint32_t min_sclk, uint32_t min_mclk)
6804{
6805 struct ci_power_info *pi = ci_get_pi(adev);
6806 struct ci_dpm_table *dpm_table = &(pi->dpm_table);
6807 uint32_t i;
6808
6809 for (i = 0; i < dpm_table->sclk_table.count; i++) {
6810 if (dpm_table->sclk_table.dpm_levels[i].enabled &&
6811 dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
6812 *sclk_mask |= 1 << i;
6813 }
6814
6815 for (i = 0; i < dpm_table->mclk_table.count; i++) {
6816 if (dpm_table->mclk_table.dpm_levels[i].enabled &&
6817 dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
6818 *mclk_mask |= 1 << i;
6819 }
6820}
6821
6822static int ci_set_power_profile_state(struct amdgpu_device *adev,
6823 struct amd_pp_profile *request)
6824{
6825 struct ci_power_info *pi = ci_get_pi(adev);
6826 int tmp_result, result = 0;
6827 uint32_t sclk_mask = 0, mclk_mask = 0;
6828
6829 tmp_result = ci_freeze_sclk_mclk_dpm(adev);
6830 if (tmp_result) {
6831 DRM_ERROR("Failed to freeze SCLK MCLK DPM!");
6832 result = tmp_result;
6833 }
6834
6835 tmp_result = ci_populate_requested_graphic_levels(adev,
6836 request);
6837 if (tmp_result) {
6838 DRM_ERROR("Failed to populate requested graphic levels!");
6839 result = tmp_result;
6840 }
6841
6842 tmp_result = ci_unfreeze_sclk_mclk_dpm(adev);
6843 if (tmp_result) {
6844 DRM_ERROR("Failed to unfreeze SCLK MCLK DPM!");
6845 result = tmp_result;
6846 }
6847
6848 ci_find_min_clock_masks(adev, &sclk_mask, &mclk_mask,
6849 request->min_sclk, request->min_mclk);
6850
6851 if (sclk_mask) {
6852 if (!pi->sclk_dpm_key_disabled)
6853 amdgpu_ci_send_msg_to_smc_with_parameter(
6854 adev,
6855 PPSMC_MSG_SCLKDPM_SetEnabledMask,
6856 pi->dpm_level_enable_mask.
6857 sclk_dpm_enable_mask &
6858 sclk_mask);
6859 }
6860
6861 if (mclk_mask) {
6862 if (!pi->mclk_dpm_key_disabled)
6863 amdgpu_ci_send_msg_to_smc_with_parameter(
6864 adev,
6865 PPSMC_MSG_MCLKDPM_SetEnabledMask,
6866 pi->dpm_level_enable_mask.
6867 mclk_dpm_enable_mask &
6868 mclk_mask);
6869 }
6870
6871
6872 return result;
6873}
6874
6875static int ci_dpm_set_power_profile_state(void *handle,
6876 struct amd_pp_profile *request)
6877{
6878 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6879 struct ci_power_info *pi = ci_get_pi(adev);
6880 int ret = -1;
6881
6882 if (!pi || !request)
6883 return -EINVAL;
6884
6885 if (adev->pm.dpm.forced_level !=
6886 AMD_DPM_FORCED_LEVEL_AUTO)
6887 return -EINVAL;
6888
6889 if (request->min_sclk ||
6890 request->min_mclk ||
6891 request->activity_threshold ||
6892 request->up_hyst ||
6893 request->down_hyst) {
6894 if (request->type == AMD_PP_GFX_PROFILE)
6895 memcpy(&pi->gfx_power_profile, request,
6896 sizeof(struct amd_pp_profile));
6897 else if (request->type == AMD_PP_COMPUTE_PROFILE)
6898 memcpy(&pi->compute_power_profile, request,
6899 sizeof(struct amd_pp_profile));
6900 else
6901 return -EINVAL;
6902
6903 if (request->type == pi->current_power_profile)
6904 ret = ci_set_power_profile_state(
6905 adev,
6906 request);
6907 } else {
6908 /* set power profile if it exists */
6909 switch (request->type) {
6910 case AMD_PP_GFX_PROFILE:
6911 ret = ci_set_power_profile_state(
6912 adev,
6913 &pi->gfx_power_profile);
6914 break;
6915 case AMD_PP_COMPUTE_PROFILE:
6916 ret = ci_set_power_profile_state(
6917 adev,
6918 &pi->compute_power_profile);
6919 break;
6920 default:
6921 return -EINVAL;
6922 }
6923 }
6924
6925 if (!ret)
6926 pi->current_power_profile = request->type;
6927
6928 return 0;
6929}
6930
6931static int ci_dpm_reset_power_profile_state(void *handle,
6932 struct amd_pp_profile *request)
6933{
6934 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6935 struct ci_power_info *pi = ci_get_pi(adev);
6936
6937 if (!pi || !request)
6938 return -EINVAL;
6939
6940 if (request->type == AMD_PP_GFX_PROFILE) {
6941 pi->gfx_power_profile = pi->default_gfx_power_profile;
6942 return ci_dpm_set_power_profile_state(adev,
6943 &pi->gfx_power_profile);
6944 } else if (request->type == AMD_PP_COMPUTE_PROFILE) {
6945 pi->compute_power_profile =
6946 pi->default_compute_power_profile;
6947 return ci_dpm_set_power_profile_state(adev,
6948 &pi->compute_power_profile);
6949 } else
6950 return -EINVAL;
6951}
6952
6953static int ci_dpm_switch_power_profile(void *handle,
6954 enum amd_pp_profile_type type)
6955{
6956 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6957 struct ci_power_info *pi = ci_get_pi(adev);
6958 struct amd_pp_profile request = {0};
6959
6960 if (!pi)
6961 return -EINVAL;
6962
6963 if (pi->current_power_profile != type) {
6964 request.type = type;
6965 return ci_dpm_set_power_profile_state(adev, &request);
6966 }
6967
6968 return 0;
6969}
6970
6971static int ci_dpm_read_sensor(void *handle, int idx, 6723static int ci_dpm_read_sensor(void *handle, int idx,
6972 void *value, int *size) 6724 void *value, int *size)
6973{ 6725{
@@ -7011,7 +6763,7 @@ static int ci_dpm_read_sensor(void *handle, int idx,
7011 } 6763 }
7012} 6764}
7013 6765
7014const struct amd_ip_funcs ci_dpm_ip_funcs = { 6766static const struct amd_ip_funcs ci_dpm_ip_funcs = {
7015 .name = "ci_dpm", 6767 .name = "ci_dpm",
7016 .early_init = ci_dpm_early_init, 6768 .early_init = ci_dpm_early_init,
7017 .late_init = ci_dpm_late_init, 6769 .late_init = ci_dpm_late_init,
@@ -7028,8 +6780,16 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
7028 .set_powergating_state = ci_dpm_set_powergating_state, 6780 .set_powergating_state = ci_dpm_set_powergating_state,
7029}; 6781};
7030 6782
7031const struct amd_pm_funcs ci_dpm_funcs = { 6783const struct amdgpu_ip_block_version ci_smu_ip_block =
7032 .get_temperature = &ci_dpm_get_temp, 6784{
6785 .type = AMD_IP_BLOCK_TYPE_SMC,
6786 .major = 7,
6787 .minor = 0,
6788 .rev = 0,
6789 .funcs = &ci_dpm_ip_funcs,
6790};
6791
6792static const struct amd_pm_funcs ci_dpm_funcs = {
7033 .pre_set_power_state = &ci_dpm_pre_set_power_state, 6793 .pre_set_power_state = &ci_dpm_pre_set_power_state,
7034 .set_power_state = &ci_dpm_set_power_state, 6794 .set_power_state = &ci_dpm_set_power_state,
7035 .post_set_power_state = &ci_dpm_post_set_power_state, 6795 .post_set_power_state = &ci_dpm_post_set_power_state,
@@ -7053,10 +6813,6 @@ const struct amd_pm_funcs ci_dpm_funcs = {
7053 .set_mclk_od = ci_dpm_set_mclk_od, 6813 .set_mclk_od = ci_dpm_set_mclk_od,
7054 .check_state_equal = ci_check_state_equal, 6814 .check_state_equal = ci_check_state_equal,
7055 .get_vce_clock_state = amdgpu_get_vce_clock_state, 6815 .get_vce_clock_state = amdgpu_get_vce_clock_state,
7056 .get_power_profile_state = ci_dpm_get_power_profile_state,
7057 .set_power_profile_state = ci_dpm_set_power_profile_state,
7058 .reset_power_profile_state = ci_dpm_reset_power_profile_state,
7059 .switch_power_profile = ci_dpm_switch_power_profile,
7060 .read_sensor = ci_dpm_read_sensor, 6816 .read_sensor = ci_dpm_read_sensor,
7061}; 6817};
7062 6818
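
In the PP_PCIE branch of ci_dpm_force_clock_level() above, fls(tmp) and ffs(tmp) return the 1-based positions of the highest and lowest set bit, so they are equal only when exactly one PCIe level is selected; in that case fls(tmp) - 1 is the 0-based level sent with PPSMC_MSG_PCIeDPM_ForceLevel, otherwise DPM is un-forced. A small stand-alone illustration follows; my_fls()/my_ffs() are portable stand-ins for the kernel helpers.

/*
 * Illustration of the fls()/ffs() test used in the PP_PCIE branch:
 * exactly one bit set in the mask means "force this level",
 * anything else means "un-force".
 */
#include <stdio.h>

static int my_ffs(unsigned int x)	/* 1-based index of lowest set bit, 0 if none */
{
	return x ? __builtin_ctz(x) + 1 : 0;
}

static int my_fls(unsigned int x)	/* 1-based index of highest set bit, 0 if none */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int masks[] = { 0x4, 0x6, 0x1 };
	unsigned int i;

	for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
		unsigned int tmp = masks[i];

		if (my_fls(tmp) != my_ffs(tmp))
			printf("mask 0x%x: more than one level -> un-force DPM\n", tmp);
		else
			printf("mask 0x%x: force PCIe DPM level %d\n", tmp, my_fls(tmp) - 1);
	}
	return 0;
}
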
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
index 84cbc9c45f4d..91be2996ae7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
@@ -295,13 +295,6 @@ struct ci_power_info {
295 bool fan_is_controlled_by_smc; 295 bool fan_is_controlled_by_smc;
296 u32 t_min; 296 u32 t_min;
297 u32 fan_ctrl_default_mode; 297 u32 fan_ctrl_default_mode;
298
299 /* power profile */
300 struct amd_pp_profile gfx_power_profile;
301 struct amd_pp_profile compute_power_profile;
302 struct amd_pp_profile default_gfx_power_profile;
303 struct amd_pp_profile default_compute_power_profile;
304 enum amd_pp_profile_type current_power_profile;
305}; 298};
306 299
307#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0 300#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 8e59e65efd44..0df22030e713 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -67,7 +67,6 @@
67 67
68#include "amdgpu_dm.h" 68#include "amdgpu_dm.h"
69#include "amdgpu_amdkfd.h" 69#include "amdgpu_amdkfd.h"
70#include "amdgpu_powerplay.h"
71#include "dce_virtual.h" 70#include "dce_virtual.h"
72 71
73/* 72/*
@@ -1715,6 +1714,27 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
1715 adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; 1714 adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
1716} 1715}
1717 1716
1717static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
1718{
1719 if (!ring || !ring->funcs->emit_wreg) {
1720 WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1721 RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
1722 } else {
1723 amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1724 }
1725}
1726
1727static void cik_invalidate_hdp(struct amdgpu_device *adev,
1728 struct amdgpu_ring *ring)
1729{
1730 if (!ring || !ring->funcs->emit_wreg) {
1731 WREG32(mmHDP_DEBUG0, 1);
1732 RREG32(mmHDP_DEBUG0);
1733 } else {
1734 amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
1735 }
1736}
1737
1718static const struct amdgpu_asic_funcs cik_asic_funcs = 1738static const struct amdgpu_asic_funcs cik_asic_funcs =
1719{ 1739{
1720 .read_disabled_bios = &cik_read_disabled_bios, 1740 .read_disabled_bios = &cik_read_disabled_bios,
@@ -1726,6 +1746,8 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
1726 .set_uvd_clocks = &cik_set_uvd_clocks, 1746 .set_uvd_clocks = &cik_set_uvd_clocks,
1727 .set_vce_clocks = &cik_set_vce_clocks, 1747 .set_vce_clocks = &cik_set_vce_clocks,
1728 .get_config_memsize = &cik_get_config_memsize, 1748 .get_config_memsize = &cik_get_config_memsize,
1749 .flush_hdp = &cik_flush_hdp,
1750 .invalidate_hdp = &cik_invalidate_hdp,
1729}; 1751};
1730 1752
1731static int cik_common_early_init(void *handle) 1753static int cik_common_early_init(void *handle)
@@ -1864,10 +1886,6 @@ static int cik_common_early_init(void *handle)
1864 return -EINVAL; 1886 return -EINVAL;
1865 } 1887 }
1866 1888
1867 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1868
1869 amdgpu_device_get_pcie_info(adev);
1870
1871 return 0; 1889 return 0;
1872} 1890}
1873 1891
@@ -1977,7 +1995,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
1977 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 1995 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
1978 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 1996 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
1979 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 1997 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
1980 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1998 if (amdgpu_dpm == -1)
1999 amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
2000 else
2001 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1981 if (adev->enable_virtual_display) 2002 if (adev->enable_virtual_display)
1982 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2003 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1983#if defined(CONFIG_DRM_AMD_DC) 2004#if defined(CONFIG_DRM_AMD_DC)
@@ -1995,7 +2016,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
1995 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2016 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
1996 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2017 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
1997 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2018 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
1998 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2019 if (amdgpu_dpm == -1)
2020 amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
2021 else
2022 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1999 if (adev->enable_virtual_display) 2023 if (adev->enable_virtual_display)
2000 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2024 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2001#if defined(CONFIG_DRM_AMD_DC) 2025#if defined(CONFIG_DRM_AMD_DC)
@@ -2013,7 +2037,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
2013 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2037 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
2014 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2038 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
2015 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2039 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
2016 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2040 amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
2017 if (adev->enable_virtual_display) 2041 if (adev->enable_virtual_display)
2018 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2042 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2019#if defined(CONFIG_DRM_AMD_DC) 2043#if defined(CONFIG_DRM_AMD_DC)
@@ -2032,7 +2056,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
2032 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2056 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
2033 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2057 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
2034 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2058 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
2035 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2059 amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
2036 if (adev->enable_virtual_display) 2060 if (adev->enable_virtual_display)
2037 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2061 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2038#if defined(CONFIG_DRM_AMD_DC) 2062#if defined(CONFIG_DRM_AMD_DC)
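
The new cik_flush_hdp()/cik_invalidate_hdp() callbacks above follow one pattern: when no ring (or a ring without emit_wreg) is available the register is written directly through MMIO, otherwise the write is emitted into the ring's command stream. Below is a minimal stand-alone sketch of that dispatch; the ring type, the emit_wreg hook, wreg32() and the register offset are simplified stand-ins for the amdgpu structures, not the driver definitions.

/*
 * Sketch of the "MMIO fallback vs. ring write" dispatch used by
 * cik_flush_hdp()/cik_invalidate_hdp().
 */
#include <stdint.h>
#include <stdio.h>

struct ring_funcs {
	void (*emit_wreg)(void *ring, uint32_t reg, uint32_t val);
};

struct ring {
	const struct ring_funcs *funcs;
};

static void wreg32(uint32_t reg, uint32_t val)	/* direct MMIO write stand-in */
{
	printf("MMIO write reg 0x%x = %u\n", reg, val);
}

static void ring_emit_wreg(void *ring, uint32_t reg, uint32_t val)
{
	printf("ring write reg 0x%x = %u\n", reg, val);
}

static void flush_hdp(struct ring *ring, uint32_t flush_reg)
{
	if (!ring || !ring->funcs->emit_wreg)
		wreg32(flush_reg, 1);			/* no command stream: poke the register */
	else
		ring->funcs->emit_wreg(ring, flush_reg, 1); /* defer to the ring */
}

int main(void)
{
	const struct ring_funcs funcs = { .emit_wreg = ring_emit_wreg };
	struct ring r = { .funcs = &funcs };
	const uint32_t HDP_FLUSH_REG = 0x1520;	/* illustrative register offset */

	flush_hdp(NULL, HDP_FLUSH_REG);	/* falls back to MMIO */
	flush_hdp(&r, HDP_FLUSH_REG);	/* goes through the ring */
	return 0;
}
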
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index c4989f51ecef..e49c6f15a0a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -24,6 +24,8 @@
24#ifndef __CIK_H__ 24#ifndef __CIK_H__
25#define __CIK_H__ 25#define __CIK_H__
26 26
27#define CIK_FLUSH_GPU_TLB_NUM_WREG 3
28
27void cik_srbm_select(struct amdgpu_device *adev, 29void cik_srbm_select(struct amdgpu_device *adev,
28 u32 me, u32 pipe, u32 queue, u32 vmid); 30 u32 me, u32 pipe, u32 queue, u32 vmid);
29int cik_set_ip_blocks(struct amdgpu_device *adev); 31int cik_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
index c7b4349f6319..2a086610f74d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -24,8 +24,7 @@
24#ifndef __CIK_DPM_H__ 24#ifndef __CIK_DPM_H__
25#define __CIK_DPM_H__ 25#define __CIK_DPM_H__
26 26
27extern const struct amd_ip_funcs ci_dpm_ip_funcs; 27extern const struct amdgpu_ip_block_version ci_smu_ip_block;
28extern const struct amd_ip_funcs kv_dpm_ip_funcs; 28extern const struct amdgpu_ip_block_version kv_smu_ip_block;
29extern const struct amd_pm_funcs ci_dpm_funcs; 29
30extern const struct amd_pm_funcs kv_dpm_funcs;
31#endif 30#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index d5a05c19708f..44d10c2172f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -111,7 +111,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
111 cik_ih_disable_interrupts(adev); 111 cik_ih_disable_interrupts(adev);
112 112
113 /* setup interrupt control */ 113 /* setup interrupt control */
114 WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); 114 WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
115 interrupt_cntl = RREG32(mmINTERRUPT_CNTL); 115 interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
116 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi 116 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
117 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN 117 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -281,7 +281,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
281 entry->src_data[0] = dw[1] & 0xfffffff; 281 entry->src_data[0] = dw[1] & 0xfffffff;
282 entry->ring_id = dw[2] & 0xff; 282 entry->ring_id = dw[2] & 0xff;
283 entry->vmid = (dw[2] >> 8) & 0xff; 283 entry->vmid = (dw[2] >> 8) & 0xff;
284 entry->pas_id = (dw[2] >> 16) & 0xffff; 284 entry->pasid = (dw[2] >> 16) & 0xffff;
285 285
286 /* wptr/rptr are in bytes! */ 286 /* wptr/rptr are in bytes! */
287 adev->irq.ih.rptr += 16; 287 adev->irq.ih.rptr += 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 6e8278e689b1..f48ea0dad875 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -261,13 +261,6 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
261 amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ 261 amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
262} 262}
263 263
264static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
265{
266 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
267 amdgpu_ring_write(ring, mmHDP_DEBUG0);
268 amdgpu_ring_write(ring, 1);
269}
270
271/** 264/**
272 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring 265 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
273 * 266 *
@@ -317,7 +310,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
317 310
318 if ((adev->mman.buffer_funcs_ring == sdma0) || 311 if ((adev->mman.buffer_funcs_ring == sdma0) ||
319 (adev->mman.buffer_funcs_ring == sdma1)) 312 (adev->mman.buffer_funcs_ring == sdma1))
320 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 313 amdgpu_ttm_set_buffer_funcs_status(adev, false);
321 314
322 for (i = 0; i < adev->sdma.num_instances; i++) { 315 for (i = 0; i < adev->sdma.num_instances; i++) {
323 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); 316 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -517,7 +510,7 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
517 } 510 }
518 511
519 if (adev->mman.buffer_funcs_ring == ring) 512 if (adev->mman.buffer_funcs_ring == ring)
520 amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); 513 amdgpu_ttm_set_buffer_funcs_status(adev, true);
521 } 514 }
522 515
523 return 0; 516 return 0;
@@ -885,18 +878,7 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
885 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) | 878 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
886 SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */ 879 SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
887 880
888 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 881 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
889 if (vmid < 8) {
890 amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
891 } else {
892 amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
893 }
894 amdgpu_ring_write(ring, pd_addr >> 12);
895
896 /* flush TLB */
897 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
898 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
899 amdgpu_ring_write(ring, 1 << vmid);
900 882
901 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); 883 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
902 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); 884 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
@@ -906,6 +888,14 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
906 amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ 888 amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
907} 889}
908 890
891static void cik_sdma_ring_emit_wreg(struct amdgpu_ring *ring,
892 uint32_t reg, uint32_t val)
893{
894 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
895 amdgpu_ring_write(ring, reg);
896 amdgpu_ring_write(ring, val);
897}
898
909static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, 899static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
910 bool enable) 900 bool enable)
911{ 901{
@@ -1279,9 +1269,9 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
1279 .set_wptr = cik_sdma_ring_set_wptr, 1269 .set_wptr = cik_sdma_ring_set_wptr,
1280 .emit_frame_size = 1270 .emit_frame_size =
1281 6 + /* cik_sdma_ring_emit_hdp_flush */ 1271 6 + /* cik_sdma_ring_emit_hdp_flush */
1282 3 + /* cik_sdma_ring_emit_hdp_invalidate */ 1272 3 + /* hdp invalidate */
1283 6 + /* cik_sdma_ring_emit_pipeline_sync */ 1273 6 + /* cik_sdma_ring_emit_pipeline_sync */
1284 12 + /* cik_sdma_ring_emit_vm_flush */ 1274 CIK_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* cik_sdma_ring_emit_vm_flush */
1285 9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */ 1275 9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
1286 .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */ 1276 .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
1287 .emit_ib = cik_sdma_ring_emit_ib, 1277 .emit_ib = cik_sdma_ring_emit_ib,
@@ -1289,11 +1279,11 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
1289 .emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync, 1279 .emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
1290 .emit_vm_flush = cik_sdma_ring_emit_vm_flush, 1280 .emit_vm_flush = cik_sdma_ring_emit_vm_flush,
1291 .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush, 1281 .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
1292 .emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
1293 .test_ring = cik_sdma_ring_test_ring, 1282 .test_ring = cik_sdma_ring_test_ring,
1294 .test_ib = cik_sdma_ring_test_ib, 1283 .test_ib = cik_sdma_ring_test_ib,
1295 .insert_nop = cik_sdma_ring_insert_nop, 1284 .insert_nop = cik_sdma_ring_insert_nop,
1296 .pad_ib = cik_sdma_ring_pad_ib, 1285 .pad_ib = cik_sdma_ring_pad_ib,
1286 .emit_wreg = cik_sdma_ring_emit_wreg,
1297}; 1287};
1298 1288
1299static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) 1289static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1391,9 +1381,6 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
1391 .copy_pte = cik_sdma_vm_copy_pte, 1381 .copy_pte = cik_sdma_vm_copy_pte,
1392 1382
1393 .write_pte = cik_sdma_vm_write_pte, 1383 .write_pte = cik_sdma_vm_write_pte,
1394
1395 .set_max_nums_pte_pde = 0x1fffff >> 3,
1396 .set_pte_pde_num_dw = 10,
1397 .set_pte_pde = cik_sdma_vm_set_pte_pde, 1384 .set_pte_pde = cik_sdma_vm_set_pte_pde,
1398}; 1385};
1399 1386
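
cik_sdma_ring_emit_wreg() above emits a three-dword SRBM_WRITE packet (header, register, value), which is why the frame-size accounting keeps "3 + /* hdp invalidate */" and why the common amdgpu_ring_emit_wreg() helper can replace the dedicated emit_hdp_invalidate hook. A stand-alone sketch of that packet layout follows; the opcode encoding, register offset and fixed-size ring buffer are illustrative, not the SDMA hardware definition.

/*
 * Sketch of the three-dword register-write packet emitted by
 * cik_sdma_ring_emit_wreg(): header, register offset, value.
 */
#include <stdint.h>
#include <stdio.h>

#define SDMA_OPCODE_SRBM_WRITE 0xe	/* illustrative opcode value */

struct ring {
	uint32_t buf[16];
	unsigned int wptr;
};

static void ring_write(struct ring *ring, uint32_t dw)
{
	ring->buf[ring->wptr++] = dw;
}

/* Generic register write: exactly 3 dwords, matching the frame-size math */
static void sdma_emit_wreg(struct ring *ring, uint32_t reg, uint32_t val)
{
	ring_write(ring, SDMA_OPCODE_SRBM_WRITE | (0xf000u << 16));
	ring_write(ring, reg);
	ring_write(ring, val);
}

int main(void)
{
	struct ring ring = { .wptr = 0 };
	const uint32_t HDP_DEBUG0 = 0x2f5;	/* illustrative offset */

	sdma_emit_wreg(&ring, HDP_DEBUG0, 1);	/* HDP invalidate becomes a plain wreg */
	printf("packet dwords emitted: %u\n", ring.wptr);	/* prints 3 */
	return 0;
}
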
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index f576e9cbbc61..960c29e17da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -111,7 +111,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
111 cz_ih_disable_interrupts(adev); 111 cz_ih_disable_interrupts(adev);
112 112
113 /* setup interrupt control */ 113 /* setup interrupt control */
114 WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); 114 WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
115 interrupt_cntl = RREG32(mmINTERRUPT_CNTL); 115 interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
116 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi 116 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
117 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN 117 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -260,7 +260,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
260 entry->src_data[0] = dw[1] & 0xfffffff; 260 entry->src_data[0] = dw[1] & 0xfffffff;
261 entry->ring_id = dw[2] & 0xff; 261 entry->ring_id = dw[2] & 0xff;
262 entry->vmid = (dw[2] >> 8) & 0xff; 262 entry->vmid = (dw[2] >> 8) & 0xff;
263 entry->pas_id = (dw[2] >> 16) & 0xffff; 263 entry->pasid = (dw[2] >> 16) & 0xffff;
264 264
265 /* wptr/rptr are in bytes! */ 265 /* wptr/rptr are in bytes! */
266 adev->irq.ih.rptr += 16; 266 adev->irq.ih.rptr += 16;
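
The cz_ih hunks are mechanical renames (dummy_page.addr becomes dummy_page_addr, pas_id becomes pasid), but the interrupt-vector decode they touch is easier to see in isolation. The sketch below is a standalone model of that unpacking; the struct is a simplified stand-in for the driver's amdgpu_iv_entry, not its real layout.

    /* Sketch only: models the IV decode shown above, using the renamed "pasid" field. */
    #include <stdint.h>
    #include <stdio.h>

    struct iv_entry {
        uint32_t src_data0;
        uint32_t ring_id;
        uint32_t vmid;
        uint32_t pasid;   /* was "pas_id" before this change */
    };

    static void decode_iv(const uint32_t dw[4], struct iv_entry *e)
    {
        e->src_data0 = dw[1] & 0xfffffff;
        e->ring_id   = dw[2] & 0xff;
        e->vmid      = (dw[2] >> 8) & 0xff;
        e->pasid     = (dw[2] >> 16) & 0xffff;
    }

    int main(void)
    {
        const uint32_t dw[4] = { 0, 0x123, (42u << 16) | (3u << 8) | 1u, 0 };
        struct iv_entry e;

        decode_iv(dw, &e);
        printf("ring %u vmid %u pasid %u\n", e.ring_id, e.vmid, e.pasid);
        return 0;
    }
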
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index f34bc68aadfb..452f88ea46a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -190,66 +190,6 @@ static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
190 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); 190 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
191} 191}
192 192
193static bool dce_v10_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
194{
195 if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
196 CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
197 return true;
198 else
199 return false;
200}
201
202static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
203{
204 u32 pos1, pos2;
205
206 pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
207 pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
208
209 if (pos1 != pos2)
210 return true;
211 else
212 return false;
213}
214
215/**
216 * dce_v10_0_vblank_wait - vblank wait asic callback.
217 *
218 * @adev: amdgpu_device pointer
219 * @crtc: crtc to wait for vblank on
220 *
221 * Wait for vblank on the requested crtc (evergreen+).
222 */
223static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
224{
225 unsigned i = 100;
226
227 if (crtc >= adev->mode_info.num_crtc)
228 return;
229
230 if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
231 return;
232
233 /* depending on when we hit vblank, we may be close to active; if so,
234 * wait for another frame.
235 */
236 while (dce_v10_0_is_in_vblank(adev, crtc)) {
237 if (i++ == 100) {
238 i = 0;
239 if (!dce_v10_0_is_counter_moving(adev, crtc))
240 break;
241 }
242 }
243
244 while (!dce_v10_0_is_in_vblank(adev, crtc)) {
245 if (i++ == 100) {
246 i = 0;
247 if (!dce_v10_0_is_counter_moving(adev, crtc))
248 break;
249 }
250 }
251}
252
253static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) 193static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
254{ 194{
255 if (crtc >= adev->mode_info.num_crtc) 195 if (crtc >= adev->mode_info.num_crtc)
@@ -1205,7 +1145,7 @@ static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
1205 u32 num_heads = 0, lb_size; 1145 u32 num_heads = 0, lb_size;
1206 int i; 1146 int i;
1207 1147
1208 amdgpu_update_display_priority(adev); 1148 amdgpu_display_update_priority(adev);
1209 1149
1210 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1150 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1211 if (adev->mode_info.crtcs[i]->base.enabled) 1151 if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2517,9 +2457,9 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
2517 .cursor_set2 = dce_v10_0_crtc_cursor_set2, 2457 .cursor_set2 = dce_v10_0_crtc_cursor_set2,
2518 .cursor_move = dce_v10_0_crtc_cursor_move, 2458 .cursor_move = dce_v10_0_crtc_cursor_move,
2519 .gamma_set = dce_v10_0_crtc_gamma_set, 2459 .gamma_set = dce_v10_0_crtc_gamma_set,
2520 .set_config = amdgpu_crtc_set_config, 2460 .set_config = amdgpu_display_crtc_set_config,
2521 .destroy = dce_v10_0_crtc_destroy, 2461 .destroy = dce_v10_0_crtc_destroy,
2522 .page_flip_target = amdgpu_crtc_page_flip_target, 2462 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2523}; 2463};
2524 2464
2525static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2465static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2537,7 +2477,8 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2537 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2477 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2538 dce_v10_0_vga_enable(crtc, false); 2478 dce_v10_0_vga_enable(crtc, false);
2539 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2479 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2540 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2480 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2481 amdgpu_crtc->crtc_id);
2541 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2482 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2542 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2483 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2543 drm_crtc_vblank_on(crtc); 2484 drm_crtc_vblank_on(crtc);
@@ -2676,7 +2617,7 @@ static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
2676 amdgpu_crtc->connector = NULL; 2617 amdgpu_crtc->connector = NULL;
2677 return false; 2618 return false;
2678 } 2619 }
2679 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2620 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2680 return false; 2621 return false;
2681 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2622 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2682 return false; 2623 return false;
@@ -2824,9 +2765,9 @@ static int dce_v10_0_sw_init(void *handle)
2824 adev->ddev->mode_config.preferred_depth = 24; 2765 adev->ddev->mode_config.preferred_depth = 24;
2825 adev->ddev->mode_config.prefer_shadow = 1; 2766 adev->ddev->mode_config.prefer_shadow = 1;
2826 2767
2827 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 2768 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2828 2769
2829 r = amdgpu_modeset_create_props(adev); 2770 r = amdgpu_display_modeset_create_props(adev);
2830 if (r) 2771 if (r)
2831 return r; 2772 return r;
2832 2773
@@ -2841,7 +2782,7 @@ static int dce_v10_0_sw_init(void *handle)
2841 } 2782 }
2842 2783
2843 if (amdgpu_atombios_get_connector_info_from_object_table(adev)) 2784 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2844 amdgpu_print_display_setup(adev->ddev); 2785 amdgpu_display_print_display_setup(adev->ddev);
2845 else 2786 else
2846 return -EINVAL; 2787 return -EINVAL;
2847 2788
@@ -2921,6 +2862,11 @@ static int dce_v10_0_hw_fini(void *handle)
2921 2862
2922static int dce_v10_0_suspend(void *handle) 2863static int dce_v10_0_suspend(void *handle)
2923{ 2864{
2865 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2866
2867 adev->mode_info.bl_level =
2868 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2869
2924 return dce_v10_0_hw_fini(handle); 2870 return dce_v10_0_hw_fini(handle);
2925} 2871}
2926 2872
@@ -2929,6 +2875,9 @@ static int dce_v10_0_resume(void *handle)
2929 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2875 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2930 int ret; 2876 int ret;
2931 2877
2878 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2879 adev->mode_info.bl_level);
2880
2932 ret = dce_v10_0_hw_init(handle); 2881 ret = dce_v10_0_hw_init(handle);
2933 2882
2934 /* turn on the BL */ 2883 /* turn on the BL */
@@ -3249,7 +3198,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
3249{ 3198{
3250 unsigned crtc = entry->src_id - 1; 3199 unsigned crtc = entry->src_id - 1;
3251 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3200 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3252 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); 3201 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);
3253 3202
3254 switch (entry->src_data[0]) { 3203 switch (entry->src_data[0]) {
3255 case 0: /* vblank */ 3204 case 0: /* vblank */
@@ -3601,7 +3550,6 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3601static const struct amdgpu_display_funcs dce_v10_0_display_funcs = { 3550static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
3602 .bandwidth_update = &dce_v10_0_bandwidth_update, 3551 .bandwidth_update = &dce_v10_0_bandwidth_update,
3603 .vblank_get_counter = &dce_v10_0_vblank_get_counter, 3552 .vblank_get_counter = &dce_v10_0_vblank_get_counter,
3604 .vblank_wait = &dce_v10_0_vblank_wait,
3605 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, 3553 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3606 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, 3554 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3607 .hpd_sense = &dce_v10_0_hpd_sense, 3555 .hpd_sense = &dce_v10_0_hpd_sense,
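
The dce_v10_0 changes above fall into three groups: the per-ASIC vblank_wait callback is removed from amdgpu_display_funcs, the display helpers move to their amdgpu_display_* names (and adev->mc becomes adev->gmc), and suspend/resume now save and restore the backlight level around hw_fini/hw_init. The later DCE variants below (v11_0, v6_0, v8_0) repeat the same change. The sketch below models only the suspend/resume ordering; every function and type in it is a stand-in, not the driver's real API.

    /* Sketch only: backlight level is captured before teardown and written back
     * before re-init, mirroring the ordering added in dce_v10_0_suspend/resume. */
    #include <stdio.h>

    struct dev_state {
        int bl_level;        /* cached across suspend */
        int hw_bl_register;  /* pretend hardware register */
    };

    static int  get_backlight_from_reg(struct dev_state *d)         { return d->hw_bl_register; }
    static void set_backlight_to_reg(struct dev_state *d, int lvl)  { d->hw_bl_register = lvl; }
    static void hw_fini(struct dev_state *d)                        { d->hw_bl_register = 0; }
    static void hw_init(struct dev_state *d)                        { (void)d; }

    static void dce_suspend(struct dev_state *d)
    {
        d->bl_level = get_backlight_from_reg(d);  /* save first ... */
        hw_fini(d);                               /* ... then tear down */
    }

    static void dce_resume(struct dev_state *d)
    {
        set_backlight_to_reg(d, d->bl_level);     /* restore before re-init */
        hw_init(d);
    }

    int main(void)
    {
        struct dev_state d = { .bl_level = 0, .hw_bl_register = 180 };
        dce_suspend(&d);
        dce_resume(&d);
        printf("backlight after resume: %d\n", d.hw_bl_register); /* 180 */
        return 0;
    }
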
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 26378bd6aba4..a7c1c584a191 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -207,66 +207,6 @@ static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
207 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); 207 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
208} 208}
209 209
210static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
211{
212 if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
213 CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
214 return true;
215 else
216 return false;
217}
218
219static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
220{
221 u32 pos1, pos2;
222
223 pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
224 pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
225
226 if (pos1 != pos2)
227 return true;
228 else
229 return false;
230}
231
232/**
233 * dce_v11_0_vblank_wait - vblank wait asic callback.
234 *
235 * @adev: amdgpu_device pointer
236 * @crtc: crtc to wait for vblank on
237 *
238 * Wait for vblank on the requested crtc (evergreen+).
239 */
240static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
241{
242 unsigned i = 100;
243
244 if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
245 return;
246
247 if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
248 return;
249
250 /* depending on when we hit vblank, we may be close to active; if so,
251 * wait for another frame.
252 */
253 while (dce_v11_0_is_in_vblank(adev, crtc)) {
254 if (i++ == 100) {
255 i = 0;
256 if (!dce_v11_0_is_counter_moving(adev, crtc))
257 break;
258 }
259 }
260
261 while (!dce_v11_0_is_in_vblank(adev, crtc)) {
262 if (i++ == 100) {
263 i = 0;
264 if (!dce_v11_0_is_counter_moving(adev, crtc))
265 break;
266 }
267 }
268}
269
270static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) 210static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
271{ 211{
272 if (crtc < 0 || crtc >= adev->mode_info.num_crtc) 212 if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
@@ -1229,7 +1169,7 @@ static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
1229 u32 num_heads = 0, lb_size; 1169 u32 num_heads = 0, lb_size;
1230 int i; 1170 int i;
1231 1171
1232 amdgpu_update_display_priority(adev); 1172 amdgpu_display_update_priority(adev);
1233 1173
1234 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1174 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1235 if (adev->mode_info.crtcs[i]->base.enabled) 1175 if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2592,9 +2532,9 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
2592 .cursor_set2 = dce_v11_0_crtc_cursor_set2, 2532 .cursor_set2 = dce_v11_0_crtc_cursor_set2,
2593 .cursor_move = dce_v11_0_crtc_cursor_move, 2533 .cursor_move = dce_v11_0_crtc_cursor_move,
2594 .gamma_set = dce_v11_0_crtc_gamma_set, 2534 .gamma_set = dce_v11_0_crtc_gamma_set,
2595 .set_config = amdgpu_crtc_set_config, 2535 .set_config = amdgpu_display_crtc_set_config,
2596 .destroy = dce_v11_0_crtc_destroy, 2536 .destroy = dce_v11_0_crtc_destroy,
2597 .page_flip_target = amdgpu_crtc_page_flip_target, 2537 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2598}; 2538};
2599 2539
2600static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2540static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2612,7 +2552,8 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2612 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2552 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2613 dce_v11_0_vga_enable(crtc, false); 2553 dce_v11_0_vga_enable(crtc, false);
2614 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2554 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2615 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2555 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2556 amdgpu_crtc->crtc_id);
2616 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2557 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2617 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2558 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2618 drm_crtc_vblank_on(crtc); 2559 drm_crtc_vblank_on(crtc);
@@ -2779,7 +2720,7 @@ static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
2779 amdgpu_crtc->connector = NULL; 2720 amdgpu_crtc->connector = NULL;
2780 return false; 2721 return false;
2781 } 2722 }
2782 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2723 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2783 return false; 2724 return false;
2784 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2725 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2785 return false; 2726 return false;
@@ -2939,9 +2880,9 @@ static int dce_v11_0_sw_init(void *handle)
2939 adev->ddev->mode_config.preferred_depth = 24; 2880 adev->ddev->mode_config.preferred_depth = 24;
2940 adev->ddev->mode_config.prefer_shadow = 1; 2881 adev->ddev->mode_config.prefer_shadow = 1;
2941 2882
2942 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 2883 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2943 2884
2944 r = amdgpu_modeset_create_props(adev); 2885 r = amdgpu_display_modeset_create_props(adev);
2945 if (r) 2886 if (r)
2946 return r; 2887 return r;
2947 2888
@@ -2957,7 +2898,7 @@ static int dce_v11_0_sw_init(void *handle)
2957 } 2898 }
2958 2899
2959 if (amdgpu_atombios_get_connector_info_from_object_table(adev)) 2900 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2960 amdgpu_print_display_setup(adev->ddev); 2901 amdgpu_display_print_display_setup(adev->ddev);
2961 else 2902 else
2962 return -EINVAL; 2903 return -EINVAL;
2963 2904
@@ -3047,6 +2988,11 @@ static int dce_v11_0_hw_fini(void *handle)
3047 2988
3048static int dce_v11_0_suspend(void *handle) 2989static int dce_v11_0_suspend(void *handle)
3049{ 2990{
2991 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2992
2993 adev->mode_info.bl_level =
2994 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2995
3050 return dce_v11_0_hw_fini(handle); 2996 return dce_v11_0_hw_fini(handle);
3051} 2997}
3052 2998
@@ -3055,6 +3001,9 @@ static int dce_v11_0_resume(void *handle)
3055 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3001 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3056 int ret; 3002 int ret;
3057 3003
3004 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
3005 adev->mode_info.bl_level);
3006
3058 ret = dce_v11_0_hw_init(handle); 3007 ret = dce_v11_0_hw_init(handle);
3059 3008
3060 /* turn on the BL */ 3009 /* turn on the BL */
@@ -3368,7 +3317,8 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
3368{ 3317{
3369 unsigned crtc = entry->src_id - 1; 3318 unsigned crtc = entry->src_id - 1;
3370 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3319 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3371 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); 3320 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3321 crtc);
3372 3322
3373 switch (entry->src_data[0]) { 3323 switch (entry->src_data[0]) {
3374 case 0: /* vblank */ 3324 case 0: /* vblank */
@@ -3725,7 +3675,6 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3725static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { 3675static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
3726 .bandwidth_update = &dce_v11_0_bandwidth_update, 3676 .bandwidth_update = &dce_v11_0_bandwidth_update,
3727 .vblank_get_counter = &dce_v11_0_vblank_get_counter, 3677 .vblank_get_counter = &dce_v11_0_vblank_get_counter,
3728 .vblank_wait = &dce_v11_0_vblank_wait,
3729 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, 3678 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3730 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, 3679 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3731 .hpd_sense = &dce_v11_0_hpd_sense, 3680 .hpd_sense = &dce_v11_0_hpd_sense,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index bd2c4f727df6..9f67b7fd3487 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -142,64 +142,6 @@ static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
142 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); 142 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
143} 143}
144 144
145static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
146{
147 if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
148 return true;
149 else
150 return false;
151}
152
153static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
154{
155 u32 pos1, pos2;
156
157 pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
158 pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
159
160 if (pos1 != pos2)
161 return true;
162 else
163 return false;
164}
165
166/**
167 * dce_v6_0_wait_for_vblank - vblank wait asic callback.
168 *
169 * @crtc: crtc to wait for vblank on
170 *
171 * Wait for vblank on the requested crtc (evergreen+).
172 */
173static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
174{
175 unsigned i = 100;
176
177 if (crtc >= adev->mode_info.num_crtc)
178 return;
179
180 if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
181 return;
182
183 /* depending on when we hit vblank, we may be close to active; if so,
184 * wait for another frame.
185 */
186 while (dce_v6_0_is_in_vblank(adev, crtc)) {
187 if (i++ == 100) {
188 i = 0;
189 if (!dce_v6_0_is_counter_moving(adev, crtc))
190 break;
191 }
192 }
193
194 while (!dce_v6_0_is_in_vblank(adev, crtc)) {
195 if (i++ == 100) {
196 i = 0;
197 if (!dce_v6_0_is_counter_moving(adev, crtc))
198 break;
199 }
200 }
201}
202
203static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) 145static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
204{ 146{
205 if (crtc >= adev->mode_info.num_crtc) 147 if (crtc >= adev->mode_info.num_crtc)
@@ -1108,7 +1050,7 @@ static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1108 if (!adev->mode_info.mode_config_initialized) 1050 if (!adev->mode_info.mode_config_initialized)
1109 return; 1051 return;
1110 1052
1111 amdgpu_update_display_priority(adev); 1053 amdgpu_display_update_priority(adev);
1112 1054
1113 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1055 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1114 if (adev->mode_info.crtcs[i]->base.enabled) 1056 if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2407,9 +2349,9 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2407 .cursor_set2 = dce_v6_0_crtc_cursor_set2, 2349 .cursor_set2 = dce_v6_0_crtc_cursor_set2,
2408 .cursor_move = dce_v6_0_crtc_cursor_move, 2350 .cursor_move = dce_v6_0_crtc_cursor_move,
2409 .gamma_set = dce_v6_0_crtc_gamma_set, 2351 .gamma_set = dce_v6_0_crtc_gamma_set,
2410 .set_config = amdgpu_crtc_set_config, 2352 .set_config = amdgpu_display_crtc_set_config,
2411 .destroy = dce_v6_0_crtc_destroy, 2353 .destroy = dce_v6_0_crtc_destroy,
2412 .page_flip_target = amdgpu_crtc_page_flip_target, 2354 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2413}; 2355};
2414 2356
2415static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2357static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2425,7 +2367,8 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2425 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); 2367 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2426 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2368 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2427 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2369 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2428 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2370 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2371 amdgpu_crtc->crtc_id);
2429 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2372 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2430 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2373 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2431 drm_crtc_vblank_on(crtc); 2374 drm_crtc_vblank_on(crtc);
@@ -2562,7 +2505,7 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2562 amdgpu_crtc->connector = NULL; 2505 amdgpu_crtc->connector = NULL;
2563 return false; 2506 return false;
2564 } 2507 }
2565 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2508 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2566 return false; 2509 return false;
2567 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2510 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2568 return false; 2511 return false;
@@ -2693,9 +2636,9 @@ static int dce_v6_0_sw_init(void *handle)
2693 adev->ddev->mode_config.max_height = 16384; 2636 adev->ddev->mode_config.max_height = 16384;
2694 adev->ddev->mode_config.preferred_depth = 24; 2637 adev->ddev->mode_config.preferred_depth = 24;
2695 adev->ddev->mode_config.prefer_shadow = 1; 2638 adev->ddev->mode_config.prefer_shadow = 1;
2696 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 2639 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2697 2640
2698 r = amdgpu_modeset_create_props(adev); 2641 r = amdgpu_display_modeset_create_props(adev);
2699 if (r) 2642 if (r)
2700 return r; 2643 return r;
2701 2644
@@ -2711,7 +2654,7 @@ static int dce_v6_0_sw_init(void *handle)
2711 2654
2712 ret = amdgpu_atombios_get_connector_info_from_object_table(adev); 2655 ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2713 if (ret) 2656 if (ret)
2714 amdgpu_print_display_setup(adev->ddev); 2657 amdgpu_display_print_display_setup(adev->ddev);
2715 else 2658 else
2716 return -EINVAL; 2659 return -EINVAL;
2717 2660
@@ -2787,6 +2730,11 @@ static int dce_v6_0_hw_fini(void *handle)
2787 2730
2788static int dce_v6_0_suspend(void *handle) 2731static int dce_v6_0_suspend(void *handle)
2789{ 2732{
2733 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2734
2735 adev->mode_info.bl_level =
2736 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2737
2790 return dce_v6_0_hw_fini(handle); 2738 return dce_v6_0_hw_fini(handle);
2791} 2739}
2792 2740
@@ -2795,6 +2743,9 @@ static int dce_v6_0_resume(void *handle)
2795 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2743 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2796 int ret; 2744 int ret;
2797 2745
2746 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2747 adev->mode_info.bl_level);
2748
2798 ret = dce_v6_0_hw_init(handle); 2749 ret = dce_v6_0_hw_init(handle);
2799 2750
2800 /* turn on the BL */ 2751 /* turn on the BL */
@@ -2966,7 +2917,8 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2966{ 2917{
2967 unsigned crtc = entry->src_id - 1; 2918 unsigned crtc = entry->src_id - 1;
2968 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 2919 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2969 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); 2920 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
2921 crtc);
2970 2922
2971 switch (entry->src_data[0]) { 2923 switch (entry->src_data[0]) {
2972 case 0: /* vblank */ 2924 case 0: /* vblank */
@@ -3093,7 +3045,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3093 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; 3045 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3094 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); 3046 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3095 schedule_work(&adev->hotplug_work); 3047 schedule_work(&adev->hotplug_work);
3096 DRM_INFO("IH: HPD%d\n", hpd + 1); 3048 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3097 } 3049 }
3098 3050
3099 return 0; 3051 return 0;
@@ -3407,7 +3359,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3407static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { 3359static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3408 .bandwidth_update = &dce_v6_0_bandwidth_update, 3360 .bandwidth_update = &dce_v6_0_bandwidth_update,
3409 .vblank_get_counter = &dce_v6_0_vblank_get_counter, 3361 .vblank_get_counter = &dce_v6_0_vblank_get_counter,
3410 .vblank_wait = &dce_v6_0_vblank_wait,
3411 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, 3362 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3412 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, 3363 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3413 .hpd_sense = &dce_v6_0_hpd_sense, 3364 .hpd_sense = &dce_v6_0_hpd_sense,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c008dc030687..f55422cbd77a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -140,66 +140,6 @@ static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
140 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); 140 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
141} 141}
142 142
143static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
144{
145 if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
146 CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
147 return true;
148 else
149 return false;
150}
151
152static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
153{
154 u32 pos1, pos2;
155
156 pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
157 pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
158
159 if (pos1 != pos2)
160 return true;
161 else
162 return false;
163}
164
165/**
166 * dce_v8_0_vblank_wait - vblank wait asic callback.
167 *
168 * @adev: amdgpu_device pointer
169 * @crtc: crtc to wait for vblank on
170 *
171 * Wait for vblank on the requested crtc (evergreen+).
172 */
173static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
174{
175 unsigned i = 100;
176
177 if (crtc >= adev->mode_info.num_crtc)
178 return;
179
180 if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
181 return;
182
183 /* depending on when we hit vblank, we may be close to active; if so,
184 * wait for another frame.
185 */
186 while (dce_v8_0_is_in_vblank(adev, crtc)) {
187 if (i++ == 100) {
188 i = 0;
189 if (!dce_v8_0_is_counter_moving(adev, crtc))
190 break;
191 }
192 }
193
194 while (!dce_v8_0_is_in_vblank(adev, crtc)) {
195 if (i++ == 100) {
196 i = 0;
197 if (!dce_v8_0_is_counter_moving(adev, crtc))
198 break;
199 }
200 }
201}
202
203static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) 143static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
204{ 144{
205 if (crtc >= adev->mode_info.num_crtc) 145 if (crtc >= adev->mode_info.num_crtc)
@@ -1144,7 +1084,7 @@ static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1144 u32 num_heads = 0, lb_size; 1084 u32 num_heads = 0, lb_size;
1145 int i; 1085 int i;
1146 1086
1147 amdgpu_update_display_priority(adev); 1087 amdgpu_display_update_priority(adev);
1148 1088
1149 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1089 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1150 if (adev->mode_info.crtcs[i]->base.enabled) 1090 if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2421,9 +2361,9 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2421 .cursor_set2 = dce_v8_0_crtc_cursor_set2, 2361 .cursor_set2 = dce_v8_0_crtc_cursor_set2,
2422 .cursor_move = dce_v8_0_crtc_cursor_move, 2362 .cursor_move = dce_v8_0_crtc_cursor_move,
2423 .gamma_set = dce_v8_0_crtc_gamma_set, 2363 .gamma_set = dce_v8_0_crtc_gamma_set,
2424 .set_config = amdgpu_crtc_set_config, 2364 .set_config = amdgpu_display_crtc_set_config,
2425 .destroy = dce_v8_0_crtc_destroy, 2365 .destroy = dce_v8_0_crtc_destroy,
2426 .page_flip_target = amdgpu_crtc_page_flip_target, 2366 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2427}; 2367};
2428 2368
2429static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2369static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2441,7 +2381,8 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2441 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2381 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2442 dce_v8_0_vga_enable(crtc, false); 2382 dce_v8_0_vga_enable(crtc, false);
2443 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2383 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2444 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2384 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2385 amdgpu_crtc->crtc_id);
2445 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2386 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2446 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2387 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2447 drm_crtc_vblank_on(crtc); 2388 drm_crtc_vblank_on(crtc);
@@ -2587,7 +2528,7 @@ static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2587 amdgpu_crtc->connector = NULL; 2528 amdgpu_crtc->connector = NULL;
2588 return false; 2529 return false;
2589 } 2530 }
2590 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2531 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2591 return false; 2532 return false;
2592 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2533 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2593 return false; 2534 return false;
@@ -2724,9 +2665,9 @@ static int dce_v8_0_sw_init(void *handle)
2724 adev->ddev->mode_config.preferred_depth = 24; 2665 adev->ddev->mode_config.preferred_depth = 24;
2725 adev->ddev->mode_config.prefer_shadow = 1; 2666 adev->ddev->mode_config.prefer_shadow = 1;
2726 2667
2727 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 2668 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2728 2669
2729 r = amdgpu_modeset_create_props(adev); 2670 r = amdgpu_display_modeset_create_props(adev);
2730 if (r) 2671 if (r)
2731 return r; 2672 return r;
2732 2673
@@ -2741,7 +2682,7 @@ static int dce_v8_0_sw_init(void *handle)
2741 } 2682 }
2742 2683
2743 if (amdgpu_atombios_get_connector_info_from_object_table(adev)) 2684 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2744 amdgpu_print_display_setup(adev->ddev); 2685 amdgpu_display_print_display_setup(adev->ddev);
2745 else 2686 else
2746 return -EINVAL; 2687 return -EINVAL;
2747 2688
@@ -2819,6 +2760,11 @@ static int dce_v8_0_hw_fini(void *handle)
2819 2760
2820static int dce_v8_0_suspend(void *handle) 2761static int dce_v8_0_suspend(void *handle)
2821{ 2762{
2763 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2764
2765 adev->mode_info.bl_level =
2766 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2767
2822 return dce_v8_0_hw_fini(handle); 2768 return dce_v8_0_hw_fini(handle);
2823} 2769}
2824 2770
@@ -2827,6 +2773,9 @@ static int dce_v8_0_resume(void *handle)
2827 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2773 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2828 int ret; 2774 int ret;
2829 2775
2776 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2777 adev->mode_info.bl_level);
2778
2830 ret = dce_v8_0_hw_init(handle); 2779 ret = dce_v8_0_hw_init(handle);
2831 2780
2832 /* turn on the BL */ 2781 /* turn on the BL */
@@ -3063,7 +3012,8 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3063{ 3012{
3064 unsigned crtc = entry->src_id - 1; 3013 unsigned crtc = entry->src_id - 1;
3065 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3014 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3066 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); 3015 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3016 crtc);
3067 3017
3068 switch (entry->src_data[0]) { 3018 switch (entry->src_data[0]) {
3069 case 0: /* vblank */ 3019 case 0: /* vblank */
@@ -3491,7 +3441,6 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3491static const struct amdgpu_display_funcs dce_v8_0_display_funcs = { 3441static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3492 .bandwidth_update = &dce_v8_0_bandwidth_update, 3442 .bandwidth_update = &dce_v8_0_bandwidth_update,
3493 .vblank_get_counter = &dce_v8_0_vblank_get_counter, 3443 .vblank_get_counter = &dce_v8_0_vblank_get_counter,
3494 .vblank_wait = &dce_v8_0_vblank_wait,
3495 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, 3444 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3496 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, 3445 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3497 .hpd_sense = &dce_v8_0_hpd_sense, 3446 .hpd_sense = &dce_v8_0_hpd_sense,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 120dd3b26fc2..8201a0929ca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -48,19 +48,6 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
48 int crtc, 48 int crtc,
49 enum amdgpu_interrupt_state state); 49 enum amdgpu_interrupt_state state);
50 50
51/**
52 * dce_virtual_vblank_wait - vblank wait asic callback.
53 *
54 * @adev: amdgpu_device pointer
55 * @crtc: crtc to wait for vblank on
56 *
57 * Wait for vblank on the requested crtc (evergreen+).
58 */
59static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
60{
61 return;
62}
63
64static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc) 51static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
65{ 52{
66 return 0; 53 return 0;
@@ -130,9 +117,9 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
130 .cursor_set2 = NULL, 117 .cursor_set2 = NULL,
131 .cursor_move = NULL, 118 .cursor_move = NULL,
132 .gamma_set = dce_virtual_crtc_gamma_set, 119 .gamma_set = dce_virtual_crtc_gamma_set,
133 .set_config = amdgpu_crtc_set_config, 120 .set_config = amdgpu_display_crtc_set_config,
134 .destroy = dce_virtual_crtc_destroy, 121 .destroy = dce_virtual_crtc_destroy,
135 .page_flip_target = amdgpu_crtc_page_flip_target, 122 .page_flip_target = amdgpu_display_crtc_page_flip_target,
136}; 123};
137 124
138static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) 125static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -149,7 +136,8 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
149 case DRM_MODE_DPMS_ON: 136 case DRM_MODE_DPMS_ON:
150 amdgpu_crtc->enabled = true; 137 amdgpu_crtc->enabled = true;
151 /* Make sure VBLANK interrupts are still enabled */ 138 /* Make sure VBLANK interrupts are still enabled */
152 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 139 type = amdgpu_display_crtc_idx_to_irq_type(adev,
140 amdgpu_crtc->crtc_id);
153 amdgpu_irq_update(adev, &adev->crtc_irq, type); 141 amdgpu_irq_update(adev, &adev->crtc_irq, type);
154 drm_crtc_vblank_on(crtc); 142 drm_crtc_vblank_on(crtc);
155 break; 143 break;
@@ -406,9 +394,9 @@ static int dce_virtual_sw_init(void *handle)
406 adev->ddev->mode_config.preferred_depth = 24; 394 adev->ddev->mode_config.preferred_depth = 24;
407 adev->ddev->mode_config.prefer_shadow = 1; 395 adev->ddev->mode_config.prefer_shadow = 1;
408 396
409 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 397 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
410 398
411 r = amdgpu_modeset_create_props(adev); 399 r = amdgpu_display_modeset_create_props(adev);
412 if (r) 400 if (r)
413 return r; 401 return r;
414 402
@@ -653,7 +641,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
653static const struct amdgpu_display_funcs dce_virtual_display_funcs = { 641static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
654 .bandwidth_update = &dce_virtual_bandwidth_update, 642 .bandwidth_update = &dce_virtual_bandwidth_update,
655 .vblank_get_counter = &dce_virtual_vblank_get_counter, 643 .vblank_get_counter = &dce_virtual_vblank_get_counter,
656 .vblank_wait = &dce_virtual_vblank_wait,
657 .backlight_set_level = NULL, 644 .backlight_set_level = NULL,
658 .backlight_get_level = NULL, 645 .backlight_get_level = NULL,
659 .hpd_sense = &dce_virtual_hpd_sense, 646 .hpd_sense = &dce_virtual_hpd_sense,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/amdgpu/emu_soc.c
index 8fe8ba9434ff..d72c25c1b987 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/amdgpu/emu_soc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -20,7 +20,14 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#include "amdgpu.h"
24#include "soc15.h"
25
26#include "soc15_common.h"
27#include "soc15_hw_ip.h"
28
29int emu_soc_asic_init(struct amdgpu_device *adev)
30{
31 return 0;
32}
23 33
24bool acpi_atcs_functions_supported(void *device, uint32_t index);
25int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise);
26bool acpi_atcs_notify_pcie_device_ready(void *device);
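
This hunk retires the pp_acpi.h ATCS prototypes and introduces emu_soc.c with a no-op emu_soc_asic_init(). Below is a small, heavily hypothetical sketch of how such a stub might be dispatched; the emu_mode flag and the fallback path are illustrative assumptions, not taken from the diff, which only defines the stub returning 0.

    /* Sketch only: hypothetical caller for an emulation-mode ASIC init stub. */
    #include <stdbool.h>
    #include <stdio.h>

    struct device_ctx { bool emu_mode; };

    static int emu_soc_asic_init(struct device_ctx *d) { (void)d; return 0; }
    static int real_asic_init(struct device_ctx *d)    { (void)d; return 0; }

    static int asic_init(struct device_ctx *d)
    {
        /* The emulation path can simply succeed without touching hardware. */
        return d->emu_mode ? emu_soc_asic_init(d) : real_asic_init(d);
    }

    int main(void)
    {
        struct device_ctx d = { .emu_mode = true };
        printf("asic_init: %d\n", asic_init(&d));
        return 0;
    }
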
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 9870d83b68c1..0fff5b8cd318 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -38,6 +38,7 @@
38#include "dce/dce_6_0_sh_mask.h" 38#include "dce/dce_6_0_sh_mask.h"
39#include "gca/gfx_7_2_enum.h" 39#include "gca/gfx_7_2_enum.h"
40#include "si_enums.h" 40#include "si_enums.h"
41#include "si.h"
41 42
42static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev); 43static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
43static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev); 44static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1808,17 +1809,6 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
1808 return r; 1809 return r;
1809} 1810}
1810 1811
1811static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1812{
1813 /* flush hdp cache */
1814 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1815 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
1816 WRITE_DATA_DST_SEL(0)));
1817 amdgpu_ring_write(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL);
1818 amdgpu_ring_write(ring, 0);
1819 amdgpu_ring_write(ring, 0x1);
1820}
1821
1822static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring) 1812static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
1823{ 1813{
1824 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); 1814 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
@@ -1826,24 +1816,6 @@ static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
1826 EVENT_INDEX(0)); 1816 EVENT_INDEX(0));
1827} 1817}
1828 1818
1829/**
1830 * gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
1831 *
1832 * @adev: amdgpu_device pointer
1833 * @ridx: amdgpu ring index
1834 *
1835 * Emits an hdp invalidate on the cp.
1836 */
1837static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
1838{
1839 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1840 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
1841 WRITE_DATA_DST_SEL(0)));
1842 amdgpu_ring_write(ring, mmHDP_DEBUG0);
1843 amdgpu_ring_write(ring, 0);
1844 amdgpu_ring_write(ring, 0x1);
1845}
1846
1847static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 1819static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1848 u64 seq, unsigned flags) 1820 u64 seq, unsigned flags)
1849{ 1821{
@@ -2358,25 +2330,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
2358{ 2330{
2359 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 2331 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2360 2332
2361 /* write new base address */ 2333 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2362 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2363 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
2364 WRITE_DATA_DST_SEL(0)));
2365 if (vmid < 8) {
2366 amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid ));
2367 } else {
2368 amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
2369 }
2370 amdgpu_ring_write(ring, 0);
2371 amdgpu_ring_write(ring, pd_addr >> 12);
2372
2373 /* bits 0-15 are the VM contexts0-15 */
2374 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2375 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
2376 WRITE_DATA_DST_SEL(0)));
2377 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
2378 amdgpu_ring_write(ring, 0);
2379 amdgpu_ring_write(ring, 1 << vmid);
2380 2334
2381 /* wait for the invalidate to complete */ 2335 /* wait for the invalidate to complete */
2382 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 2336 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -2401,6 +2355,18 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
2401 } 2355 }
2402} 2356}
2403 2357
2358static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
2359 uint32_t reg, uint32_t val)
2360{
2361 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2362
2363 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2364 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
2365 WRITE_DATA_DST_SEL(0)));
2366 amdgpu_ring_write(ring, reg);
2367 amdgpu_ring_write(ring, 0);
2368 amdgpu_ring_write(ring, val);
2369}
2404 2370
2405static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) 2371static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
2406{ 2372{
@@ -3511,23 +3477,21 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
3511 .get_wptr = gfx_v6_0_ring_get_wptr, 3477 .get_wptr = gfx_v6_0_ring_get_wptr,
3512 .set_wptr = gfx_v6_0_ring_set_wptr_gfx, 3478 .set_wptr = gfx_v6_0_ring_set_wptr_gfx,
3513 .emit_frame_size = 3479 .emit_frame_size =
3514 5 + /* gfx_v6_0_ring_emit_hdp_flush */ 3480 5 + 5 + /* hdp flush / invalidate */
3515 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
3516 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ 3481 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
3517 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */ 3482 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
3518 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */ 3483 SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
3519 3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */ 3484 3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
3520 .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */ 3485 .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
3521 .emit_ib = gfx_v6_0_ring_emit_ib, 3486 .emit_ib = gfx_v6_0_ring_emit_ib,
3522 .emit_fence = gfx_v6_0_ring_emit_fence, 3487 .emit_fence = gfx_v6_0_ring_emit_fence,
3523 .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, 3488 .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
3524 .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush, 3489 .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
3525 .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
3526 .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
3527 .test_ring = gfx_v6_0_ring_test_ring, 3490 .test_ring = gfx_v6_0_ring_test_ring,
3528 .test_ib = gfx_v6_0_ring_test_ib, 3491 .test_ib = gfx_v6_0_ring_test_ib,
3529 .insert_nop = amdgpu_ring_insert_nop, 3492 .insert_nop = amdgpu_ring_insert_nop,
3530 .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl, 3493 .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
3494 .emit_wreg = gfx_v6_0_ring_emit_wreg,
3531}; 3495};
3532 3496
3533static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = { 3497static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
@@ -3538,21 +3502,19 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
3538 .get_wptr = gfx_v6_0_ring_get_wptr, 3502 .get_wptr = gfx_v6_0_ring_get_wptr,
3539 .set_wptr = gfx_v6_0_ring_set_wptr_compute, 3503 .set_wptr = gfx_v6_0_ring_set_wptr_compute,
3540 .emit_frame_size = 3504 .emit_frame_size =
3541 5 + /* gfx_v6_0_ring_emit_hdp_flush */ 3505 5 + 5 + /* hdp flush / invalidate */
3542 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
3543 7 + /* gfx_v6_0_ring_emit_pipeline_sync */ 3506 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
3544 17 + /* gfx_v6_0_ring_emit_vm_flush */ 3507 SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
3545 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ 3508 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
3546 .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */ 3509 .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
3547 .emit_ib = gfx_v6_0_ring_emit_ib, 3510 .emit_ib = gfx_v6_0_ring_emit_ib,
3548 .emit_fence = gfx_v6_0_ring_emit_fence, 3511 .emit_fence = gfx_v6_0_ring_emit_fence,
3549 .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, 3512 .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
3550 .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush, 3513 .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
3551 .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
3552 .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
3553 .test_ring = gfx_v6_0_ring_test_ring, 3514 .test_ring = gfx_v6_0_ring_test_ring,
3554 .test_ib = gfx_v6_0_ring_test_ib, 3515 .test_ib = gfx_v6_0_ring_test_ib,
3555 .insert_nop = amdgpu_ring_insert_nop, 3516 .insert_nop = amdgpu_ring_insert_nop,
3517 .emit_wreg = gfx_v6_0_ring_emit_wreg,
3556}; 3518};
3557 3519
3558static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev) 3520static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
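
The gfx_v6_0 changes replace the hand-rolled VM-flush register writes with a call to amdgpu_gmc_emit_flush_gpu_tlb() and add an emit_wreg callback for the CP. The removed lines show exactly what that shared helper has to reproduce: write the per-VMID page-table base register with pd_addr >> 12, then write VM_INVALIDATE_REQUEST with 1 << vmid. The sketch below models that contract on top of a minimal ring type; the register offsets and the helper body are illustrative stand-ins, since the real helper lives in common GMC code that is not part of this hunk.

    /* Sketch only: a generic GMC flush built solely on the new emit_wreg callback. */
    #include <stdint.h>
    #include <stdio.h>

    #define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x546  /* placeholder offsets */
    #define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x562
    #define mmVM_INVALIDATE_REQUEST            0x51e

    struct ring {
        void (*emit_wreg)(struct ring *ring, uint32_t reg, uint32_t val);
    };

    static void gmc_emit_flush_gpu_tlb(struct ring *ring, unsigned vmid, uint64_t pd_addr)
    {
        uint32_t base = (vmid < 8)
            ? mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid
            : mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);

        ring->emit_wreg(ring, base, (uint32_t)(pd_addr >> 12));     /* new page table base */
        ring->emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1u << vmid); /* kick the TLB flush */
    }

    static void print_wreg(struct ring *ring, uint32_t reg, uint32_t val)
    {
        (void)ring;
        printf("wreg 0x%03x <- 0x%08x\n", reg, val);
    }

    int main(void)
    {
        struct ring r = { .emit_wreg = print_wreg };
        gmc_emit_flush_gpu_tlb(&r, 5, 0x123456000ull);
        return 0;
    }
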
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index a066c5eda135..e13d9d83767b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1946,7 +1946,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
1946 if (i == 0) 1946 if (i == 0)
1947 sh_mem_base = 0; 1947 sh_mem_base = 0;
1948 else 1948 else
1949 sh_mem_base = adev->mc.shared_aperture_start >> 48; 1949 sh_mem_base = adev->gmc.shared_aperture_start >> 48;
1950 cik_srbm_select(adev, 0, 0, 0, i); 1950 cik_srbm_select(adev, 0, 0, 0, i);
1951 /* CP and shaders */ 1951 /* CP and shaders */
1952 WREG32(mmSH_MEM_CONFIG, sh_mem_cfg); 1952 WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
@@ -2147,26 +2147,6 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
2147 EVENT_INDEX(0)); 2147 EVENT_INDEX(0));
2148} 2148}
2149 2149
2150
2151/**
2152 * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
2153 *
2154 * @adev: amdgpu_device pointer
2155 * @ridx: amdgpu ring index
2156 *
2157 * Emits an hdp invalidate on the cp.
2158 */
2159static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
2160{
2161 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2162 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2163 WRITE_DATA_DST_SEL(0) |
2164 WR_CONFIRM));
2165 amdgpu_ring_write(ring, mmHDP_DEBUG0);
2166 amdgpu_ring_write(ring, 0);
2167 amdgpu_ring_write(ring, 1);
2168}
2169
2170/** 2150/**
2171 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring 2151 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
2172 * 2152 *
@@ -3243,26 +3223,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3243{ 3223{
3244 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3224 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3245 3225
3246 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3226 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3247 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3248 WRITE_DATA_DST_SEL(0)));
3249 if (vmid < 8) {
3250 amdgpu_ring_write(ring,
3251 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
3252 } else {
3253 amdgpu_ring_write(ring,
3254 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
3255 }
3256 amdgpu_ring_write(ring, 0);
3257 amdgpu_ring_write(ring, pd_addr >> 12);
3258
3259 /* bits 0-15 are the VM contexts0-15 */
3260 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3261 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3262 WRITE_DATA_DST_SEL(0)));
3263 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3264 amdgpu_ring_write(ring, 0);
3265 amdgpu_ring_write(ring, 1 << vmid);
3266 3227
3267 /* wait for the invalidate to complete */ 3228 /* wait for the invalidate to complete */
3268 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 3229 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -3289,6 +3250,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3289 } 3250 }
3290} 3251}
3291 3252
3253static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
3254 uint32_t reg, uint32_t val)
3255{
3256 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3257
3258 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3259 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3260 WRITE_DATA_DST_SEL(0)));
3261 amdgpu_ring_write(ring, reg);
3262 amdgpu_ring_write(ring, 0);
3263 amdgpu_ring_write(ring, val);
3264}
3265
3292/* 3266/*
3293 * RLC 3267 * RLC
3294 * The RLC is a multi-purpose microengine that handles a 3268 * The RLC is a multi-purpose microengine that handles a
@@ -4384,34 +4358,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4384 case CHIP_KAVERI: 4358 case CHIP_KAVERI:
4385 adev->gfx.config.max_shader_engines = 1; 4359 adev->gfx.config.max_shader_engines = 1;
4386 adev->gfx.config.max_tile_pipes = 4; 4360 adev->gfx.config.max_tile_pipes = 4;
4387 if ((adev->pdev->device == 0x1304) || 4361 adev->gfx.config.max_cu_per_sh = 8;
4388 (adev->pdev->device == 0x1305) || 4362 adev->gfx.config.max_backends_per_se = 2;
4389 (adev->pdev->device == 0x130C) ||
4390 (adev->pdev->device == 0x130F) ||
4391 (adev->pdev->device == 0x1310) ||
4392 (adev->pdev->device == 0x1311) ||
4393 (adev->pdev->device == 0x131C)) {
4394 adev->gfx.config.max_cu_per_sh = 8;
4395 adev->gfx.config.max_backends_per_se = 2;
4396 } else if ((adev->pdev->device == 0x1309) ||
4397 (adev->pdev->device == 0x130A) ||
4398 (adev->pdev->device == 0x130D) ||
4399 (adev->pdev->device == 0x1313) ||
4400 (adev->pdev->device == 0x131D)) {
4401 adev->gfx.config.max_cu_per_sh = 6;
4402 adev->gfx.config.max_backends_per_se = 2;
4403 } else if ((adev->pdev->device == 0x1306) ||
4404 (adev->pdev->device == 0x1307) ||
4405 (adev->pdev->device == 0x130B) ||
4406 (adev->pdev->device == 0x130E) ||
4407 (adev->pdev->device == 0x1315) ||
4408 (adev->pdev->device == 0x131B)) {
4409 adev->gfx.config.max_cu_per_sh = 4;
4410 adev->gfx.config.max_backends_per_se = 1;
4411 } else {
4412 adev->gfx.config.max_cu_per_sh = 3;
4413 adev->gfx.config.max_backends_per_se = 1;
4414 }
4415 adev->gfx.config.max_sh_per_se = 1; 4363 adev->gfx.config.max_sh_per_se = 1;
4416 adev->gfx.config.max_texture_channel_caches = 4; 4364 adev->gfx.config.max_texture_channel_caches = 4;
4417 adev->gfx.config.max_gprs = 256; 4365 adev->gfx.config.max_gprs = 256;
@@ -5115,10 +5063,10 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5115 .emit_frame_size = 5063 .emit_frame_size =
5116 20 + /* gfx_v7_0_ring_emit_gds_switch */ 5064 20 + /* gfx_v7_0_ring_emit_gds_switch */
5117 7 + /* gfx_v7_0_ring_emit_hdp_flush */ 5065 7 + /* gfx_v7_0_ring_emit_hdp_flush */
5118 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ 5066 5 + /* hdp invalidate */
5119 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */ 5067 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
5120 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */ 5068 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
5121 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */ 5069 CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
5122 3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/ 5070 3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
5123 .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */ 5071 .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
5124 .emit_ib = gfx_v7_0_ring_emit_ib_gfx, 5072 .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
@@ -5127,12 +5075,12 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5127 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5075 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5128 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, 5076 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5129 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, 5077 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5130 .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
5131 .test_ring = gfx_v7_0_ring_test_ring, 5078 .test_ring = gfx_v7_0_ring_test_ring,
5132 .test_ib = gfx_v7_0_ring_test_ib, 5079 .test_ib = gfx_v7_0_ring_test_ib,
5133 .insert_nop = amdgpu_ring_insert_nop, 5080 .insert_nop = amdgpu_ring_insert_nop,
5134 .pad_ib = amdgpu_ring_generic_pad_ib, 5081 .pad_ib = amdgpu_ring_generic_pad_ib,
5135 .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl, 5082 .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
5083 .emit_wreg = gfx_v7_0_ring_emit_wreg,
5136}; 5084};
5137 5085
5138static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { 5086static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5146,9 +5094,9 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5146 .emit_frame_size = 5094 .emit_frame_size =
5147 20 + /* gfx_v7_0_ring_emit_gds_switch */ 5095 20 + /* gfx_v7_0_ring_emit_gds_switch */
5148 7 + /* gfx_v7_0_ring_emit_hdp_flush */ 5096 7 + /* gfx_v7_0_ring_emit_hdp_flush */
5149 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ 5097 5 + /* hdp invalidate */
5150 7 + /* gfx_v7_0_ring_emit_pipeline_sync */ 5098 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
5151 17 + /* gfx_v7_0_ring_emit_vm_flush */ 5099 CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
5152 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */ 5100 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
5153 .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */ 5101 .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
5154 .emit_ib = gfx_v7_0_ring_emit_ib_compute, 5102 .emit_ib = gfx_v7_0_ring_emit_ib_compute,
@@ -5157,11 +5105,11 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.emit_wreg = gfx_v7_0_ring_emit_wreg,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
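One way to read the reworked emit_frame_size arithmetic above: every register write routed through the new generic .emit_wreg hook costs a fixed 5-dword WRITE_DATA packet, so the VM-flush share of the budget becomes "number of writes * 5" plus the remaining fixed packets. A minimal standalone sketch of that accounting follows; the per-write cost is taken from the packet layout shown in this diff, while the counts in main() are example numbers only, not the real CIK values.

```c
#include <stdio.h>

/* each register write emitted via .emit_wreg is one 5-dword WRITE_DATA packet */
#define DWORDS_PER_WREG 5

static unsigned vm_flush_budget(unsigned num_wreg, unsigned extra_dw)
{
	/* extra_dw covers the trailing wait/sync packets of the flush */
	return num_wreg * DWORDS_PER_WREG + extra_dw;
}

int main(void)
{
	/* example numbers only: 2 register writes plus 13 fixed dwords */
	printf("vm_flush budget: %u dwords\n", vm_flush_budget(2, 13));
	return 0;
}
```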
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 4e694ae9f308..27943e57681c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3796,7 +3796,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
3796 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE, 3796 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3797 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 3797 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3798 WREG32(mmSH_MEM_CONFIG, tmp); 3798 WREG32(mmSH_MEM_CONFIG, tmp);
3799 tmp = adev->mc.shared_aperture_start >> 48; 3799 tmp = adev->gmc.shared_aperture_start >> 48;
3800 WREG32(mmSH_MEM_BASES, tmp); 3800 WREG32(mmSH_MEM_BASES, tmp);
3801 } 3801 }
3802 3802
@@ -4847,6 +4847,9 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
+		/* reset ring buffer */
+		ring->wptr = 0;
+		amdgpu_ring_clear_ring(ring);
 	} else {
 		amdgpu_ring_clear_ring(ring);
 	}
@@ -4921,13 +4924,6 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
 	/* Test KCQs */
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-		if (adev->in_gpu_reset) {
-			/* move reset ring buffer to here to workaround
-			 * compute ring test failed
-			 */
-			ring->wptr = 0;
-			amdgpu_ring_clear_ring(ring);
-		}
 		ring->ready = true;
 		r = amdgpu_ring_test_ring(ring);
 		if (r)
@@ -6230,19 +6226,6 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
6230 EVENT_INDEX(0)); 6226 EVENT_INDEX(0));
6231} 6227}
6232 6228
6233
6234static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
6235{
6236 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6237 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6238 WRITE_DATA_DST_SEL(0) |
6239 WR_CONFIRM));
6240 amdgpu_ring_write(ring, mmHDP_DEBUG0);
6241 amdgpu_ring_write(ring, 0);
6242 amdgpu_ring_write(ring, 1);
6243
6244}
6245
6246static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 6229static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
6247 struct amdgpu_ib *ib, 6230 struct amdgpu_ib *ib,
6248 unsigned vmid, bool ctx_switch) 6231 unsigned vmid, bool ctx_switch)
@@ -6332,28 +6315,7 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
-				 WRITE_DATA_DST_SEL(0)) |
-				 WR_CONFIRM);
-	if (vmid < 8) {
-		amdgpu_ring_write(ring,
-				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-	} else {
-		amdgpu_ring_write(ring,
-				  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-	}
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, pd_addr >> 12);
-
-	/* bits 0-15 are the VM contexts0-15 */
-	/* invalidate the cache */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(0)));
-	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, 1 << vmid);
+	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
 	/* wait for the invalidate to complete */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
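The hunk above is the core of the refactor: the per-IP vm_flush code stops open-coding WRITE_DATA packets and instead calls a generic GMC helper, which in turn drives the ring through its new .emit_wreg callback. A simplified model of that layering is sketched below; all mock_* names and the register numbers are invented for illustration and are not the amdgpu API.

```c
#include <stdio.h>
#include <stdint.h>

struct mock_ring;

struct mock_ring_funcs {
	void (*emit_wreg)(struct mock_ring *ring, uint32_t reg, uint32_t val);
};

struct mock_ring {
	const struct mock_ring_funcs *funcs;
	const char *name;
};

/* placeholder register offsets, not the real VI values */
#define MOCK_VM_CONTEXT0_PT_BASE 0x100
#define MOCK_VM_INVALIDATE_REQ   0x200

/* generic helper: knows *what* to write, not *how* a given ring encodes it */
static void mock_gmc_emit_flush_gpu_tlb(struct mock_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	/* program the per-VMID page table base ... */
	ring->funcs->emit_wreg(ring, MOCK_VM_CONTEXT0_PT_BASE + vmid,
			       (uint32_t)(pd_addr >> 12));
	/* ... then request a TLB invalidate for that VMID */
	ring->funcs->emit_wreg(ring, MOCK_VM_INVALIDATE_REQ, 1u << vmid);
}

/* per-ring backend: a real one would emit a PACKET3_WRITE_DATA here */
static void mock_gfx_emit_wreg(struct mock_ring *ring, uint32_t reg, uint32_t val)
{
	printf("%s: WRITE_DATA reg=0x%03x val=0x%08x\n",
	       ring->name, (unsigned)reg, (unsigned)val);
}

int main(void)
{
	const struct mock_ring_funcs funcs = { .emit_wreg = mock_gfx_emit_wreg };
	struct mock_ring ring = { .funcs = &funcs, .name = "gfx" };

	mock_gmc_emit_flush_gpu_tlb(&ring, 3, 0x123456000ULL);
	return 0;
}
```

The design benefit is that the register sequence lives in one place while each engine keeps only its packet encoding, which is exactly why the GFX/compute/SDMA flush paths in this merge all shrink to a single helper call.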
@@ -6617,8 +6579,22 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
 				    uint32_t val)
 {
+	uint32_t cmd;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_RING_TYPE_GFX:
+		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
+		break;
+	case AMDGPU_RING_TYPE_KIQ:
+		cmd = 1 << 16; /* no inc addr */
+		break;
+	default:
+		cmd = WR_CONFIRM;
+		break;
+	}
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
+	amdgpu_ring_write(ring, cmd);
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, val);
@@ -6871,7 +6847,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
6871 .emit_frame_size = /* maximum 215dw if count 16 IBs in */ 6847 .emit_frame_size = /* maximum 215dw if count 16 IBs in */
6872 5 + /* COND_EXEC */ 6848 5 + /* COND_EXEC */
6873 7 + /* PIPELINE_SYNC */ 6849 7 + /* PIPELINE_SYNC */
6874 19 + /* VM_FLUSH */ 6850 VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
6875 8 + /* FENCE for VM_FLUSH */ 6851 8 + /* FENCE for VM_FLUSH */
6876 20 + /* GDS switch */ 6852 20 + /* GDS switch */
6877 4 + /* double SWITCH_BUFFER, 6853 4 + /* double SWITCH_BUFFER,
@@ -6893,7 +6869,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
6893 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 6869 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
6894 .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, 6870 .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
6895 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, 6871 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
6896 .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
6897 .test_ring = gfx_v8_0_ring_test_ring, 6872 .test_ring = gfx_v8_0_ring_test_ring,
6898 .test_ib = gfx_v8_0_ring_test_ib, 6873 .test_ib = gfx_v8_0_ring_test_ib,
6899 .insert_nop = amdgpu_ring_insert_nop, 6874 .insert_nop = amdgpu_ring_insert_nop,
@@ -6902,6 +6877,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
6902 .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl, 6877 .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
6903 .init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec, 6878 .init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
6904 .patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec, 6879 .patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
6880 .emit_wreg = gfx_v8_0_ring_emit_wreg,
6905}; 6881};
6906 6882
6907static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { 6883static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6915,9 +6891,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
6915 .emit_frame_size = 6891 .emit_frame_size =
6916 20 + /* gfx_v8_0_ring_emit_gds_switch */ 6892 20 + /* gfx_v8_0_ring_emit_gds_switch */
6917 7 + /* gfx_v8_0_ring_emit_hdp_flush */ 6893 7 + /* gfx_v8_0_ring_emit_hdp_flush */
6918 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ 6894 5 + /* hdp_invalidate */
6919 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ 6895 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
6920 17 + /* gfx_v8_0_ring_emit_vm_flush */ 6896 VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
6921 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ 6897 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
6922 .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ 6898 .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
6923 .emit_ib = gfx_v8_0_ring_emit_ib_compute, 6899 .emit_ib = gfx_v8_0_ring_emit_ib_compute,
@@ -6926,12 +6902,12 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
6926 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 6902 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
6927 .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, 6903 .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
6928 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, 6904 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
6929 .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
6930 .test_ring = gfx_v8_0_ring_test_ring, 6905 .test_ring = gfx_v8_0_ring_test_ring,
6931 .test_ib = gfx_v8_0_ring_test_ib, 6906 .test_ib = gfx_v8_0_ring_test_ib,
6932 .insert_nop = amdgpu_ring_insert_nop, 6907 .insert_nop = amdgpu_ring_insert_nop,
6933 .pad_ib = amdgpu_ring_generic_pad_ib, 6908 .pad_ib = amdgpu_ring_generic_pad_ib,
6934 .set_priority = gfx_v8_0_ring_set_priority_compute, 6909 .set_priority = gfx_v8_0_ring_set_priority_compute,
6910 .emit_wreg = gfx_v8_0_ring_emit_wreg,
6935}; 6911};
6936 6912
6937static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { 6913static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
@@ -6945,7 +6921,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
6945 .emit_frame_size = 6921 .emit_frame_size =
6946 20 + /* gfx_v8_0_ring_emit_gds_switch */ 6922 20 + /* gfx_v8_0_ring_emit_gds_switch */
6947 7 + /* gfx_v8_0_ring_emit_hdp_flush */ 6923 7 + /* gfx_v8_0_ring_emit_hdp_flush */
6948 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ 6924 5 + /* hdp_invalidate */
6949 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ 6925 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
6950 17 + /* gfx_v8_0_ring_emit_vm_flush */ 6926 17 + /* gfx_v8_0_ring_emit_vm_flush */
6951 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 6927 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
@@ -7151,12 +7127,12 @@ static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
7151 } ce_payload = {}; 7127 } ce_payload = {};
7152 7128
7153 if (ring->adev->virt.chained_ib_support) { 7129 if (ring->adev->virt.chained_ib_support) {
7154 ce_payload_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096 + 7130 ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
7155 offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload); 7131 offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
7156 cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2; 7132 cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
7157 } else { 7133 } else {
7158 ce_payload_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096 + 7134 ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
7159 offsetof(struct vi_gfx_meta_data, ce_payload); 7135 offsetof(struct vi_gfx_meta_data, ce_payload);
7160 cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2; 7136 cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
7161 } 7137 }
7162 7138
@@ -7179,7 +7155,7 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
7179 struct vi_de_ib_state_chained_ib chained; 7155 struct vi_de_ib_state_chained_ib chained;
7180 } de_payload = {}; 7156 } de_payload = {};
7181 7157
7182 csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096; 7158 csa_addr = amdgpu_csa_vaddr(ring->adev);
7183 gds_addr = csa_addr + 4096; 7159 gds_addr = csa_addr + 4096;
7184 if (ring->adev->virt.chained_ib_support) { 7160 if (ring->adev->virt.chained_ib_support) {
7185 de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr); 7161 de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
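The CE/DE metadata hunks above replace a hard-coded "AMDGPU_VA_RESERVED_SIZE - 2 * 4096" offset with amdgpu_csa_vaddr(), so the context save area address is derived from the device's VM range rather than a fixed constant. The standalone sketch below shows one plausible shape for such a derivation; the formula, the 16 MiB reserved size, and the 48-bit range are assumptions for illustration, not necessarily what amdgpu_csa_vaddr() computes.

```c
#include <stdio.h>
#include <stdint.h>

#define MOCK_GPU_PAGE_SHIFT   12
#define MOCK_VA_RESERVED_SIZE (16ULL << 20)   /* assumed reserved region */

/* place the CSA inside the reserved region at the top of the VA space */
static uint64_t mock_csa_vaddr(uint64_t max_pfn)
{
	uint64_t top = max_pfn << MOCK_GPU_PAGE_SHIFT;

	return top - MOCK_VA_RESERVED_SIZE;
}

int main(void)
{
	uint64_t max_pfn = 1ULL << (48 - MOCK_GPU_PAGE_SHIFT); /* 48-bit VA */

	printf("CSA at 0x%llx\n", (unsigned long long)mock_csa_vaddr(max_pfn));
	return 0;
}
```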
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c06479615e8a..d1d2c27156b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -271,58 +271,65 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
271 271
272static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 272static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
273{ 273{
274 struct amdgpu_device *adev = ring->adev; 274 struct amdgpu_device *adev = ring->adev;
275 struct amdgpu_ib ib; 275 struct amdgpu_ib ib;
276 struct dma_fence *f = NULL; 276 struct dma_fence *f = NULL;
277 uint32_t scratch; 277
278 uint32_t tmp = 0; 278 unsigned index;
279 long r; 279 uint64_t gpu_addr;
280 280 uint32_t tmp;
281 r = amdgpu_gfx_scratch_get(adev, &scratch); 281 long r;
282 if (r) { 282
283 DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); 283 r = amdgpu_device_wb_get(adev, &index);
284 return r; 284 if (r) {
285 } 285 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
286 WREG32(scratch, 0xCAFEDEAD); 286 return r;
287 memset(&ib, 0, sizeof(ib)); 287 }
288 r = amdgpu_ib_get(adev, NULL, 256, &ib); 288
289 if (r) { 289 gpu_addr = adev->wb.gpu_addr + (index * 4);
290 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 290 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
291 goto err1; 291 memset(&ib, 0, sizeof(ib));
292 } 292 r = amdgpu_ib_get(adev, NULL, 16, &ib);
293 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 293 if (r) {
294 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); 294 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
295 ib.ptr[2] = 0xDEADBEEF; 295 goto err1;
296 ib.length_dw = 3; 296 }
297 297 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
298 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 298 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
299 if (r) 299 ib.ptr[2] = lower_32_bits(gpu_addr);
300 goto err2; 300 ib.ptr[3] = upper_32_bits(gpu_addr);
301 301 ib.ptr[4] = 0xDEADBEEF;
302 r = dma_fence_wait_timeout(f, false, timeout); 302 ib.length_dw = 5;
303 if (r == 0) { 303
304 DRM_ERROR("amdgpu: IB test timed out.\n"); 304 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
305 r = -ETIMEDOUT; 305 if (r)
306 goto err2; 306 goto err2;
307 } else if (r < 0) { 307
308 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); 308 r = dma_fence_wait_timeout(f, false, timeout);
309 goto err2; 309 if (r == 0) {
310 } 310 DRM_ERROR("amdgpu: IB test timed out.\n");
311 tmp = RREG32(scratch); 311 r = -ETIMEDOUT;
312 if (tmp == 0xDEADBEEF) { 312 goto err2;
313 DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); 313 } else if (r < 0) {
314 r = 0; 314 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
315 } else { 315 goto err2;
316 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", 316 }
317 scratch, tmp); 317
318 r = -EINVAL; 318 tmp = adev->wb.wb[index];
319 } 319 if (tmp == 0xDEADBEEF) {
320 DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
321 r = 0;
322 } else {
323 DRM_ERROR("ib test on ring %d failed\n", ring->idx);
324 r = -EINVAL;
325 }
326
320err2: 327err2:
321 amdgpu_ib_free(adev, &ib, NULL); 328 amdgpu_ib_free(adev, &ib, NULL);
322 dma_fence_put(f); 329 dma_fence_put(f);
323err1: 330err1:
324 amdgpu_gfx_scratch_free(adev, scratch); 331 amdgpu_device_wb_free(adev, index);
325 return r; 332 return r;
326} 333}
327 334
328 335
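The gfx_v9_0_ring_test_ib() rewrite above switches the IB sanity test from a scratch register to a CPU-visible write-back (WB) slot: seed the slot with 0xCAFEDEAD, have the IB WRITE_DATA 0xDEADBEEF to the slot's GPU address, wait for the fence, then read the slot back from the CPU. The sketch below models only that control flow with mock types; there are no real fences or packets, and mock_submit_write() just stands in for "submit the IB and wait".

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define WB_SLOTS 8
#define POISON   0xCAFEDEADu
#define MAGIC    0xDEADBEEFu

struct mock_wb_pool {
	uint32_t wb[WB_SLOTS];   /* CPU-visible dwords */
	int used[WB_SLOTS];
};

static int wb_get(struct mock_wb_pool *p, unsigned *index)
{
	for (unsigned i = 0; i < WB_SLOTS; i++) {
		if (!p->used[i]) {
			p->used[i] = 1;
			*index = i;
			return 0;
		}
	}
	return -1;
}

/* stand-in for scheduling a WRITE_DATA IB and waiting on its fence */
static void mock_submit_write(struct mock_wb_pool *p, unsigned index, uint32_t val)
{
	p->wb[index] = val;
}

int main(void)
{
	struct mock_wb_pool pool;
	unsigned index;

	memset(&pool, 0, sizeof(pool));
	if (wb_get(&pool, &index))
		return 1;

	pool.wb[index] = POISON;              /* seed before submission */
	mock_submit_write(&pool, index, MAGIC);

	printf("ib test %s\n", pool.wb[index] == MAGIC ? "succeeded" : "failed");
	return 0;
}
```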
@@ -1254,23 +1261,23 @@ static int gfx_v9_0_sw_init(void *handle)
1254 adev->gfx.mec.num_queue_per_pipe = 8; 1261 adev->gfx.mec.num_queue_per_pipe = 8;
1255 1262
1256 /* KIQ event */ 1263 /* KIQ event */
1257 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq); 1264 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
1258 if (r) 1265 if (r)
1259 return r; 1266 return r;
1260 1267
1261 /* EOP Event */ 1268 /* EOP Event */
1262 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq); 1269 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
1263 if (r) 1270 if (r)
1264 return r; 1271 return r;
1265 1272
1266 /* Privileged reg */ 1273 /* Privileged reg */
1267 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184, 1274 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
1268 &adev->gfx.priv_reg_irq); 1275 &adev->gfx.priv_reg_irq);
1269 if (r) 1276 if (r)
1270 return r; 1277 return r;
1271 1278
1272 /* Privileged inst */ 1279 /* Privileged inst */
1273 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185, 1280 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
1274 &adev->gfx.priv_inst_irq); 1281 &adev->gfx.priv_inst_irq);
1275 if (r) 1282 if (r)
1276 return r; 1283 return r;
@@ -1539,7 +1546,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
1539 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 1546 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1540 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 1547 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1541 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp); 1548 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1542 tmp = adev->mc.shared_aperture_start >> 48; 1549 tmp = adev->gmc.shared_aperture_start >> 48;
1543 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp); 1550 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1544 } 1551 }
1545 } 1552 }
@@ -2954,7 +2961,13 @@ static int gfx_v9_0_hw_fini(void *handle)
 		gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
 
 	if (amdgpu_sriov_vf(adev)) {
-		pr_debug("For SRIOV client, shouldn't do anything.\n");
+		gfx_v9_0_cp_gfx_enable(adev, false);
+		/* must disable polling for SRIOV when hw finished, otherwise
+		 * CPC engine may still keep fetching WB address which is already
+		 * invalid after sw finished and trigger DMAR reading error in
+		 * hypervisor side.
+		 */
+		WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
 		return 0;
 	}
 	gfx_v9_0_cp_enable(adev, false);
@@ -3585,14 +3598,6 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3585 ref_and_mask, ref_and_mask, 0x20); 3598 ref_and_mask, ref_and_mask, 0x20);
3586} 3599}
3587 3600
3588static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
3589{
3590 struct amdgpu_device *adev = ring->adev;
3591
3592 gfx_v9_0_write_data_to_reg(ring, 0, true,
3593 SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
3594}
3595
3596static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 3601static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3597 struct amdgpu_ib *ib, 3602 struct amdgpu_ib *ib,
3598 unsigned vmid, bool ctx_switch) 3603 unsigned vmid, bool ctx_switch)
@@ -3686,32 +3691,10 @@ static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
-	uint64_t flags = AMDGPU_PTE_VALID;
-	unsigned eng = ring->vm_inv_eng;
-
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
-	pd_addr |= flags;
-
-	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
-				   hub->ctx0_ptb_addr_lo32 + (2 * vmid),
-				   lower_32_bits(pd_addr));
-
-	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
-				   hub->ctx0_ptb_addr_hi32 + (2 * vmid),
-				   upper_32_bits(pd_addr));
-
-	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
-				   hub->vm_inv_eng0_req + eng, req);
-
-	/* wait for the invalidate to complete */
-	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
-			      eng, 0, 1 << vmid, 1 << vmid, 0x20);
+	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
 	/* compute doesn't have PFP */
-	if (usepfp) {
+	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 		amdgpu_ring_write(ring, 0x0);
@@ -3735,6 +3718,105 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
3735 return wptr; 3718 return wptr;
3736} 3719}
3737 3720
3721static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
3722 bool acquire)
3723{
3724 struct amdgpu_device *adev = ring->adev;
3725 int pipe_num, tmp, reg;
3726 int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
3727
3728 pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
3729
3730 /* first me only has 2 entries, GFX and HP3D */
3731 if (ring->me > 0)
3732 pipe_num -= 2;
3733
3734 reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
3735 tmp = RREG32(reg);
3736 tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
3737 WREG32(reg, tmp);
3738}
3739
3740static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
3741 struct amdgpu_ring *ring,
3742 bool acquire)
3743{
3744 int i, pipe;
3745 bool reserve;
3746 struct amdgpu_ring *iring;
3747
3748 mutex_lock(&adev->gfx.pipe_reserve_mutex);
3749 pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
3750 if (acquire)
3751 set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
3752 else
3753 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
3754
3755 if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
3756 /* Clear all reservations - everyone reacquires all resources */
3757 for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
3758 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
3759 true);
3760
3761 for (i = 0; i < adev->gfx.num_compute_rings; ++i)
3762 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
3763 true);
3764 } else {
3765 /* Lower all pipes without a current reservation */
3766 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
3767 iring = &adev->gfx.gfx_ring[i];
3768 pipe = amdgpu_gfx_queue_to_bit(adev,
3769 iring->me,
3770 iring->pipe,
3771 0);
3772 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
3773 gfx_v9_0_ring_set_pipe_percent(iring, reserve);
3774 }
3775
3776 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
3777 iring = &adev->gfx.compute_ring[i];
3778 pipe = amdgpu_gfx_queue_to_bit(adev,
3779 iring->me,
3780 iring->pipe,
3781 0);
3782 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
3783 gfx_v9_0_ring_set_pipe_percent(iring, reserve);
3784 }
3785 }
3786
3787 mutex_unlock(&adev->gfx.pipe_reserve_mutex);
3788}
3789
3790static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
3791 struct amdgpu_ring *ring,
3792 bool acquire)
3793{
3794 uint32_t pipe_priority = acquire ? 0x2 : 0x0;
3795 uint32_t queue_priority = acquire ? 0xf : 0x0;
3796
3797 mutex_lock(&adev->srbm_mutex);
3798 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3799
3800 WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
3801 WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
3802
3803 soc15_grbm_select(adev, 0, 0, 0, 0);
3804 mutex_unlock(&adev->srbm_mutex);
3805}
3806
3807static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
3808 enum drm_sched_priority priority)
3809{
3810 struct amdgpu_device *adev = ring->adev;
3811 bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
3812
3813 if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
3814 return;
3815
3816 gfx_v9_0_hqd_set_priority(adev, ring, acquire);
3817 gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
3818}
3819
3738static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 3820static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
3739{ 3821{
3740 struct amdgpu_device *adev = ring->adev; 3822 struct amdgpu_device *adev = ring->adev;
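The block added in the hunk above introduces priority support for gfx_v9 compute rings: acquiring high priority sets the ring's pipe bit in a reservation bitmap and throttles every pipe that holds no reservation (via SPI_WCL_PIPE_PERCENT), and once the bitmap is empty all pipes are restored. A toy model of that bookkeeping follows; the 100%/1% split, the pipe count, and all names are illustrative stand-ins rather than the driver's values.

```c
#include <stdio.h>
#include <stdint.h>

#define NUM_PIPES 8

static uint32_t reserve_bitmap;   /* one bit per pipe holding a reservation */

static unsigned pipe_percent(unsigned pipe)
{
	if (!reserve_bitmap)
		return 100;                              /* nobody is prioritized */
	return (reserve_bitmap & (1u << pipe)) ? 100 : 1; /* throttle the rest */
}

static void set_priority(unsigned pipe, int acquire)
{
	if (acquire)
		reserve_bitmap |= 1u << pipe;
	else
		reserve_bitmap &= ~(1u << pipe);

	for (unsigned i = 0; i < NUM_PIPES; i++)
		printf("pipe %u -> %u%%\n", i, pipe_percent(i));
}

int main(void)
{
	set_priority(2, 1);   /* pipe 2 goes high priority, others throttle */
	set_priority(2, 0);   /* reservation released, everyone restored */
	return 0;
}
```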
@@ -3788,7 +3870,7 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
3788 int cnt; 3870 int cnt;
3789 3871
3790 cnt = (sizeof(ce_payload) >> 2) + 4 - 2; 3872 cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
3791 csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096; 3873 csa_addr = amdgpu_csa_vaddr(ring->adev);
3792 3874
3793 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 3875 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
3794 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | 3876 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
@@ -3806,7 +3888,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
3806 uint64_t csa_addr, gds_addr; 3888 uint64_t csa_addr, gds_addr;
3807 int cnt; 3889 int cnt;
3808 3890
3809 csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096; 3891 csa_addr = amdgpu_csa_vaddr(ring->adev);
3810 gds_addr = csa_addr + 4096; 3892 gds_addr = csa_addr + 4096;
3811 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); 3893 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
3812 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 3894 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
@@ -3904,15 +3986,34 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 }
 
 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
 				    uint32_t val)
 {
+	uint32_t cmd = 0;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_RING_TYPE_GFX:
+		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
+		break;
+	case AMDGPU_RING_TYPE_KIQ:
+		cmd = (1 << 16); /* no inc addr */
+		break;
+	default:
+		cmd = WR_CONFIRM;
+		break;
+	}
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
+	amdgpu_ring_write(ring, cmd);
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, val);
 }
 
+static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+					uint32_t val, uint32_t mask)
+{
+	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						 enum amdgpu_interrupt_state state)
 {
@@ -4199,7 +4300,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
4199 .emit_frame_size = /* totally 242 maximum if 16 IBs */ 4300 .emit_frame_size = /* totally 242 maximum if 16 IBs */
4200 5 + /* COND_EXEC */ 4301 5 + /* COND_EXEC */
4201 7 + /* PIPELINE_SYNC */ 4302 7 + /* PIPELINE_SYNC */
4202 24 + /* VM_FLUSH */ 4303 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4304 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4305 2 + /* VM_FLUSH */
4203 8 + /* FENCE for VM_FLUSH */ 4306 8 + /* FENCE for VM_FLUSH */
4204 20 + /* GDS switch */ 4307 20 + /* GDS switch */
4205 4 + /* double SWITCH_BUFFER, 4308 4 + /* double SWITCH_BUFFER,
@@ -4221,7 +4324,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
4221 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush, 4324 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4222 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, 4325 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4223 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, 4326 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4224 .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
4225 .test_ring = gfx_v9_0_ring_test_ring, 4327 .test_ring = gfx_v9_0_ring_test_ring,
4226 .test_ib = gfx_v9_0_ring_test_ib, 4328 .test_ib = gfx_v9_0_ring_test_ib,
4227 .insert_nop = amdgpu_ring_insert_nop, 4329 .insert_nop = amdgpu_ring_insert_nop,
@@ -4231,6 +4333,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
4231 .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec, 4333 .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
4232 .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec, 4334 .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
4233 .emit_tmz = gfx_v9_0_ring_emit_tmz, 4335 .emit_tmz = gfx_v9_0_ring_emit_tmz,
4336 .emit_wreg = gfx_v9_0_ring_emit_wreg,
4337 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4234}; 4338};
4235 4339
4236static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { 4340static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -4245,9 +4349,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
4245 .emit_frame_size = 4349 .emit_frame_size =
4246 20 + /* gfx_v9_0_ring_emit_gds_switch */ 4350 20 + /* gfx_v9_0_ring_emit_gds_switch */
4247 7 + /* gfx_v9_0_ring_emit_hdp_flush */ 4351 7 + /* gfx_v9_0_ring_emit_hdp_flush */
4248 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */ 4352 5 + /* hdp invalidate */
4249 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ 4353 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4250 24 + /* gfx_v9_0_ring_emit_vm_flush */ 4354 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4355 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4356 2 + /* gfx_v9_0_ring_emit_vm_flush */
4251 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ 4357 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
4252 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ 4358 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4253 .emit_ib = gfx_v9_0_ring_emit_ib_compute, 4359 .emit_ib = gfx_v9_0_ring_emit_ib_compute,
@@ -4256,11 +4362,13 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
4256 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush, 4362 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4257 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, 4363 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4258 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, 4364 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4259 .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
4260 .test_ring = gfx_v9_0_ring_test_ring, 4365 .test_ring = gfx_v9_0_ring_test_ring,
4261 .test_ib = gfx_v9_0_ring_test_ib, 4366 .test_ib = gfx_v9_0_ring_test_ib,
4262 .insert_nop = amdgpu_ring_insert_nop, 4367 .insert_nop = amdgpu_ring_insert_nop,
4263 .pad_ib = amdgpu_ring_generic_pad_ib, 4368 .pad_ib = amdgpu_ring_generic_pad_ib,
4369 .set_priority = gfx_v9_0_ring_set_priority_compute,
4370 .emit_wreg = gfx_v9_0_ring_emit_wreg,
4371 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4264}; 4372};
4265 4373
4266static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { 4374static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -4275,9 +4383,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
4275 .emit_frame_size = 4383 .emit_frame_size =
4276 20 + /* gfx_v9_0_ring_emit_gds_switch */ 4384 20 + /* gfx_v9_0_ring_emit_gds_switch */
4277 7 + /* gfx_v9_0_ring_emit_hdp_flush */ 4385 7 + /* gfx_v9_0_ring_emit_hdp_flush */
4278 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */ 4386 5 + /* hdp invalidate */
4279 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ 4387 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4280 24 + /* gfx_v9_0_ring_emit_vm_flush */ 4388 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4389 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4390 2 + /* gfx_v9_0_ring_emit_vm_flush */
4281 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 4391 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
4282 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ 4392 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4283 .emit_ib = gfx_v9_0_ring_emit_ib_compute, 4393 .emit_ib = gfx_v9_0_ring_emit_ib_compute,
@@ -4288,6 +4398,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
4288 .pad_ib = amdgpu_ring_generic_pad_ib, 4398 .pad_ib = amdgpu_ring_generic_pad_ib,
4289 .emit_rreg = gfx_v9_0_ring_emit_rreg, 4399 .emit_rreg = gfx_v9_0_ring_emit_rreg,
4290 .emit_wreg = gfx_v9_0_ring_emit_wreg, 4400 .emit_wreg = gfx_v9_0_ring_emit_wreg,
4401 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4291}; 4402};
4292 4403
4293static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) 4404static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 56f5fe4e2fee..acfbd2d749cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -40,7 +40,7 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
40 uint64_t value; 40 uint64_t value;
41 41
42 BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL)); 42 BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
43 value = adev->gart.table_addr - adev->mc.vram_start 43 value = adev->gart.table_addr - adev->gmc.vram_start
44 + adev->vm_manager.vram_base_offset; 44 + adev->vm_manager.vram_base_offset;
45 value &= 0x0000FFFFFFFFF000ULL; 45 value &= 0x0000FFFFFFFFF000ULL;
46 value |= 0x1; /*valid bit*/ 46 value |= 0x1; /*valid bit*/
@@ -57,14 +57,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
57 gfxhub_v1_0_init_gart_pt_regs(adev); 57 gfxhub_v1_0_init_gart_pt_regs(adev);
58 58
59 WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, 59 WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
60 (u32)(adev->mc.gart_start >> 12)); 60 (u32)(adev->gmc.gart_start >> 12));
61 WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, 61 WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
62 (u32)(adev->mc.gart_start >> 44)); 62 (u32)(adev->gmc.gart_start >> 44));
63 63
64 WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, 64 WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
65 (u32)(adev->mc.gart_end >> 12)); 65 (u32)(adev->gmc.gart_end >> 12));
66 WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, 66 WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
67 (u32)(adev->mc.gart_end >> 44)); 67 (u32)(adev->gmc.gart_end >> 44));
68} 68}
69 69
70static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) 70static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -78,12 +78,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
78 78
79 /* Program the system aperture low logical page number. */ 79 /* Program the system aperture low logical page number. */
80 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 80 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
81 adev->mc.vram_start >> 18); 81 adev->gmc.vram_start >> 18);
82 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 82 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
83 adev->mc.vram_end >> 18); 83 adev->gmc.vram_end >> 18);
84 84
85 /* Set default page address. */ 85 /* Set default page address. */
86 value = adev->vram_scratch.gpu_addr - adev->mc.vram_start 86 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
87 + adev->vm_manager.vram_base_offset; 87 + adev->vm_manager.vram_base_offset;
88 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 88 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
89 (u32)(value >> 12)); 89 (u32)(value >> 12));
@@ -92,9 +92,9 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
92 92
93 /* Program "protection fault". */ 93 /* Program "protection fault". */
94 WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, 94 WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
95 (u32)(adev->dummy_page.addr >> 12)); 95 (u32)(adev->dummy_page_addr >> 12));
96 WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, 96 WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
97 (u32)((u64)adev->dummy_page.addr >> 44)); 97 (u32)((u64)adev->dummy_page_addr >> 44));
98 98
99 WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2, 99 WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
100 ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); 100 ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
@@ -143,7 +143,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
143 WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); 143 WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
144 144
145 tmp = mmVM_L2_CNTL3_DEFAULT; 145 tmp = mmVM_L2_CNTL3_DEFAULT;
146 if (adev->mc.translate_further) { 146 if (adev->gmc.translate_further) {
147 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); 147 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
148 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, 148 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
149 L2_CACHE_BIGK_FRAGMENT_SIZE, 9); 149 L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
@@ -195,7 +195,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
195 195
196 num_level = adev->vm_manager.num_level; 196 num_level = adev->vm_manager.num_level;
197 block_size = adev->vm_manager.block_size; 197 block_size = adev->vm_manager.block_size;
198 if (adev->mc.translate_further) 198 if (adev->gmc.translate_further)
199 num_level -= 1; 199 num_level -= 1;
200 else 200 else
201 block_size -= 9; 201 block_size -= 9;
@@ -257,9 +257,9 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
257 * SRIOV driver need to program them 257 * SRIOV driver need to program them
258 */ 258 */
259 WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 259 WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE,
260 adev->mc.vram_start >> 24); 260 adev->gmc.vram_start >> 24);
261 WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 261 WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP,
262 adev->mc.vram_end >> 24); 262 adev->gmc.vram_end >> 24);
263 } 263 }
264 264
265 /* GART Enable. */ 265 /* GART Enable. */
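In the gfxhub hunks above, the GART aperture bounds are split across LO32/HI32 register pairs: the low register takes the address shifted right by 12 (4 KiB page granularity) and the high register takes the bits above that (shift by 44). A quick standalone check that this split is lossless for a page-aligned address; the sample value is arbitrary and only the shift arithmetic mirrors the diff.

```c
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t gart_start = 0x0000ABCD12345000ULL;   /* page-aligned sample */
	uint32_t lo = (uint32_t)(gart_start >> 12);    /* keeps bits 43:12 */
	uint32_t hi = (uint32_t)(gart_start >> 44);    /* keeps bits 63:44 */

	/* recombine: the two halves cover all bits above the page offset */
	uint64_t rebuilt = ((uint64_t)hi << 44) | ((uint64_t)lo << 12);

	assert(rebuilt == gart_start);
	printf("lo=0x%08x hi=0x%08x\n", (unsigned)lo, (unsigned)hi);
	return 0;
}
```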
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 5f5eb15ccf4a..5617cf62c566 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -37,7 +37,7 @@
37#include "dce/dce_6_0_sh_mask.h" 37#include "dce/dce_6_0_sh_mask.h"
38#include "si_enums.h" 38#include "si_enums.h"
39 39
40static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev); 40static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
41static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev); 41static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
42static int gmc_v6_0_wait_for_idle(void *handle); 42static int gmc_v6_0_wait_for_idle(void *handle);
43 43
@@ -137,19 +137,19 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
137 snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); 137 snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
138 else 138 else
139 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 139 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
140 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); 140 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
141 if (err) 141 if (err)
142 goto out; 142 goto out;
143 143
144 err = amdgpu_ucode_validate(adev->mc.fw); 144 err = amdgpu_ucode_validate(adev->gmc.fw);
145 145
146out: 146out:
147 if (err) { 147 if (err) {
148 dev_err(adev->dev, 148 dev_err(adev->dev,
149 "si_mc: Failed to load firmware \"%s\"\n", 149 "si_mc: Failed to load firmware \"%s\"\n",
150 fw_name); 150 fw_name);
151 release_firmware(adev->mc.fw); 151 release_firmware(adev->gmc.fw);
152 adev->mc.fw = NULL; 152 adev->gmc.fw = NULL;
153 } 153 }
154 return err; 154 return err;
155} 155}
@@ -162,20 +162,20 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
162 int i, regs_size, ucode_size; 162 int i, regs_size, ucode_size;
163 const struct mc_firmware_header_v1_0 *hdr; 163 const struct mc_firmware_header_v1_0 *hdr;
164 164
165 if (!adev->mc.fw) 165 if (!adev->gmc.fw)
166 return -EINVAL; 166 return -EINVAL;
167 167
168 hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; 168 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
169 169
170 amdgpu_ucode_print_mc_hdr(&hdr->header); 170 amdgpu_ucode_print_mc_hdr(&hdr->header);
171 171
172 adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); 172 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
173 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); 173 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
174 new_io_mc_regs = (const __le32 *) 174 new_io_mc_regs = (const __le32 *)
175 (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); 175 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
176 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 176 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
177 new_fw_data = (const __le32 *) 177 new_fw_data = (const __le32 *)
178 (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 178 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
179 179
180 running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK; 180 running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
181 181
@@ -218,12 +218,12 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
218} 218}
219 219
220static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, 220static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
221 struct amdgpu_mc *mc) 221 struct amdgpu_gmc *mc)
222{ 222{
223 u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; 223 u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
224 base <<= 24; 224 base <<= 24;
225 225
226 amdgpu_device_vram_location(adev, &adev->mc, base); 226 amdgpu_device_vram_location(adev, &adev->gmc, base);
227 amdgpu_device_gart_location(adev, mc); 227 amdgpu_device_gart_location(adev, mc);
228} 228}
229 229
@@ -260,9 +260,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
260 } 260 }
261 /* Update configuration */ 261 /* Update configuration */
262 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 262 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
263 adev->mc.vram_start >> 12); 263 adev->gmc.vram_start >> 12);
264 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 264 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
265 adev->mc.vram_end >> 12); 265 adev->gmc.vram_end >> 12);
266 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 266 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
267 adev->vram_scratch.gpu_addr >> 12); 267 adev->vram_scratch.gpu_addr >> 12);
268 WREG32(mmMC_VM_AGP_BASE, 0); 268 WREG32(mmMC_VM_AGP_BASE, 0);
@@ -320,56 +320,69 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
320 numchan = 16; 320 numchan = 16;
321 break; 321 break;
322 } 322 }
323 adev->mc.vram_width = numchan * chansize; 323 adev->gmc.vram_width = numchan * chansize;
324 /* size in MB on si */ 324 /* size in MB on si */
325 adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 325 adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
326 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 326 adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
327 327
328 if (!(adev->flags & AMD_IS_APU)) { 328 if (!(adev->flags & AMD_IS_APU)) {
329 r = amdgpu_device_resize_fb_bar(adev); 329 r = amdgpu_device_resize_fb_bar(adev);
330 if (r) 330 if (r)
331 return r; 331 return r;
332 } 332 }
333 adev->mc.aper_base = pci_resource_start(adev->pdev, 0); 333 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
334 adev->mc.aper_size = pci_resource_len(adev->pdev, 0); 334 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
335 adev->mc.visible_vram_size = adev->mc.aper_size; 335 adev->gmc.visible_vram_size = adev->gmc.aper_size;
336 336
337 /* set the gart size */ 337 /* set the gart size */
338 if (amdgpu_gart_size == -1) { 338 if (amdgpu_gart_size == -1) {
339 switch (adev->asic_type) { 339 switch (adev->asic_type) {
340 case CHIP_HAINAN: /* no MM engines */ 340 case CHIP_HAINAN: /* no MM engines */
341 default: 341 default:
342 adev->mc.gart_size = 256ULL << 20; 342 adev->gmc.gart_size = 256ULL << 20;
343 break; 343 break;
344 case CHIP_VERDE: /* UVD, VCE do not support GPUVM */ 344 case CHIP_VERDE: /* UVD, VCE do not support GPUVM */
345 case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */ 345 case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */
346 case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */ 346 case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
347 case CHIP_OLAND: /* UVD, VCE do not support GPUVM */ 347 case CHIP_OLAND: /* UVD, VCE do not support GPUVM */
348 adev->mc.gart_size = 1024ULL << 20; 348 adev->gmc.gart_size = 1024ULL << 20;
349 break; 349 break;
350 } 350 }
351 } else { 351 } else {
352 adev->mc.gart_size = (u64)amdgpu_gart_size << 20; 352 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
353 } 353 }
354 354
355 gmc_v6_0_vram_gtt_location(adev, &adev->mc); 355 gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
356 356
357 return 0; 357 return 0;
358} 358}
359 359
360static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, 360static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
361 uint32_t vmid)
362{ 361{
363 WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
364
365 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); 362 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
366} 363}
367 364
368static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev, 365static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
369 void *cpu_pt_addr, 366 unsigned vmid, uint64_t pd_addr)
370 uint32_t gpu_page_idx, 367{
371 uint64_t addr, 368 uint32_t reg;
372 uint64_t flags) 369
370 /* write new base address */
371 if (vmid < 8)
372 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
373 else
374 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
375 amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
376
377 /* bits 0-15 are the VM contexts0-15 */
378 amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
379
380 return pd_addr;
381}
382
383static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
384 uint32_t gpu_page_idx, uint64_t addr,
385 uint64_t flags)
373{ 386{
374 void __iomem *ptr = (void *)cpu_pt_addr; 387 void __iomem *ptr = (void *)cpu_pt_addr;
375 uint64_t value; 388 uint64_t value;
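The new gmc_v6_0_emit_flush_gpu_tlb() in the hunk above selects the page-table base register by VMID: IDs 0-7 index off the CONTEXT0 base register, IDs 8-15 off the CONTEXT8 base. The small check below exercises only that indexing rule; the register numbers are placeholders, not the SI offsets.

```c
#include <stdio.h>
#include <assert.h>

#define MOCK_VM_CONTEXT0_PT_BASE 0x100   /* placeholder offsets */
#define MOCK_VM_CONTEXT8_PT_BASE 0x120

static unsigned pt_base_reg(unsigned vmid)
{
	if (vmid < 8)
		return MOCK_VM_CONTEXT0_PT_BASE + vmid;
	return MOCK_VM_CONTEXT8_PT_BASE + (vmid - 8);
}

int main(void)
{
	assert(pt_base_reg(0)  == MOCK_VM_CONTEXT0_PT_BASE);
	assert(pt_base_reg(7)  == MOCK_VM_CONTEXT0_PT_BASE + 7);
	assert(pt_base_reg(8)  == MOCK_VM_CONTEXT8_PT_BASE);
	assert(pt_base_reg(15) == MOCK_VM_CONTEXT8_PT_BASE + 7);

	for (unsigned vmid = 0; vmid < 16; vmid++)
		printf("vmid %2u -> reg 0x%03x\n", vmid, pt_base_reg(vmid));
	return 0;
}
```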
@@ -433,9 +446,9 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
433{ 446{
434 u32 tmp; 447 u32 tmp;
435 448
436 if (enable && !adev->mc.prt_warning) { 449 if (enable && !adev->gmc.prt_warning) {
437 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n"); 450 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
438 adev->mc.prt_warning = true; 451 adev->gmc.prt_warning = true;
439 } 452 }
440 453
441 tmp = RREG32(mmVM_PRT_CNTL); 454 tmp = RREG32(mmVM_PRT_CNTL);
@@ -455,7 +468,8 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
455 468
456 if (enable) { 469 if (enable) {
457 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; 470 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
458 uint32_t high = adev->vm_manager.max_pfn; 471 uint32_t high = adev->vm_manager.max_pfn -
472 (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
459 473
460 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); 474 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
461 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); 475 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
@@ -515,11 +529,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
515 (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) | 529 (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
516 (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); 530 (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
517 /* setup context0 */ 531 /* setup context0 */
518 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); 532 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
519 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); 533 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
520 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); 534 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
521 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 535 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
522 (u32)(adev->dummy_page.addr >> 12)); 536 (u32)(adev->dummy_page_addr >> 12));
523 WREG32(mmVM_CONTEXT0_CNTL2, 0); 537 WREG32(mmVM_CONTEXT0_CNTL2, 0);
524 WREG32(mmVM_CONTEXT0_CNTL, 538 WREG32(mmVM_CONTEXT0_CNTL,
525 VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK | 539 VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
@@ -549,7 +563,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
549 563
550 /* enable context1-15 */ 564 /* enable context1-15 */
551 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 565 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
552 (u32)(adev->dummy_page.addr >> 12)); 566 (u32)(adev->dummy_page_addr >> 12));
553 WREG32(mmVM_CONTEXT1_CNTL2, 4); 567 WREG32(mmVM_CONTEXT1_CNTL2, 4);
554 WREG32(mmVM_CONTEXT1_CNTL, 568 WREG32(mmVM_CONTEXT1_CNTL,
555 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | 569 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
@@ -561,9 +575,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
561 else 575 else
562 gmc_v6_0_set_fault_enable_default(adev, true); 576 gmc_v6_0_set_fault_enable_default(adev, true);
563 577
564 gmc_v6_0_gart_flush_gpu_tlb(adev, 0); 578 gmc_v6_0_flush_gpu_tlb(adev, 0);
565 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", 579 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
566 (unsigned)(adev->mc.gart_size >> 20), 580 (unsigned)(adev->gmc.gart_size >> 20),
567 (unsigned long long)adev->gart.table_addr); 581 (unsigned long long)adev->gart.table_addr);
568 adev->gart.ready = true; 582 adev->gart.ready = true;
569 return 0; 583 return 0;
@@ -795,7 +809,7 @@ static int gmc_v6_0_early_init(void *handle)
795{ 809{
796 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 810 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
797 811
798 gmc_v6_0_set_gart_funcs(adev); 812 gmc_v6_0_set_gmc_funcs(adev);
799 gmc_v6_0_set_irq_funcs(adev); 813 gmc_v6_0_set_irq_funcs(adev);
800 814
801 return 0; 815 return 0;
@@ -806,7 +820,7 @@ static int gmc_v6_0_late_init(void *handle)
806 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 820 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
807 821
808 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) 822 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
809 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 823 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
810 else 824 else
811 return 0; 825 return 0;
812} 826}
@@ -818,26 +832,26 @@ static int gmc_v6_0_sw_init(void *handle)
818 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 832 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
819 833
820 if (adev->flags & AMD_IS_APU) { 834 if (adev->flags & AMD_IS_APU) {
821 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; 835 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
822 } else { 836 } else {
823 u32 tmp = RREG32(mmMC_SEQ_MISC0); 837 u32 tmp = RREG32(mmMC_SEQ_MISC0);
824 tmp &= MC_SEQ_MISC0__MT__MASK; 838 tmp &= MC_SEQ_MISC0__MT__MASK;
825 adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp); 839 adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
826 } 840 }
827 841
828 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault); 842 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
829 if (r) 843 if (r)
830 return r; 844 return r;
831 845
832 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault); 846 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
833 if (r) 847 if (r)
834 return r; 848 return r;
835 849
836 amdgpu_vm_adjust_size(adev, 64, 9, 1, 40); 850 amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
837 851
838 adev->mc.mc_mask = 0xffffffffffULL; 852 adev->gmc.mc_mask = 0xffffffffffULL;
839 853
840 adev->mc.stolen_size = 256 * 1024; 854 adev->gmc.stolen_size = 256 * 1024;
841 855
842 adev->need_dma32 = false; 856 adev->need_dma32 = false;
843 dma_bits = adev->need_dma32 ? 32 : 40; 857 dma_bits = adev->need_dma32 ? 32 : 40;
@@ -902,8 +916,8 @@ static int gmc_v6_0_sw_fini(void *handle)
902 amdgpu_vm_manager_fini(adev); 916 amdgpu_vm_manager_fini(adev);
903 gmc_v6_0_gart_fini(adev); 917 gmc_v6_0_gart_fini(adev);
904 amdgpu_bo_fini(adev); 918 amdgpu_bo_fini(adev);
905 release_firmware(adev->mc.fw); 919 release_firmware(adev->gmc.fw);
906 adev->mc.fw = NULL; 920 adev->gmc.fw = NULL;
907 921
908 return 0; 922 return 0;
909} 923}
@@ -934,7 +948,7 @@ static int gmc_v6_0_hw_fini(void *handle)
934{ 948{
935 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 949 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
936 950
937 amdgpu_irq_put(adev, &adev->mc.vm_fault, 0); 951 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
938 gmc_v6_0_gart_disable(adev); 952 gmc_v6_0_gart_disable(adev);
939 953
940 return 0; 954 return 0;
@@ -1129,9 +1143,10 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
1129 .set_powergating_state = gmc_v6_0_set_powergating_state, 1143 .set_powergating_state = gmc_v6_0_set_powergating_state,
1130}; 1144};
1131 1145
1132static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = { 1146static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
1133 .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb, 1147 .flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
1134 .set_pte_pde = gmc_v6_0_gart_set_pte_pde, 1148 .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
1149 .set_pte_pde = gmc_v6_0_set_pte_pde,
1135 .set_prt = gmc_v6_0_set_prt, 1150 .set_prt = gmc_v6_0_set_prt,
1136 .get_vm_pde = gmc_v6_0_get_vm_pde, 1151 .get_vm_pde = gmc_v6_0_get_vm_pde,
1137 .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags 1152 .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
@@ -1142,16 +1157,16 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
1142 .process = gmc_v6_0_process_interrupt, 1157 .process = gmc_v6_0_process_interrupt,
1143}; 1158};
1144 1159
1145static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev) 1160static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
1146{ 1161{
1147 if (adev->gart.gart_funcs == NULL) 1162 if (adev->gmc.gmc_funcs == NULL)
1148 adev->gart.gart_funcs = &gmc_v6_0_gart_funcs; 1163 adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
1149} 1164}
1150 1165
1151static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev) 1166static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
1152{ 1167{
1153 adev->mc.vm_fault.num_types = 1; 1168 adev->gmc.vm_fault.num_types = 1;
1154 adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs; 1169 adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
1155} 1170}
1156 1171
1157const struct amdgpu_ip_block_version gmc_v6_0_ip_block = 1172const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 12e49bd8fd2d..80054f36e487 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -43,7 +43,7 @@
43 43
44#include "amdgpu_atombios.h" 44#include "amdgpu_atombios.h"
45 45
46static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); 46static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
47static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 47static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
48static int gmc_v7_0_wait_for_idle(void *handle); 48static int gmc_v7_0_wait_for_idle(void *handle);
49 49
@@ -152,16 +152,16 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
152 else 152 else
153 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 153 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
154 154
155 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); 155 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
156 if (err) 156 if (err)
157 goto out; 157 goto out;
158 err = amdgpu_ucode_validate(adev->mc.fw); 158 err = amdgpu_ucode_validate(adev->gmc.fw);
159 159
160out: 160out:
161 if (err) { 161 if (err) {
162 pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name); 162 pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
163 release_firmware(adev->mc.fw); 163 release_firmware(adev->gmc.fw);
164 adev->mc.fw = NULL; 164 adev->gmc.fw = NULL;
165 } 165 }
166 return err; 166 return err;
167} 167}
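The init_microcode path keeps its request/validate/cleanup shape and only switches the storage field from adev->mc.fw to adev->gmc.fw. A condensed sketch of that pattern with an illustrative firmware name; request_firmware() and release_firmware() come from linux/firmware.h, amdgpu_ucode_validate() from the driver:

/* Condensed sketch of the load/validate/error-release pattern above;
 * the firmware path is illustrative only. */
static int example_load_mc_firmware(struct amdgpu_device *adev)
{
	int err;

	err = request_firmware(&adev->gmc.fw, "radeon/example_mc.bin", adev->dev);
	if (!err)
		err = amdgpu_ucode_validate(adev->gmc.fw);

	if (err) {
		/* drop the firmware reference and clear the pointer on failure */
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}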
@@ -182,19 +182,19 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
182 u32 running; 182 u32 running;
183 int i, ucode_size, regs_size; 183 int i, ucode_size, regs_size;
184 184
185 if (!adev->mc.fw) 185 if (!adev->gmc.fw)
186 return -EINVAL; 186 return -EINVAL;
187 187
188 hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; 188 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
189 amdgpu_ucode_print_mc_hdr(&hdr->header); 189 amdgpu_ucode_print_mc_hdr(&hdr->header);
190 190
191 adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); 191 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
192 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); 192 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
193 io_mc_regs = (const __le32 *) 193 io_mc_regs = (const __le32 *)
194 (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); 194 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
195 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 195 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
196 fw_data = (const __le32 *) 196 fw_data = (const __le32 *)
197 (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 197 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
198 198
199 running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); 199 running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
200 200
@@ -236,12 +236,12 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
236} 236}
237 237
238static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, 238static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
239 struct amdgpu_mc *mc) 239 struct amdgpu_gmc *mc)
240{ 240{
241 u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; 241 u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
242 base <<= 24; 242 base <<= 24;
243 243
244 amdgpu_device_vram_location(adev, &adev->mc, base); 244 amdgpu_device_vram_location(adev, &adev->gmc, base);
245 amdgpu_device_gart_location(adev, mc); 245 amdgpu_device_gart_location(adev, mc);
246} 246}
247 247
@@ -284,9 +284,9 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
284 } 284 }
285 /* Update configuration */ 285 /* Update configuration */
286 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 286 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
287 adev->mc.vram_start >> 12); 287 adev->gmc.vram_start >> 12);
288 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 288 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
289 adev->mc.vram_end >> 12); 289 adev->gmc.vram_end >> 12);
290 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 290 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
291 adev->vram_scratch.gpu_addr >> 12); 291 adev->vram_scratch.gpu_addr >> 12);
292 WREG32(mmMC_VM_AGP_BASE, 0); 292 WREG32(mmMC_VM_AGP_BASE, 0);
@@ -319,8 +319,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
319{ 319{
320 int r; 320 int r;
321 321
322 adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev); 322 adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
323 if (!adev->mc.vram_width) { 323 if (!adev->gmc.vram_width) {
324 u32 tmp; 324 u32 tmp;
325 int chansize, numchan; 325 int chansize, numchan;
326 326
@@ -362,38 +362,38 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
362 numchan = 16; 362 numchan = 16;
363 break; 363 break;
364 } 364 }
365 adev->mc.vram_width = numchan * chansize; 365 adev->gmc.vram_width = numchan * chansize;
366 } 366 }
367 /* size in MB on si */ 367 /* size in MB on si */
368 adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 368 adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
369 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 369 adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
370 370
371 if (!(adev->flags & AMD_IS_APU)) { 371 if (!(adev->flags & AMD_IS_APU)) {
372 r = amdgpu_device_resize_fb_bar(adev); 372 r = amdgpu_device_resize_fb_bar(adev);
373 if (r) 373 if (r)
374 return r; 374 return r;
375 } 375 }
376 adev->mc.aper_base = pci_resource_start(adev->pdev, 0); 376 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
377 adev->mc.aper_size = pci_resource_len(adev->pdev, 0); 377 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
378 378
379#ifdef CONFIG_X86_64 379#ifdef CONFIG_X86_64
380 if (adev->flags & AMD_IS_APU) { 380 if (adev->flags & AMD_IS_APU) {
381 adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; 381 adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
382 adev->mc.aper_size = adev->mc.real_vram_size; 382 adev->gmc.aper_size = adev->gmc.real_vram_size;
383 } 383 }
384#endif 384#endif
385 385
386 /* In case the PCI BAR is larger than the actual amount of vram */ 386 /* In case the PCI BAR is larger than the actual amount of vram */
387 adev->mc.visible_vram_size = adev->mc.aper_size; 387 adev->gmc.visible_vram_size = adev->gmc.aper_size;
388 if (adev->mc.visible_vram_size > adev->mc.real_vram_size) 388 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
389 adev->mc.visible_vram_size = adev->mc.real_vram_size; 389 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
390 390
391 /* set the gart size */ 391 /* set the gart size */
392 if (amdgpu_gart_size == -1) { 392 if (amdgpu_gart_size == -1) {
393 switch (adev->asic_type) { 393 switch (adev->asic_type) {
394 case CHIP_TOPAZ: /* no MM engines */ 394 case CHIP_TOPAZ: /* no MM engines */
395 default: 395 default:
396 adev->mc.gart_size = 256ULL << 20; 396 adev->gmc.gart_size = 256ULL << 20;
397 break; 397 break;
398#ifdef CONFIG_DRM_AMDGPU_CIK 398#ifdef CONFIG_DRM_AMDGPU_CIK
399 case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */ 399 case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
@@ -401,15 +401,15 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
401 case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */ 401 case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */
402 case CHIP_KABINI: /* UVD, VCE do not support GPUVM */ 402 case CHIP_KABINI: /* UVD, VCE do not support GPUVM */
403 case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */ 403 case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
404 adev->mc.gart_size = 1024ULL << 20; 404 adev->gmc.gart_size = 1024ULL << 20;
405 break; 405 break;
406#endif 406#endif
407 } 407 }
408 } else { 408 } else {
409 adev->mc.gart_size = (u64)amdgpu_gart_size << 20; 409 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
410 } 410 }
411 411
412 gmc_v7_0_vram_gtt_location(adev, &adev->mc); 412 gmc_v7_0_vram_gtt_location(adev, &adev->gmc);
413 413
414 return 0; 414 return 0;
415} 415}
@@ -422,25 +422,44 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
422 */ 422 */
423 423
424/** 424/**
425 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback 425 * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
426 * 426 *
427 * @adev: amdgpu_device pointer 427 * @adev: amdgpu_device pointer
428 * @vmid: vm instance to flush 428 * @vmid: vm instance to flush
429 * 429 *
430 * Flush the TLB for the requested page table (CIK). 430 * Flush the TLB for the requested page table (CIK).
431 */ 431 */
432static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, 432static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
433 uint32_t vmid)
434{ 433{
435 /* flush hdp cache */
436 WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
437
438 /* bits 0-15 are the VM contexts0-15 */ 434 /* bits 0-15 are the VM contexts0-15 */
439 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); 435 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
440} 436}
441 437
438static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
439 unsigned vmid, uint64_t pd_addr)
440{
441 uint32_t reg;
442
443 if (vmid < 8)
444 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
445 else
446 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
447 amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
448
449 /* bits 0-15 are the VM contexts0-15 */
450 amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
451
452 return pd_addr;
453}
454
455static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
456 unsigned pasid)
457{
458 amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
459}
460
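The two helpers added above let GMC v7 program a VM switch from the ring itself: emit_flush_gpu_tlb() picks the VM_CONTEXT0 or VM_CONTEXT8 page-table base register for the vmid, writes the new directory address and then the invalidate request as ring packets, while the plain MMIO flush_gpu_tlb() no longer folds in the HDP cache flush (presumably handled by a separate HDP flush elsewhere in the series). A hedged sketch of a caller going through the new hook; the wrapper name is hypothetical, the callback signature and ring->adev come from the patch:

/* Hypothetical wrapper: emit a page-directory switch plus TLB invalidate
 * on a ring via the per-ASIC callback table. */
static uint64_t example_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;

	/* returns the pd_addr value that was actually programmed */
	return adev->gmc.gmc_funcs->emit_flush_gpu_tlb(ring, vmid, pd_addr);
}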
442/** 461/**
443 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO 462 * gmc_v7_0_set_pte_pde - update the page tables using MMIO
444 * 463 *
445 * @adev: amdgpu_device pointer 464 * @adev: amdgpu_device pointer
446 * @cpu_pt_addr: cpu address of the page table 465 * @cpu_pt_addr: cpu address of the page table
@@ -450,11 +469,9 @@ static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
450 * 469 *
451 * Update the page tables using the CPU. 470 * Update the page tables using the CPU.
452 */ 471 */
453static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev, 472static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
454 void *cpu_pt_addr, 473 uint32_t gpu_page_idx, uint64_t addr,
455 uint32_t gpu_page_idx, 474 uint64_t flags)
456 uint64_t addr,
457 uint64_t flags)
458{ 475{
459 void __iomem *ptr = (void *)cpu_pt_addr; 476 void __iomem *ptr = (void *)cpu_pt_addr;
460 uint64_t value; 477 uint64_t value;
@@ -524,9 +541,9 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
524{ 541{
525 uint32_t tmp; 542 uint32_t tmp;
526 543
527 if (enable && !adev->mc.prt_warning) { 544 if (enable && !adev->gmc.prt_warning) {
528 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n"); 545 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
529 adev->mc.prt_warning = true; 546 adev->gmc.prt_warning = true;
530 } 547 }
531 548
532 tmp = RREG32(mmVM_PRT_CNTL); 549 tmp = RREG32(mmVM_PRT_CNTL);
@@ -548,7 +565,8 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
548 565
549 if (enable) { 566 if (enable) {
550 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; 567 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
551 uint32_t high = adev->vm_manager.max_pfn; 568 uint32_t high = adev->vm_manager.max_pfn -
569 (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
552 570
553 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); 571 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
554 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); 572 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
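The PRT aperture hunk above also changes the upper bound: instead of running all the way to vm_manager.max_pfn, the aperture now stops AMDGPU_VA_RESERVED_SIZE short of the top, mirroring how the same amount is already skipped at the bottom. A small sketch of the bounds computation with illustrative constants (the values below are assumptions for the example, not the driver's real ones):

/* Illustrative constants only; the real driver uses AMDGPU_VA_RESERVED_SIZE
 * and AMDGPU_GPU_PAGE_SHIFT. */
#define EXAMPLE_GPU_PAGE_SHIFT   12
#define EXAMPLE_VA_RESERVED_SIZE (8u << 20)

static void example_prt_bounds(uint32_t max_pfn, uint32_t *low, uint32_t *high)
{
	uint32_t reserved_pages = EXAMPLE_VA_RESERVED_SIZE >> EXAMPLE_GPU_PAGE_SHIFT;

	*low  = reserved_pages;            /* reserved pages skipped at the bottom */
	*high = max_pfn - reserved_pages;  /* and now also excluded at the top */
}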
@@ -622,11 +640,11 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
622 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field); 640 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
623 WREG32(mmVM_L2_CNTL3, tmp); 641 WREG32(mmVM_L2_CNTL3, tmp);
624 /* setup context0 */ 642 /* setup context0 */
625 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); 643 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
626 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); 644 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
627 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); 645 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
628 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 646 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
629 (u32)(adev->dummy_page.addr >> 12)); 647 (u32)(adev->dummy_page_addr >> 12));
630 WREG32(mmVM_CONTEXT0_CNTL2, 0); 648 WREG32(mmVM_CONTEXT0_CNTL2, 0);
631 tmp = RREG32(mmVM_CONTEXT0_CNTL); 649 tmp = RREG32(mmVM_CONTEXT0_CNTL);
632 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); 650 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
@@ -656,7 +674,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
656 674
657 /* enable context1-15 */ 675 /* enable context1-15 */
658 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 676 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
659 (u32)(adev->dummy_page.addr >> 12)); 677 (u32)(adev->dummy_page_addr >> 12));
660 WREG32(mmVM_CONTEXT1_CNTL2, 4); 678 WREG32(mmVM_CONTEXT1_CNTL2, 4);
661 tmp = RREG32(mmVM_CONTEXT1_CNTL); 679 tmp = RREG32(mmVM_CONTEXT1_CNTL);
662 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); 680 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
@@ -675,9 +693,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
675 WREG32(mmCHUB_CONTROL, tmp); 693 WREG32(mmCHUB_CONTROL, tmp);
676 } 694 }
677 695
678 gmc_v7_0_gart_flush_gpu_tlb(adev, 0); 696 gmc_v7_0_flush_gpu_tlb(adev, 0);
679 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 697 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
680 (unsigned)(adev->mc.gart_size >> 20), 698 (unsigned)(adev->gmc.gart_size >> 20),
681 (unsigned long long)adev->gart.table_addr); 699 (unsigned long long)adev->gart.table_addr);
682 adev->gart.ready = true; 700 adev->gart.ready = true;
683 return 0; 701 return 0;
@@ -750,21 +768,21 @@ static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
750 * 768 *
751 * Print human readable fault information (CIK). 769 * Print human readable fault information (CIK).
752 */ 770 */
753static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, 771static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
754 u32 status, u32 addr, u32 mc_client) 772 u32 addr, u32 mc_client, unsigned pasid)
755{ 773{
756 u32 mc_id;
757 u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); 774 u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
758 u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, 775 u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
759 PROTECTIONS); 776 PROTECTIONS);
760 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, 777 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
761 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; 778 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
779 u32 mc_id;
762 780
763 mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, 781 mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
764 MEMORY_CLIENT_ID); 782 MEMORY_CLIENT_ID);
765 783
766 dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", 784 dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
767 protections, vmid, addr, 785 protections, vmid, pasid, addr,
768 REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, 786 REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
769 MEMORY_CLIENT_RW) ? 787 MEMORY_CLIENT_RW) ?
770 "write" : "read", block, mc_client, mc_id); 788 "write" : "read", block, mc_client, mc_id);
@@ -922,16 +940,16 @@ static int gmc_v7_0_early_init(void *handle)
922{ 940{
923 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 941 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
924 942
925 gmc_v7_0_set_gart_funcs(adev); 943 gmc_v7_0_set_gmc_funcs(adev);
926 gmc_v7_0_set_irq_funcs(adev); 944 gmc_v7_0_set_irq_funcs(adev);
927 945
928 adev->mc.shared_aperture_start = 0x2000000000000000ULL; 946 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
929 adev->mc.shared_aperture_end = 947 adev->gmc.shared_aperture_end =
930 adev->mc.shared_aperture_start + (4ULL << 30) - 1; 948 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
931 adev->mc.private_aperture_start = 949 adev->gmc.private_aperture_start =
932 adev->mc.shared_aperture_end + 1; 950 adev->gmc.shared_aperture_end + 1;
933 adev->mc.private_aperture_end = 951 adev->gmc.private_aperture_end =
934 adev->mc.private_aperture_start + (4ULL << 30) - 1; 952 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
935 953
936 return 0; 954 return 0;
937} 955}
@@ -941,7 +959,7 @@ static int gmc_v7_0_late_init(void *handle)
941 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 959 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
942 960
943 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) 961 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
944 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 962 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
945 else 963 else
946 return 0; 964 return 0;
947} 965}
@@ -953,18 +971,18 @@ static int gmc_v7_0_sw_init(void *handle)
953 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 971 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
954 972
955 if (adev->flags & AMD_IS_APU) { 973 if (adev->flags & AMD_IS_APU) {
956 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; 974 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
957 } else { 975 } else {
958 u32 tmp = RREG32(mmMC_SEQ_MISC0); 976 u32 tmp = RREG32(mmMC_SEQ_MISC0);
959 tmp &= MC_SEQ_MISC0__MT__MASK; 977 tmp &= MC_SEQ_MISC0__MT__MASK;
960 adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp); 978 adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
961 } 979 }
962 980
963 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault); 981 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
964 if (r) 982 if (r)
965 return r; 983 return r;
966 984
967 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault); 985 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
968 if (r) 986 if (r)
969 return r; 987 return r;
970 988
@@ -978,9 +996,9 @@ static int gmc_v7_0_sw_init(void *handle)
978 * This is the max address of the GPU's 996 * This is the max address of the GPU's
979 * internal address space. 997 * internal address space.
980 */ 998 */
981 adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ 999 adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
982 1000
983 adev->mc.stolen_size = 256 * 1024; 1001 adev->gmc.stolen_size = 256 * 1024;
984 1002
985 /* set DMA mask + need_dma32 flags. 1003 /* set DMA mask + need_dma32 flags.
986 * PCIE - can handle 40-bits. 1004 * PCIE - can handle 40-bits.
@@ -1051,8 +1069,8 @@ static int gmc_v7_0_sw_fini(void *handle)
1051 amdgpu_vm_manager_fini(adev); 1069 amdgpu_vm_manager_fini(adev);
1052 gmc_v7_0_gart_fini(adev); 1070 gmc_v7_0_gart_fini(adev);
1053 amdgpu_bo_fini(adev); 1071 amdgpu_bo_fini(adev);
1054 release_firmware(adev->mc.fw); 1072 release_firmware(adev->gmc.fw);
1055 adev->mc.fw = NULL; 1073 adev->gmc.fw = NULL;
1056 1074
1057 return 0; 1075 return 0;
1058} 1076}
@@ -1085,7 +1103,7 @@ static int gmc_v7_0_hw_fini(void *handle)
1085{ 1103{
1086 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1104 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1087 1105
1088 amdgpu_irq_put(adev, &adev->mc.vm_fault, 0); 1106 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1089 gmc_v7_0_gart_disable(adev); 1107 gmc_v7_0_gart_disable(adev);
1090 1108
1091 return 0; 1109 return 0;
@@ -1259,7 +1277,8 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
1259 addr); 1277 addr);
1260 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1278 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1261 status); 1279 status);
1262 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); 1280 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
1281 entry->pasid);
1263 } 1282 }
1264 1283
1265 return 0; 1284 return 0;
@@ -1308,9 +1327,11 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
1308 .set_powergating_state = gmc_v7_0_set_powergating_state, 1327 .set_powergating_state = gmc_v7_0_set_powergating_state,
1309}; 1328};
1310 1329
1311static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = { 1330static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
1312 .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb, 1331 .flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
1313 .set_pte_pde = gmc_v7_0_gart_set_pte_pde, 1332 .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
1333 .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
1334 .set_pte_pde = gmc_v7_0_set_pte_pde,
1314 .set_prt = gmc_v7_0_set_prt, 1335 .set_prt = gmc_v7_0_set_prt,
1315 .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags, 1336 .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
1316 .get_vm_pde = gmc_v7_0_get_vm_pde 1337 .get_vm_pde = gmc_v7_0_get_vm_pde
@@ -1321,16 +1342,16 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
1321 .process = gmc_v7_0_process_interrupt, 1342 .process = gmc_v7_0_process_interrupt,
1322}; 1343};
1323 1344
1324static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev) 1345static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
1325{ 1346{
1326 if (adev->gart.gart_funcs == NULL) 1347 if (adev->gmc.gmc_funcs == NULL)
1327 adev->gart.gart_funcs = &gmc_v7_0_gart_funcs; 1348 adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
1328} 1349}
1329 1350
1330static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) 1351static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1331{ 1352{
1332 adev->mc.vm_fault.num_types = 1; 1353 adev->gmc.vm_fault.num_types = 1;
1333 adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs; 1354 adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
1334} 1355}
1335 1356
1336const struct amdgpu_ip_block_version gmc_v7_0_ip_block = 1357const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
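Beyond the mc to gmc renames, the gmc_v7_0 table above also gains emit_pasid_mapping(), which writes the VMID-to-PASID mapping into the IH LUT from the ring; the gmc_v6_0 table earlier in this diff does not provide it, so generic code has to treat the hook as optional. A hedged sketch of such a NULL-checked dispatch; the helper name is hypothetical:

/* Hypothetical helper: publish the vmid -> pasid mapping if the ASIC
 * provides the callback (GMC v6 above does not). */
static void example_emit_pasid(struct amdgpu_ring *ring,
			       unsigned vmid, unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->gmc.gmc_funcs->emit_pasid_mapping)
		adev->gmc.gmc_funcs->emit_pasid_mapping(ring, vmid, pasid);
}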
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9a170e37fbe7..d71d4cb68f9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -45,7 +45,7 @@
45#include "amdgpu_atombios.h" 45#include "amdgpu_atombios.h"
46 46
47 47
48static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); 48static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
49static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); 49static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
50static int gmc_v8_0_wait_for_idle(void *handle); 50static int gmc_v8_0_wait_for_idle(void *handle);
51 51
@@ -236,16 +236,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
236 } 236 }
237 237
238 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); 238 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
239 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); 239 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
240 if (err) 240 if (err)
241 goto out; 241 goto out;
242 err = amdgpu_ucode_validate(adev->mc.fw); 242 err = amdgpu_ucode_validate(adev->gmc.fw);
243 243
244out: 244out:
245 if (err) { 245 if (err) {
246 pr_err("mc: Failed to load firmware \"%s\"\n", fw_name); 246 pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
247 release_firmware(adev->mc.fw); 247 release_firmware(adev->gmc.fw);
248 adev->mc.fw = NULL; 248 adev->gmc.fw = NULL;
249 } 249 }
250 return err; 250 return err;
251} 251}
@@ -274,19 +274,19 @@ static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
274 if (amdgpu_sriov_bios(adev)) 274 if (amdgpu_sriov_bios(adev))
275 return 0; 275 return 0;
276 276
277 if (!adev->mc.fw) 277 if (!adev->gmc.fw)
278 return -EINVAL; 278 return -EINVAL;
279 279
280 hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; 280 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
281 amdgpu_ucode_print_mc_hdr(&hdr->header); 281 amdgpu_ucode_print_mc_hdr(&hdr->header);
282 282
283 adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); 283 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
284 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); 284 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
285 io_mc_regs = (const __le32 *) 285 io_mc_regs = (const __le32 *)
286 (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); 286 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
287 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 287 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
288 fw_data = (const __le32 *) 288 fw_data = (const __le32 *)
289 (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 289 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
290 290
291 running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); 291 running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
292 292
@@ -350,19 +350,19 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
350 if (vbios_version == 0) 350 if (vbios_version == 0)
351 return 0; 351 return 0;
352 352
353 if (!adev->mc.fw) 353 if (!adev->gmc.fw)
354 return -EINVAL; 354 return -EINVAL;
355 355
356 hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; 356 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
357 amdgpu_ucode_print_mc_hdr(&hdr->header); 357 amdgpu_ucode_print_mc_hdr(&hdr->header);
358 358
359 adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); 359 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
360 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); 360 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
361 io_mc_regs = (const __le32 *) 361 io_mc_regs = (const __le32 *)
362 (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); 362 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
363 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 363 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
364 fw_data = (const __le32 *) 364 fw_data = (const __le32 *)
365 (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 365 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
366 366
367 data = RREG32(mmMC_SEQ_MISC0); 367 data = RREG32(mmMC_SEQ_MISC0);
368 data &= ~(0x40); 368 data &= ~(0x40);
@@ -398,7 +398,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
398} 398}
399 399
400static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, 400static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
401 struct amdgpu_mc *mc) 401 struct amdgpu_gmc *mc)
402{ 402{
403 u64 base = 0; 403 u64 base = 0;
404 404
@@ -406,7 +406,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
406 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; 406 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
407 base <<= 24; 407 base <<= 24;
408 408
409 amdgpu_device_vram_location(adev, &adev->mc, base); 409 amdgpu_device_vram_location(adev, &adev->gmc, base);
410 amdgpu_device_gart_location(adev, mc); 410 amdgpu_device_gart_location(adev, mc);
411} 411}
412 412
@@ -449,18 +449,18 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
449 } 449 }
450 /* Update configuration */ 450 /* Update configuration */
451 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 451 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
452 adev->mc.vram_start >> 12); 452 adev->gmc.vram_start >> 12);
453 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 453 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
454 adev->mc.vram_end >> 12); 454 adev->gmc.vram_end >> 12);
455 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 455 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
456 adev->vram_scratch.gpu_addr >> 12); 456 adev->vram_scratch.gpu_addr >> 12);
457 457
458 if (amdgpu_sriov_vf(adev)) { 458 if (amdgpu_sriov_vf(adev)) {
459 tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; 459 tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
460 tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); 460 tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
461 WREG32(mmMC_VM_FB_LOCATION, tmp); 461 WREG32(mmMC_VM_FB_LOCATION, tmp);
462 /* XXX double check these! */ 462 /* XXX double check these! */
463 WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); 463 WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
464 WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); 464 WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
465 WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); 465 WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
466 } 466 }
@@ -495,8 +495,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
495{ 495{
496 int r; 496 int r;
497 497
498 adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev); 498 adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
499 if (!adev->mc.vram_width) { 499 if (!adev->gmc.vram_width) {
500 u32 tmp; 500 u32 tmp;
501 int chansize, numchan; 501 int chansize, numchan;
502 502
@@ -538,31 +538,31 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
538 numchan = 16; 538 numchan = 16;
539 break; 539 break;
540 } 540 }
541 adev->mc.vram_width = numchan * chansize; 541 adev->gmc.vram_width = numchan * chansize;
542 } 542 }
543 /* size in MB on si */ 543 /* size in MB on si */
544 adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 544 adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
545 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 545 adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
546 546
547 if (!(adev->flags & AMD_IS_APU)) { 547 if (!(adev->flags & AMD_IS_APU)) {
548 r = amdgpu_device_resize_fb_bar(adev); 548 r = amdgpu_device_resize_fb_bar(adev);
549 if (r) 549 if (r)
550 return r; 550 return r;
551 } 551 }
552 adev->mc.aper_base = pci_resource_start(adev->pdev, 0); 552 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
553 adev->mc.aper_size = pci_resource_len(adev->pdev, 0); 553 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
554 554
555#ifdef CONFIG_X86_64 555#ifdef CONFIG_X86_64
556 if (adev->flags & AMD_IS_APU) { 556 if (adev->flags & AMD_IS_APU) {
557 adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; 557 adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
558 adev->mc.aper_size = adev->mc.real_vram_size; 558 adev->gmc.aper_size = adev->gmc.real_vram_size;
559 } 559 }
560#endif 560#endif
561 561
562 /* In case the PCI BAR is larger than the actual amount of vram */ 562 /* In case the PCI BAR is larger than the actual amount of vram */
563 adev->mc.visible_vram_size = adev->mc.aper_size; 563 adev->gmc.visible_vram_size = adev->gmc.aper_size;
564 if (adev->mc.visible_vram_size > adev->mc.real_vram_size) 564 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
565 adev->mc.visible_vram_size = adev->mc.real_vram_size; 565 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
566 566
567 /* set the gart size */ 567 /* set the gart size */
568 if (amdgpu_gart_size == -1) { 568 if (amdgpu_gart_size == -1) {
@@ -571,20 +571,20 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
571 case CHIP_POLARIS10: /* all engines support GPUVM */ 571 case CHIP_POLARIS10: /* all engines support GPUVM */
572 case CHIP_POLARIS12: /* all engines support GPUVM */ 572 case CHIP_POLARIS12: /* all engines support GPUVM */
573 default: 573 default:
574 adev->mc.gart_size = 256ULL << 20; 574 adev->gmc.gart_size = 256ULL << 20;
575 break; 575 break;
576 case CHIP_TONGA: /* UVD, VCE do not support GPUVM */ 576 case CHIP_TONGA: /* UVD, VCE do not support GPUVM */
577 case CHIP_FIJI: /* UVD, VCE do not support GPUVM */ 577 case CHIP_FIJI: /* UVD, VCE do not support GPUVM */
578 case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */ 578 case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
579 case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */ 579 case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */
580 adev->mc.gart_size = 1024ULL << 20; 580 adev->gmc.gart_size = 1024ULL << 20;
581 break; 581 break;
582 } 582 }
583 } else { 583 } else {
584 adev->mc.gart_size = (u64)amdgpu_gart_size << 20; 584 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
585 } 585 }
586 586
587 gmc_v8_0_vram_gtt_location(adev, &adev->mc); 587 gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
588 588
589 return 0; 589 return 0;
590} 590}
@@ -597,25 +597,45 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
597 */ 597 */
598 598
599/** 599/**
600 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback 600 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
601 * 601 *
602 * @adev: amdgpu_device pointer 602 * @adev: amdgpu_device pointer
603 * @vmid: vm instance to flush 603 * @vmid: vm instance to flush
604 * 604 *
605 * Flush the TLB for the requested page table (CIK). 605 * Flush the TLB for the requested page table (CIK).
606 */ 606 */
607static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, 607static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
608 uint32_t vmid) 608 uint32_t vmid)
609{ 609{
610 /* flush hdp cache */
611 WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
612
613 /* bits 0-15 are the VM contexts0-15 */ 610 /* bits 0-15 are the VM contexts0-15 */
614 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); 611 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
615} 612}
616 613
614static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
615 unsigned vmid, uint64_t pd_addr)
616{
617 uint32_t reg;
618
619 if (vmid < 8)
620 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
621 else
622 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
623 amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
624
625 /* bits 0-15 are the VM contexts0-15 */
626 amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
627
628 return pd_addr;
629}
630
631static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
632 unsigned pasid)
633{
634 amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
635}
636
617/** 637/**
618 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO 638 * gmc_v8_0_set_pte_pde - update the page tables using MMIO
619 * 639 *
620 * @adev: amdgpu_device pointer 640 * @adev: amdgpu_device pointer
621 * @cpu_pt_addr: cpu address of the page table 641 * @cpu_pt_addr: cpu address of the page table
@@ -625,11 +645,9 @@ static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
625 * 645 *
626 * Update the page tables using the CPU. 646 * Update the page tables using the CPU.
627 */ 647 */
628static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev, 648static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
629 void *cpu_pt_addr, 649 uint32_t gpu_page_idx, uint64_t addr,
630 uint32_t gpu_page_idx, 650 uint64_t flags)
631 uint64_t addr,
632 uint64_t flags)
633{ 651{
634 void __iomem *ptr = (void *)cpu_pt_addr; 652 void __iomem *ptr = (void *)cpu_pt_addr;
635 uint64_t value; 653 uint64_t value;
@@ -723,9 +741,9 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
723{ 741{
724 u32 tmp; 742 u32 tmp;
725 743
726 if (enable && !adev->mc.prt_warning) { 744 if (enable && !adev->gmc.prt_warning) {
727 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n"); 745 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
728 adev->mc.prt_warning = true; 746 adev->gmc.prt_warning = true;
729 } 747 }
730 748
731 tmp = RREG32(mmVM_PRT_CNTL); 749 tmp = RREG32(mmVM_PRT_CNTL);
@@ -747,7 +765,8 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
747 765
748 if (enable) { 766 if (enable) {
749 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; 767 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
750 uint32_t high = adev->vm_manager.max_pfn; 768 uint32_t high = adev->vm_manager.max_pfn -
769 (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
751 770
752 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); 771 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
753 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); 772 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
@@ -837,11 +856,11 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
837 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0); 856 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
838 WREG32(mmVM_L2_CNTL4, tmp); 857 WREG32(mmVM_L2_CNTL4, tmp);
839 /* setup context0 */ 858 /* setup context0 */
840 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); 859 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
841 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); 860 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
842 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); 861 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
843 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 862 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
844 (u32)(adev->dummy_page.addr >> 12)); 863 (u32)(adev->dummy_page_addr >> 12));
845 WREG32(mmVM_CONTEXT0_CNTL2, 0); 864 WREG32(mmVM_CONTEXT0_CNTL2, 0);
846 tmp = RREG32(mmVM_CONTEXT0_CNTL); 865 tmp = RREG32(mmVM_CONTEXT0_CNTL);
847 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); 866 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
@@ -871,7 +890,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
871 890
872 /* enable context1-15 */ 891 /* enable context1-15 */
873 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 892 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
874 (u32)(adev->dummy_page.addr >> 12)); 893 (u32)(adev->dummy_page_addr >> 12));
875 WREG32(mmVM_CONTEXT1_CNTL2, 4); 894 WREG32(mmVM_CONTEXT1_CNTL2, 4);
876 tmp = RREG32(mmVM_CONTEXT1_CNTL); 895 tmp = RREG32(mmVM_CONTEXT1_CNTL);
877 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); 896 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
@@ -891,9 +910,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
891 else 910 else
892 gmc_v8_0_set_fault_enable_default(adev, true); 911 gmc_v8_0_set_fault_enable_default(adev, true);
893 912
894 gmc_v8_0_gart_flush_gpu_tlb(adev, 0); 913 gmc_v8_0_flush_gpu_tlb(adev, 0);
895 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 914 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
896 (unsigned)(adev->mc.gart_size >> 20), 915 (unsigned)(adev->gmc.gart_size >> 20),
897 (unsigned long long)adev->gart.table_addr); 916 (unsigned long long)adev->gart.table_addr);
898 adev->gart.ready = true; 917 adev->gart.ready = true;
899 return 0; 918 return 0;
@@ -966,21 +985,21 @@ static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
966 * 985 *
967 * Print human readable fault information (CIK). 986 * Print human readable fault information (CIK).
968 */ 987 */
969static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, 988static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
970 u32 status, u32 addr, u32 mc_client) 989 u32 addr, u32 mc_client, unsigned pasid)
971{ 990{
972 u32 mc_id;
973 u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); 991 u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
974 u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, 992 u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
975 PROTECTIONS); 993 PROTECTIONS);
976 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, 994 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
977 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; 995 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
996 u32 mc_id;
978 997
979 mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, 998 mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
980 MEMORY_CLIENT_ID); 999 MEMORY_CLIENT_ID);
981 1000
982 dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", 1001 dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
983 protections, vmid, addr, 1002 protections, vmid, pasid, addr,
984 REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, 1003 REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
985 MEMORY_CLIENT_RW) ? 1004 MEMORY_CLIENT_RW) ?
986 "write" : "read", block, mc_client, mc_id); 1005 "write" : "read", block, mc_client, mc_id);
@@ -1012,16 +1031,16 @@ static int gmc_v8_0_early_init(void *handle)
1012{ 1031{
1013 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1032 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1014 1033
1015 gmc_v8_0_set_gart_funcs(adev); 1034 gmc_v8_0_set_gmc_funcs(adev);
1016 gmc_v8_0_set_irq_funcs(adev); 1035 gmc_v8_0_set_irq_funcs(adev);
1017 1036
1018 adev->mc.shared_aperture_start = 0x2000000000000000ULL; 1037 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1019 adev->mc.shared_aperture_end = 1038 adev->gmc.shared_aperture_end =
1020 adev->mc.shared_aperture_start + (4ULL << 30) - 1; 1039 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1021 adev->mc.private_aperture_start = 1040 adev->gmc.private_aperture_start =
1022 adev->mc.shared_aperture_end + 1; 1041 adev->gmc.shared_aperture_end + 1;
1023 adev->mc.private_aperture_end = 1042 adev->gmc.private_aperture_end =
1024 adev->mc.private_aperture_start + (4ULL << 30) - 1; 1043 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1025 1044
1026 return 0; 1045 return 0;
1027} 1046}
@@ -1031,7 +1050,7 @@ static int gmc_v8_0_late_init(void *handle)
1031 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1050 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1032 1051
1033 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) 1052 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
1034 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 1053 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1035 else 1054 else
1036 return 0; 1055 return 0;
1037} 1056}
@@ -1045,7 +1064,7 @@ static int gmc_v8_0_sw_init(void *handle)
1045 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1064 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1046 1065
1047 if (adev->flags & AMD_IS_APU) { 1066 if (adev->flags & AMD_IS_APU) {
1048 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; 1067 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
1049 } else { 1068 } else {
1050 u32 tmp; 1069 u32 tmp;
1051 1070
@@ -1054,14 +1073,14 @@ static int gmc_v8_0_sw_init(void *handle)
1054 else 1073 else
1055 tmp = RREG32(mmMC_SEQ_MISC0); 1074 tmp = RREG32(mmMC_SEQ_MISC0);
1056 tmp &= MC_SEQ_MISC0__MT__MASK; 1075 tmp &= MC_SEQ_MISC0__MT__MASK;
1057 adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp); 1076 adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
1058 } 1077 }
1059 1078
1060 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault); 1079 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
1061 if (r) 1080 if (r)
1062 return r; 1081 return r;
1063 1082
1064 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault); 1083 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
1065 if (r) 1084 if (r)
1066 return r; 1085 return r;
1067 1086
@@ -1075,9 +1094,9 @@ static int gmc_v8_0_sw_init(void *handle)
1075 * This is the max address of the GPU's 1094 * This is the max address of the GPU's
1076 * internal address space. 1095 * internal address space.
1077 */ 1096 */
1078 adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ 1097 adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1079 1098
1080 adev->mc.stolen_size = 256 * 1024; 1099 adev->gmc.stolen_size = 256 * 1024;
1081 1100
1082 /* set DMA mask + need_dma32 flags. 1101 /* set DMA mask + need_dma32 flags.
1083 * PCIE - can handle 40-bits. 1102 * PCIE - can handle 40-bits.
@@ -1086,7 +1105,6 @@ static int gmc_v8_0_sw_init(void *handle)
1086 */ 1105 */
1087 adev->need_dma32 = false; 1106 adev->need_dma32 = false;
1088 dma_bits = adev->need_dma32 ? 32 : 40; 1107 dma_bits = adev->need_dma32 ? 32 : 40;
1089 adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
1090 r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); 1108 r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
1091 if (r) { 1109 if (r) {
1092 adev->need_dma32 = true; 1110 adev->need_dma32 = true;
@@ -1149,8 +1167,8 @@ static int gmc_v8_0_sw_fini(void *handle)
1149 amdgpu_vm_manager_fini(adev); 1167 amdgpu_vm_manager_fini(adev);
1150 gmc_v8_0_gart_fini(adev); 1168 gmc_v8_0_gart_fini(adev);
1151 amdgpu_bo_fini(adev); 1169 amdgpu_bo_fini(adev);
1152 release_firmware(adev->mc.fw); 1170 release_firmware(adev->gmc.fw);
1153 adev->mc.fw = NULL; 1171 adev->gmc.fw = NULL;
1154 1172
1155 return 0; 1173 return 0;
1156} 1174}
@@ -1191,7 +1209,7 @@ static int gmc_v8_0_hw_fini(void *handle)
1191{ 1209{
1192 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1210 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1193 1211
1194 amdgpu_irq_put(adev, &adev->mc.vm_fault, 0); 1212 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1195 gmc_v8_0_gart_disable(adev); 1213 gmc_v8_0_gart_disable(adev);
1196 1214
1197 return 0; 1215 return 0;
@@ -1271,10 +1289,10 @@ static bool gmc_v8_0_check_soft_reset(void *handle)
1271 SRBM_SOFT_RESET, SOFT_RESET_MC, 1); 1289 SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1272 } 1290 }
1273 if (srbm_soft_reset) { 1291 if (srbm_soft_reset) {
1274 adev->mc.srbm_soft_reset = srbm_soft_reset; 1292 adev->gmc.srbm_soft_reset = srbm_soft_reset;
1275 return true; 1293 return true;
1276 } else { 1294 } else {
1277 adev->mc.srbm_soft_reset = 0; 1295 adev->gmc.srbm_soft_reset = 0;
1278 return false; 1296 return false;
1279 } 1297 }
1280} 1298}
@@ -1283,7 +1301,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
1283{ 1301{
1284 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1302 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1285 1303
1286 if (!adev->mc.srbm_soft_reset) 1304 if (!adev->gmc.srbm_soft_reset)
1287 return 0; 1305 return 0;
1288 1306
1289 gmc_v8_0_mc_stop(adev); 1307 gmc_v8_0_mc_stop(adev);
@@ -1299,9 +1317,9 @@ static int gmc_v8_0_soft_reset(void *handle)
1299 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1317 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1300 u32 srbm_soft_reset; 1318 u32 srbm_soft_reset;
1301 1319
1302 if (!adev->mc.srbm_soft_reset) 1320 if (!adev->gmc.srbm_soft_reset)
1303 return 0; 1321 return 0;
1304 srbm_soft_reset = adev->mc.srbm_soft_reset; 1322 srbm_soft_reset = adev->gmc.srbm_soft_reset;
1305 1323
1306 if (srbm_soft_reset) { 1324 if (srbm_soft_reset) {
1307 u32 tmp; 1325 u32 tmp;
@@ -1329,7 +1347,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
1329{ 1347{
1330 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1348 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1331 1349
1332 if (!adev->mc.srbm_soft_reset) 1350 if (!adev->gmc.srbm_soft_reset)
1333 return 0; 1351 return 0;
1334 1352
1335 gmc_v8_0_mc_resume(adev); 1353 gmc_v8_0_mc_resume(adev);
@@ -1410,7 +1428,8 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1410 addr); 1428 addr);
1411 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1429 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1412 status); 1430 status);
1413 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); 1431 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
1432 entry->pasid);
1414 } 1433 }
1415 1434
1416 return 0; 1435 return 0;
@@ -1642,9 +1661,11 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
1642 .get_clockgating_state = gmc_v8_0_get_clockgating_state, 1661 .get_clockgating_state = gmc_v8_0_get_clockgating_state,
1643}; 1662};
1644 1663
1645static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = { 1664static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
1646 .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb, 1665 .flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
1647 .set_pte_pde = gmc_v8_0_gart_set_pte_pde, 1666 .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
1667 .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
1668 .set_pte_pde = gmc_v8_0_set_pte_pde,
1648 .set_prt = gmc_v8_0_set_prt, 1669 .set_prt = gmc_v8_0_set_prt,
1649 .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags, 1670 .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
1650 .get_vm_pde = gmc_v8_0_get_vm_pde 1671 .get_vm_pde = gmc_v8_0_get_vm_pde
@@ -1655,16 +1676,16 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
1655 .process = gmc_v8_0_process_interrupt, 1676 .process = gmc_v8_0_process_interrupt,
1656}; 1677};
1657 1678
1658static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev) 1679static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
1659{ 1680{
1660 if (adev->gart.gart_funcs == NULL) 1681 if (adev->gmc.gmc_funcs == NULL)
1661 adev->gart.gart_funcs = &gmc_v8_0_gart_funcs; 1682 adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
1662} 1683}
1663 1684
1664static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev) 1685static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
1665{ 1686{
1666 adev->mc.vm_fault.num_types = 1; 1687 adev->gmc.vm_fault.num_types = 1;
1667 adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs; 1688 adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
1668} 1689}
1669 1690
1670const struct amdgpu_ip_block_version gmc_v8_0_ip_block = 1691const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 100ec69f020a..a70cbc45c4c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -34,6 +34,7 @@
34#include "vega10_enum.h" 34#include "vega10_enum.h"
35#include "mmhub/mmhub_1_0_offset.h" 35#include "mmhub/mmhub_1_0_offset.h"
36#include "athub/athub_1_0_offset.h" 36#include "athub/athub_1_0_offset.h"
37#include "oss/osssys_4_0_offset.h"
37 38
38#include "soc15.h" 39#include "soc15.h"
39#include "soc15_common.h" 40#include "soc15_common.h"
@@ -263,10 +264,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
263 264
264 if (printk_ratelimit()) { 265 if (printk_ratelimit()) {
265 dev_err(adev->dev, 266 dev_err(adev->dev,
266 "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n", 267 "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
267 entry->vmid_src ? "mmhub" : "gfxhub", 268 entry->vmid_src ? "mmhub" : "gfxhub",
268 entry->src_id, entry->ring_id, entry->vmid, 269 entry->src_id, entry->ring_id, entry->vmid,
269 entry->pas_id); 270 entry->pasid);
270 dev_err(adev->dev, " at page 0x%016llx from %d\n", 271 dev_err(adev->dev, " at page 0x%016llx from %d\n",
271 addr, entry->client_id); 272 addr, entry->client_id);
272 if (!amdgpu_sriov_vf(adev)) 273 if (!amdgpu_sriov_vf(adev))
@@ -285,8 +286,8 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
285 286
286static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) 287static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
287{ 288{
288 adev->mc.vm_fault.num_types = 1; 289 adev->gmc.vm_fault.num_types = 1;
289 adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs; 290 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
290} 291}
291 292
292static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid) 293static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
@@ -316,24 +317,21 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
316 */ 317 */
317 318
318/** 319/**
319 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback 320 * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
320 * 321 *
321 * @adev: amdgpu_device pointer 322 * @adev: amdgpu_device pointer
322 * @vmid: vm instance to flush 323 * @vmid: vm instance to flush
323 * 324 *
324 * Flush the TLB for the requested page table. 325 * Flush the TLB for the requested page table.
325 */ 326 */
326static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, 327static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
327 uint32_t vmid) 328 uint32_t vmid)
328{ 329{
329 /* Use register 17 for GART */ 330 /* Use register 17 for GART */
330 const unsigned eng = 17; 331 const unsigned eng = 17;
331 unsigned i, j; 332 unsigned i, j;
332 333
333 /* flush hdp cache */ 334 spin_lock(&adev->gmc.invalidate_lock);
334 adev->nbio_funcs->hdp_flush(adev);
335
336 spin_lock(&adev->mc.invalidate_lock);
337 335
338 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 336 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
339 struct amdgpu_vmhub *hub = &adev->vmhub[i]; 337 struct amdgpu_vmhub *hub = &adev->vmhub[i];
@@ -366,11 +364,52 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
366 DRM_ERROR("Timeout waiting for VM flush ACK!\n"); 364 DRM_ERROR("Timeout waiting for VM flush ACK!\n");
367 } 365 }
368 366
369 spin_unlock(&adev->mc.invalidate_lock); 367 spin_unlock(&adev->gmc.invalidate_lock);
368}
369
370static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
371 unsigned vmid, uint64_t pd_addr)
372{
373 struct amdgpu_device *adev = ring->adev;
374 struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
375 uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
376 uint64_t flags = AMDGPU_PTE_VALID;
377 unsigned eng = ring->vm_inv_eng;
378
379 amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags);
380 pd_addr |= flags;
381
382 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
383 lower_32_bits(pd_addr));
384
385 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
386 upper_32_bits(pd_addr));
387
388 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
389
390 /* wait for the invalidate to complete */
391 amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
392 1 << vmid, 1 << vmid);
393
394 return pd_addr;
395}
396
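On GFX9 the ring-emitted flush above goes through the ring's own VM hub: the page-directory address is first converted with amdgpu_gmc_get_vm_pde() and marked valid, both halves of the context base are written, the invalidate request is issued on the ring's vm_inv_eng, and the packet stream then waits for the per-VMID bit in the ack register. A hedged sketch of the equivalent CPU-side ack poll, roughly what the MMIO flush_gpu_tlb() path does under gmc.invalidate_lock; the helper name and loop shape are illustrative:

/* Hypothetical helper: poll the invalidate-ack register until the bit for
 * this vmid is set, mirroring the ring-emitted reg-wait above. */
static int example_wait_inv_ack(struct amdgpu_device *adev,
				struct amdgpu_vmhub *hub,
				unsigned eng, unsigned vmid)
{
	unsigned i;

	for (i = 0; i < adev->usec_timeout; ++i) {
		if (RREG32(hub->vm_inv_eng0_ack + eng) & (1u << vmid))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}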
397static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
398 unsigned pasid)
399{
400 struct amdgpu_device *adev = ring->adev;
401 uint32_t reg;
402
403 if (ring->funcs->vmhub == AMDGPU_GFXHUB)
404 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
405 else
406 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
407
408 amdgpu_ring_emit_wreg(ring, reg, pasid);
370} 409}
371 410
372/** 411/**
373 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO 412 * gmc_v9_0_set_pte_pde - update the page tables using MMIO
374 * 413 *
375 * @adev: amdgpu_device pointer 414 * @adev: amdgpu_device pointer
376 * @cpu_pt_addr: cpu address of the page table 415 * @cpu_pt_addr: cpu address of the page table
@@ -380,11 +419,9 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
380 * 419 *
381 * Update the page tables using the CPU. 420 * Update the page tables using the CPU.
382 */ 421 */
383static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev, 422static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
384 void *cpu_pt_addr, 423 uint32_t gpu_page_idx, uint64_t addr,
385 uint32_t gpu_page_idx, 424 uint64_t flags)
386 uint64_t addr,
387 uint64_t flags)
388{ 425{
389 void __iomem *ptr = (void *)cpu_pt_addr; 426 void __iomem *ptr = (void *)cpu_pt_addr;
390 uint64_t value; 427 uint64_t value;
@@ -475,10 +512,10 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
475{ 512{
476 if (!(*flags & AMDGPU_PDE_PTE)) 513 if (!(*flags & AMDGPU_PDE_PTE))
477 *addr = adev->vm_manager.vram_base_offset + *addr - 514 *addr = adev->vm_manager.vram_base_offset + *addr -
478 adev->mc.vram_start; 515 adev->gmc.vram_start;
479 BUG_ON(*addr & 0xFFFF00000000003FULL); 516 BUG_ON(*addr & 0xFFFF00000000003FULL);
480 517
481 if (!adev->mc.translate_further) 518 if (!adev->gmc.translate_further)
482 return; 519 return;
483 520
484 if (level == AMDGPU_VM_PDB1) { 521 if (level == AMDGPU_VM_PDB1) {
@@ -494,34 +531,35 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
494 } 531 }
495} 532}
496 533
497static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = { 534static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
498 .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb, 535 .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
499 .set_pte_pde = gmc_v9_0_gart_set_pte_pde, 536 .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
500 .get_invalidate_req = gmc_v9_0_get_invalidate_req, 537 .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
538 .set_pte_pde = gmc_v9_0_set_pte_pde,
501 .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags, 539 .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
502 .get_vm_pde = gmc_v9_0_get_vm_pde 540 .get_vm_pde = gmc_v9_0_get_vm_pde
503}; 541};
504 542
505static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev) 543static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
506{ 544{
507 if (adev->gart.gart_funcs == NULL) 545 if (adev->gmc.gmc_funcs == NULL)
508 adev->gart.gart_funcs = &gmc_v9_0_gart_funcs; 546 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
509} 547}
510 548
511static int gmc_v9_0_early_init(void *handle) 549static int gmc_v9_0_early_init(void *handle)
512{ 550{
513 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 551 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
514 552
515 gmc_v9_0_set_gart_funcs(adev); 553 gmc_v9_0_set_gmc_funcs(adev);
516 gmc_v9_0_set_irq_funcs(adev); 554 gmc_v9_0_set_irq_funcs(adev);
517 555
518 adev->mc.shared_aperture_start = 0x2000000000000000ULL; 556 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
519 adev->mc.shared_aperture_end = 557 adev->gmc.shared_aperture_end =
520 adev->mc.shared_aperture_start + (4ULL << 30) - 1; 558 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
521 adev->mc.private_aperture_start = 559 adev->gmc.private_aperture_start =
522 adev->mc.shared_aperture_end + 1; 560 adev->gmc.shared_aperture_end + 1;
523 adev->mc.private_aperture_end = 561 adev->gmc.private_aperture_end =
524 adev->mc.private_aperture_start + (4ULL << 30) - 1; 562 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
525 563
526 return 0; 564 return 0;
527} 565}
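
Editor's note: gmc_v9_0_early_init() now carves out two fixed 4 GB system apertures back to back. A minimal standalone sketch of that arithmetic, using the constants from the hunk above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t shared_start  = 0x2000000000000000ULL;
	uint64_t shared_end    = shared_start + (4ULL << 30) - 1;	/* 4 GB window */
	uint64_t private_start = shared_end + 1;			/* directly after it */
	uint64_t private_end   = private_start + (4ULL << 30) - 1;

	printf("shared  aperture: 0x%016llx - 0x%016llx\n",
	       (unsigned long long)shared_start, (unsigned long long)shared_end);
	printf("private aperture: 0x%016llx - 0x%016llx\n",
	       (unsigned long long)private_start, (unsigned long long)private_end);
	return 0;
}
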
@@ -635,7 +673,7 @@ static int gmc_v9_0_late_init(void *handle)
635 for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) 673 for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
636 BUG_ON(vm_inv_eng[i] > 16); 674 BUG_ON(vm_inv_eng[i] > 16);
637 675
638 if (adev->asic_type == CHIP_VEGA10) { 676 if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
639 r = gmc_v9_0_ecc_available(adev); 677 r = gmc_v9_0_ecc_available(adev);
640 if (r == 1) { 678 if (r == 1) {
641 DRM_INFO("ECC is active.\n"); 679 DRM_INFO("ECC is active.\n");
@@ -647,16 +685,16 @@ static int gmc_v9_0_late_init(void *handle)
647 } 685 }
648 } 686 }
649 687
650 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 688 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
651} 689}
652 690
653static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, 691static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
654 struct amdgpu_mc *mc) 692 struct amdgpu_gmc *mc)
655{ 693{
656 u64 base = 0; 694 u64 base = 0;
657 if (!amdgpu_sriov_vf(adev)) 695 if (!amdgpu_sriov_vf(adev))
658 base = mmhub_v1_0_get_fb_location(adev); 696 base = mmhub_v1_0_get_fb_location(adev);
659 amdgpu_device_vram_location(adev, &adev->mc, base); 697 amdgpu_device_vram_location(adev, &adev->gmc, base);
660 amdgpu_device_gart_location(adev, mc); 698 amdgpu_device_gart_location(adev, mc);
661 /* base offset of vram pages */ 699 /* base offset of vram pages */
662 if (adev->flags & AMD_IS_APU) 700 if (adev->flags & AMD_IS_APU)
@@ -680,10 +718,14 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
680 int chansize, numchan; 718 int chansize, numchan;
681 int r; 719 int r;
682 720
683 adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); 721 if (amdgpu_emu_mode != 1)
684 if (!adev->mc.vram_width) { 722 adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
723 if (!adev->gmc.vram_width) {
685 /* hbm memory channel size */ 724 /* hbm memory channel size */
686 chansize = 128; 725 if (adev->flags & AMD_IS_APU)
726 chansize = 64;
727 else
728 chansize = 128;
687 729
688 tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); 730 tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
689 tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; 731 tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
@@ -718,43 +760,49 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
718 numchan = 2; 760 numchan = 2;
719 break; 761 break;
720 } 762 }
721 adev->mc.vram_width = numchan * chansize; 763 adev->gmc.vram_width = numchan * chansize;
722 } 764 }
723 765
724 /* size in MB on si */ 766 /* size in MB on si */
725 adev->mc.mc_vram_size = 767 adev->gmc.mc_vram_size =
726 adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL; 768 adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
727 adev->mc.real_vram_size = adev->mc.mc_vram_size; 769 adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
728 770
729 if (!(adev->flags & AMD_IS_APU)) { 771 if (!(adev->flags & AMD_IS_APU)) {
730 r = amdgpu_device_resize_fb_bar(adev); 772 r = amdgpu_device_resize_fb_bar(adev);
731 if (r) 773 if (r)
732 return r; 774 return r;
733 } 775 }
734 adev->mc.aper_base = pci_resource_start(adev->pdev, 0); 776 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
735 adev->mc.aper_size = pci_resource_len(adev->pdev, 0); 777 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
736 778
779#ifdef CONFIG_X86_64
780 if (adev->flags & AMD_IS_APU) {
781 adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
782 adev->gmc.aper_size = adev->gmc.real_vram_size;
783 }
784#endif
737 /* In case the PCI BAR is larger than the actual amount of vram */ 785 /* In case the PCI BAR is larger than the actual amount of vram */
738 adev->mc.visible_vram_size = adev->mc.aper_size; 786 adev->gmc.visible_vram_size = adev->gmc.aper_size;
739 if (adev->mc.visible_vram_size > adev->mc.real_vram_size) 787 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
740 adev->mc.visible_vram_size = adev->mc.real_vram_size; 788 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
741 789
742 /* set the gart size */ 790 /* set the gart size */
743 if (amdgpu_gart_size == -1) { 791 if (amdgpu_gart_size == -1) {
744 switch (adev->asic_type) { 792 switch (adev->asic_type) {
745 case CHIP_VEGA10: /* all engines support GPUVM */ 793 case CHIP_VEGA10: /* all engines support GPUVM */
746 default: 794 default:
747 adev->mc.gart_size = 256ULL << 20; 795 adev->gmc.gart_size = 512ULL << 20;
748 break; 796 break;
749 case CHIP_RAVEN: /* DCE SG support */ 797 case CHIP_RAVEN: /* DCE SG support */
750 adev->mc.gart_size = 1024ULL << 20; 798 adev->gmc.gart_size = 1024ULL << 20;
751 break; 799 break;
752 } 800 }
753 } else { 801 } else {
754 adev->mc.gart_size = (u64)amdgpu_gart_size << 20; 802 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
755 } 803 }
756 804
757 gmc_v9_0_vram_gtt_location(adev, &adev->mc); 805 gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
758 806
759 return 0; 807 return 0;
760} 808}
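
Editor's note: the GART sizing policy in gmc_v9_0_mc_init() is to honor the amdgpu_gart_size module parameter when it is set (given in MB), otherwise default to 512 MB on Vega10 and 1 GB on Raven (for DCE scatter-gather). A small standalone model of that decision; the enum below is just a label, not the driver's chip enum:

#include <stdint.h>
#include <stdio.h>

enum chip { CHIP_VEGA10, CHIP_RAVEN };

static uint64_t pick_gart_size(enum chip asic, long gart_size_mb_param)
{
	if (gart_size_mb_param != -1)		/* user override, in MB */
		return (uint64_t)gart_size_mb_param << 20;
	if (asic == CHIP_RAVEN)			/* DCE SG support */
		return 1024ULL << 20;
	return 512ULL << 20;			/* Vega10 and the default case */
}

int main(void)
{
	printf("vega10 default: %llu MB\n",
	       (unsigned long long)(pick_gart_size(CHIP_VEGA10, -1) >> 20));
	printf("raven  default: %llu MB\n",
	       (unsigned long long)(pick_gart_size(CHIP_RAVEN, -1) >> 20));
	printf("override 256:   %llu MB\n",
	       (unsigned long long)(pick_gart_size(CHIP_VEGA10, 256) >> 20));
	return 0;
}
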
@@ -786,23 +834,21 @@ static int gmc_v9_0_sw_init(void *handle)
786 gfxhub_v1_0_init(adev); 834 gfxhub_v1_0_init(adev);
787 mmhub_v1_0_init(adev); 835 mmhub_v1_0_init(adev);
788 836
789 spin_lock_init(&adev->mc.invalidate_lock); 837 spin_lock_init(&adev->gmc.invalidate_lock);
790 838
839 adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
791 switch (adev->asic_type) { 840 switch (adev->asic_type) {
792 case CHIP_RAVEN: 841 case CHIP_RAVEN:
793 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
794 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { 842 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
795 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 843 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
796 } else { 844 } else {
797 /* vm_size is 128TB + 512GB for legacy 3-level page support */ 845 /* vm_size is 128TB + 512GB for legacy 3-level page support */
798 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48); 846 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
799 adev->mc.translate_further = 847 adev->gmc.translate_further =
800 adev->vm_manager.num_level > 1; 848 adev->vm_manager.num_level > 1;
801 } 849 }
802 break; 850 break;
803 case CHIP_VEGA10: 851 case CHIP_VEGA10:
804 /* XXX Don't know how to get VRAM type yet. */
805 adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
806 /* 852 /*
807 * To fulfill 4-level page support, 853 * To fulfill 4-level page support,
808 * vm size is 256TB (48bit), maximum size of Vega10, 854 * vm size is 256TB (48bit), maximum size of Vega10,
@@ -815,10 +861,10 @@ static int gmc_v9_0_sw_init(void *handle)
815 } 861 }
816 862
817 /* This interrupt is VMC page fault.*/ 863 /* This interrupt is VMC page fault.*/
818 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0, 864 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
819 &adev->mc.vm_fault); 865 &adev->gmc.vm_fault);
820 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0, 866 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
821 &adev->mc.vm_fault); 867 &adev->gmc.vm_fault);
822 868
823 if (r) 869 if (r)
824 return r; 870 return r;
@@ -827,13 +873,13 @@ static int gmc_v9_0_sw_init(void *handle)
827 * This is the max address of the GPU's 873 * This is the max address of the GPU's
828 * internal address space. 874 * internal address space.
829 */ 875 */
830 adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ 876 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
831 877
832 /* 878 /*
833 * It needs to reserve 8M stolen memory for vega10 879 * It needs to reserve 8M stolen memory for vega10
834 * TODO: Figure out how to avoid that... 880 * TODO: Figure out how to avoid that...
835 */ 881 */
836 adev->mc.stolen_size = 8 * 1024 * 1024; 882 adev->gmc.stolen_size = 8 * 1024 * 1024;
837 883
838 /* set DMA mask + need_dma32 flags. 884 /* set DMA mask + need_dma32 flags.
839 * PCIE - can handle 44-bits. 885 * PCIE - can handle 44-bits.
@@ -975,7 +1021,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
975 WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); 1021 WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
976 1022
977 /* After HDP is initialized, flush HDP.*/ 1023 /* After HDP is initialized, flush HDP.*/
978 adev->nbio_funcs->hdp_flush(adev); 1024 adev->nbio_funcs->hdp_flush(adev, NULL);
979 1025
980 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 1026 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
981 value = false; 1027 value = false;
@@ -984,10 +1030,10 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
984 1030
985 gfxhub_v1_0_set_fault_enable_default(adev, value); 1031 gfxhub_v1_0_set_fault_enable_default(adev, value);
986 mmhub_v1_0_set_fault_enable_default(adev, value); 1032 mmhub_v1_0_set_fault_enable_default(adev, value);
987 gmc_v9_0_gart_flush_gpu_tlb(adev, 0); 1033 gmc_v9_0_flush_gpu_tlb(adev, 0);
988 1034
989 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 1035 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
990 (unsigned)(adev->mc.gart_size >> 20), 1036 (unsigned)(adev->gmc.gart_size >> 20),
991 (unsigned long long)adev->gart.table_addr); 1037 (unsigned long long)adev->gart.table_addr);
992 adev->gart.ready = true; 1038 adev->gart.ready = true;
993 return 0; 1039 return 0;
@@ -1038,7 +1084,7 @@ static int gmc_v9_0_hw_fini(void *handle)
1038 return 0; 1084 return 0;
1039 } 1085 }
1040 1086
1041 amdgpu_irq_put(adev, &adev->mc.vm_fault, 0); 1087 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1042 gmc_v9_0_gart_disable(adev); 1088 gmc_v9_0_gart_disable(adev);
1043 1089
1044 return 0; 1090 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index c4e4be3dd31d..842c4b677b4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -111,7 +111,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
111 iceland_ih_disable_interrupts(adev); 111 iceland_ih_disable_interrupts(adev);
112 112
113 /* setup interrupt control */ 113 /* setup interrupt control */
114 WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); 114 WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
115 interrupt_cntl = RREG32(mmINTERRUPT_CNTL); 115 interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
116 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi 116 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
117 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN 117 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -260,7 +260,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
260 entry->src_data[0] = dw[1] & 0xfffffff; 260 entry->src_data[0] = dw[1] & 0xfffffff;
261 entry->ring_id = dw[2] & 0xff; 261 entry->ring_id = dw[2] & 0xff;
262 entry->vmid = (dw[2] >> 8) & 0xff; 262 entry->vmid = (dw[2] >> 8) & 0xff;
263 entry->pas_id = (dw[2] >> 16) & 0xffff; 263 entry->pasid = (dw[2] >> 16) & 0xffff;
264 264
265 /* wptr/rptr are in bytes! */ 265 /* wptr/rptr are in bytes! */
266 adev->irq.ih.rptr += 16; 266 adev->irq.ih.rptr += 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index d9e9e52a0def..81babe026529 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -42,6 +42,8 @@
42#define KV_MINIMUM_ENGINE_CLOCK 800 42#define KV_MINIMUM_ENGINE_CLOCK 800
43#define SMC_RAM_END 0x40000 43#define SMC_RAM_END 0x40000
44 44
45static const struct amd_pm_funcs kv_dpm_funcs;
46
45static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); 47static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
46static int kv_enable_nb_dpm(struct amdgpu_device *adev, 48static int kv_enable_nb_dpm(struct amdgpu_device *adev,
47 bool enable); 49 bool enable);
@@ -2960,6 +2962,7 @@ static int kv_dpm_early_init(void *handle)
2960{ 2962{
2961 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2963 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2962 2964
2965 adev->powerplay.pp_funcs = &kv_dpm_funcs;
2963 kv_dpm_set_irq_funcs(adev); 2966 kv_dpm_set_irq_funcs(adev);
2964 2967
2965 return 0; 2968 return 0;
@@ -3301,7 +3304,7 @@ static int kv_dpm_read_sensor(void *handle, int idx,
3301 } 3304 }
3302} 3305}
3303 3306
3304const struct amd_ip_funcs kv_dpm_ip_funcs = { 3307static const struct amd_ip_funcs kv_dpm_ip_funcs = {
3305 .name = "kv_dpm", 3308 .name = "kv_dpm",
3306 .early_init = kv_dpm_early_init, 3309 .early_init = kv_dpm_early_init,
3307 .late_init = kv_dpm_late_init, 3310 .late_init = kv_dpm_late_init,
@@ -3318,8 +3321,16 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
3318 .set_powergating_state = kv_dpm_set_powergating_state, 3321 .set_powergating_state = kv_dpm_set_powergating_state,
3319}; 3322};
3320 3323
3321const struct amd_pm_funcs kv_dpm_funcs = { 3324const struct amdgpu_ip_block_version kv_smu_ip_block =
3322 .get_temperature = &kv_dpm_get_temp, 3325{
3326 .type = AMD_IP_BLOCK_TYPE_SMC,
3327 .major = 1,
3328 .minor = 0,
3329 .rev = 0,
3330 .funcs = &kv_dpm_ip_funcs,
3331};
3332
3333static const struct amd_pm_funcs kv_dpm_funcs = {
3323 .pre_set_power_state = &kv_dpm_pre_set_power_state, 3334 .pre_set_power_state = &kv_dpm_pre_set_power_state,
3324 .set_power_state = &kv_dpm_set_power_state, 3335 .set_power_state = &kv_dpm_set_power_state,
3325 .post_set_power_state = &kv_dpm_post_set_power_state, 3336 .post_set_power_state = &kv_dpm_post_set_power_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index ffd5b7ee49c4..3dd5816495a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -50,7 +50,7 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
50 uint64_t value; 50 uint64_t value;
51 51
52 BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL)); 52 BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
53 value = adev->gart.table_addr - adev->mc.vram_start + 53 value = adev->gart.table_addr - adev->gmc.vram_start +
54 adev->vm_manager.vram_base_offset; 54 adev->vm_manager.vram_base_offset;
55 value &= 0x0000FFFFFFFFF000ULL; 55 value &= 0x0000FFFFFFFFF000ULL;
56 value |= 0x1; /* valid bit */ 56 value |= 0x1; /* valid bit */
@@ -67,14 +67,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
67 mmhub_v1_0_init_gart_pt_regs(adev); 67 mmhub_v1_0_init_gart_pt_regs(adev);
68 68
69 WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, 69 WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
70 (u32)(adev->mc.gart_start >> 12)); 70 (u32)(adev->gmc.gart_start >> 12));
71 WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, 71 WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
72 (u32)(adev->mc.gart_start >> 44)); 72 (u32)(adev->gmc.gart_start >> 44));
73 73
74 WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, 74 WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
75 (u32)(adev->mc.gart_end >> 12)); 75 (u32)(adev->gmc.gart_end >> 12));
76 WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, 76 WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
77 (u32)(adev->mc.gart_end >> 44)); 77 (u32)(adev->gmc.gart_end >> 44));
78} 78}
79 79
80static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) 80static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -89,12 +89,12 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
89 89
90 /* Program the system aperture low logical page number. */ 90 /* Program the system aperture low logical page number. */
91 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 91 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
92 adev->mc.vram_start >> 18); 92 adev->gmc.vram_start >> 18);
93 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 93 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
94 adev->mc.vram_end >> 18); 94 adev->gmc.vram_end >> 18);
95 95
96 /* Set default page address. */ 96 /* Set default page address. */
97 value = adev->vram_scratch.gpu_addr - adev->mc.vram_start + 97 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
98 adev->vm_manager.vram_base_offset; 98 adev->vm_manager.vram_base_offset;
99 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 99 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
100 (u32)(value >> 12)); 100 (u32)(value >> 12));
@@ -103,9 +103,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
103 103
104 /* Program "protection fault". */ 104 /* Program "protection fault". */
105 WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, 105 WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
106 (u32)(adev->dummy_page.addr >> 12)); 106 (u32)(adev->dummy_page_addr >> 12));
107 WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, 107 WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
108 (u32)((u64)adev->dummy_page.addr >> 44)); 108 (u32)((u64)adev->dummy_page_addr >> 44));
109 109
110 tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2); 110 tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
111 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2, 111 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
@@ -155,7 +155,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
155 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); 155 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
156 WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); 156 WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
157 157
158 if (adev->mc.translate_further) { 158 if (adev->gmc.translate_further) {
159 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); 159 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
160 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, 160 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
161 L2_CACHE_BIGK_FRAGMENT_SIZE, 9); 161 L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
@@ -207,7 +207,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
207 207
208 num_level = adev->vm_manager.num_level; 208 num_level = adev->vm_manager.num_level;
209 block_size = adev->vm_manager.block_size; 209 block_size = adev->vm_manager.block_size;
210 if (adev->mc.translate_further) 210 if (adev->gmc.translate_further)
211 num_level -= 1; 211 num_level -= 1;
212 else 212 else
213 block_size -= 9; 213 block_size -= 9;
@@ -272,21 +272,21 @@ static const struct pctl_data pctl0_data[] = {
272 {0x11, 0x6a684}, 272 {0x11, 0x6a684},
273 {0x19, 0xea68e}, 273 {0x19, 0xea68e},
274 {0x29, 0xa69e}, 274 {0x29, 0xa69e},
275 {0x2b, 0x34a6c0}, 275 {0x2b, 0x0010a6c0},
276 {0x61, 0x83a707}, 276 {0x3d, 0x83a707},
277 {0xe6, 0x8a7a4}, 277 {0xc2, 0x8a7a4},
278 {0xf0, 0x1a7b8}, 278 {0xcc, 0x1a7b8},
279 {0xf3, 0xfa7cc}, 279 {0xcf, 0xfa7cc},
280 {0x104, 0x17a7dd}, 280 {0xe0, 0x17a7dd},
281 {0x11d, 0xa7dc}, 281 {0xf9, 0xa7dc},
282 {0x11f, 0x12a7f5}, 282 {0xfb, 0x12a7f5},
283 {0x133, 0xa808}, 283 {0x10f, 0xa808},
284 {0x135, 0x12a810}, 284 {0x111, 0x12a810},
285 {0x149, 0x7a82c} 285 {0x125, 0x7a82c}
286}; 286};
287#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data)) 287#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
288 288
289#define PCTL0_RENG_EXEC_END_PTR 0x151 289#define PCTL0_RENG_EXEC_END_PTR 0x12d
290#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640 290#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
291#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833 291#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833
292 292
@@ -385,10 +385,9 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
385 if (amdgpu_sriov_vf(adev)) 385 if (amdgpu_sriov_vf(adev))
386 return; 386 return;
387 387
388 /****************** pctl0 **********************/
388 pctl0_misc = RREG32_SOC15(MMHUB, 0, mmPCTL0_MISC); 389 pctl0_misc = RREG32_SOC15(MMHUB, 0, mmPCTL0_MISC);
389 pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE); 390 pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);
390 pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC);
391 pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
392 391
393 /* Light sleep must be disabled before writing to pctl0 registers */ 392 /* Light sleep must be disabled before writing to pctl0 registers */
394 pctl0_misc &= ~PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK; 393 pctl0_misc &= ~PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
@@ -402,12 +401,13 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
402 pctl0_data[i].data); 401 pctl0_data[i].data);
403 } 402 }
404 403
405 /* Set the reng execute end ptr for pctl0 */ 404 /* Re-enable light sleep */
406 pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, 405 pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
407 PCTL0_RENG_EXECUTE, 406 WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
408 RENG_EXECUTE_END_PTR, 407
409 PCTL0_RENG_EXEC_END_PTR); 408 /****************** pctl1 **********************/
410 WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute); 409 pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC);
410 pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
411 411
412 /* Light sleep must be disabled before writing to pctl1 registers */ 412 /* Light sleep must be disabled before writing to pctl1 registers */
413 pctl1_misc &= ~PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK; 413 pctl1_misc &= ~PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
@@ -421,20 +421,25 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
421 pctl1_data[i].data); 421 pctl1_data[i].data);
422 } 422 }
423 423
424 /* Re-enable light sleep */
425 pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
426 WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
427
428 mmhub_v1_0_power_gating_write_save_ranges(adev);
429
430 /* Set the reng execute end ptr for pctl0 */
431 pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
432 PCTL0_RENG_EXECUTE,
433 RENG_EXECUTE_END_PTR,
434 PCTL0_RENG_EXEC_END_PTR);
435 WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
436
424 /* Set the reng execute end ptr for pctl1 */ 437 /* Set the reng execute end ptr for pctl1 */
425 pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, 438 pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
426 PCTL1_RENG_EXECUTE, 439 PCTL1_RENG_EXECUTE,
427 RENG_EXECUTE_END_PTR, 440 RENG_EXECUTE_END_PTR,
428 PCTL1_RENG_EXEC_END_PTR); 441 PCTL1_RENG_EXEC_END_PTR);
429 WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute); 442 WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
430
431 mmhub_v1_0_power_gating_write_save_ranges(adev);
432
433 /* Re-enable light sleep */
434 pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
435 WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
436 pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
437 WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
438} 443}
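
Editor's note: the reordering above gives each register-engine controller the same sequence: drop memory light sleep, program its pctl data, restore light sleep, and only afterwards write the save ranges and the RENG_EXECUTE end pointers. A loose standalone model of that per-controller flow, with register access stubbed out and illustrative values only:

#include <stdint.h>
#include <stdio.h>

#define RENG_MEM_LS_ENABLE (1u << 0)	/* stand-in for the LS enable bit */

static uint32_t misc_reg;	/* one stand-in MISC reg; real hw has one per controller */

static void program_pctl(const char *name, const uint32_t *data, int len)
{
	misc_reg &= ~RENG_MEM_LS_ENABLE;	/* light sleep off before writing */
	for (int i = 0; i < len; i++)
		printf("%s: write entry 0x%x\n", name, data[i]);
	misc_reg |= RENG_MEM_LS_ENABLE;		/* light sleep back on */
}

int main(void)
{
	const uint32_t pctl0[] = { 0x6a684, 0xea68e };	/* illustrative data */
	const uint32_t pctl1[] = { 0x12345 };

	program_pctl("pctl0", pctl0, 2);
	program_pctl("pctl1", pctl1, 1);
	/* only after both controllers are programmed: */
	printf("write power-gating save ranges\n");
	printf("set RENG_EXECUTE end pointers for pctl0 and pctl1\n");
	return 0;
}
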
439 444
440void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, 445void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
@@ -466,6 +471,9 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
466 RENG_EXECUTE_ON_REG_UPDATE, 1); 471 RENG_EXECUTE_ON_REG_UPDATE, 1);
467 WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute); 472 WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
468 473
474 if (adev->powerplay.pp_funcs->set_mmhub_powergating_by_smu)
475 amdgpu_dpm_set_mmhub_powergating_by_smu(adev);
476
469 } else { 477 } else {
470 pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, 478 pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
471 PCTL0_RENG_EXECUTE, 479 PCTL0_RENG_EXECUTE,
@@ -494,9 +502,9 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
494 * SRIOV driver need to program them 502 * SRIOV driver need to program them
495 */ 503 */
496 WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE, 504 WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
497 adev->mc.vram_start >> 24); 505 adev->gmc.vram_start >> 24);
498 WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP, 506 WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
499 adev->mc.vram_end >> 24); 507 adev->gmc.vram_end >> 24);
500 } 508 }
501 509
502 /* GART Enable. */ 510 /* GART Enable. */
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 271452d3999a..8fb933c62cf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -33,56 +33,34 @@
33 33
34static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev) 34static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
35{ 35{
36 u32 reg; 36 WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
37 int timeout = AI_MAILBOX_TIMEDOUT;
38 u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
39
40 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
41 mmBIF_BX_PF0_MAILBOX_CONTROL));
42 reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
43 WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
44 mmBIF_BX_PF0_MAILBOX_CONTROL), reg);
45
46 /*Wait for RCV_MSG_VALID to be 0*/
47 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
48 mmBIF_BX_PF0_MAILBOX_CONTROL));
49 while (reg & mask) {
50 if (timeout <= 0) {
51 pr_err("RCV_MSG_VALID is not cleared\n");
52 break;
53 }
54 mdelay(1);
55 timeout -=1;
56
57 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
58 mmBIF_BX_PF0_MAILBOX_CONTROL));
59 }
60} 37}
61 38
62static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val) 39static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
63{ 40{
64 u32 reg; 41 WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
42}
65 43
66 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 44/*
 67 			mmBIF_BX_PF0_MAILBOX_CONTROL));               45 * this peek_msg could *only* be called in IRQ routine because in IRQ routine
 68 	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,         46 * RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL must already be set to 1
69 TRN_MSG_VALID, val ? 1 : 0); 47 * by host.
70 WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL), 48 *
 71 		      reg);                                           49 * if not called in IRQ routine, this peek_msg cannot be guaranteed to return the
50 * correct value since it doesn't return the RCV_DW0 under the case that
51 * RCV_MSG_VALID is set by host.
52 */
53static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
54{
55 return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
56 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
72} 57}
73 58
59
74static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev, 60static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
75 enum idh_event event) 61 enum idh_event event)
76{ 62{
77 u32 reg; 63 u32 reg;
78 u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
79
80 if (event != IDH_FLR_NOTIFICATION_CMPL) {
81 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
82 mmBIF_BX_PF0_MAILBOX_CONTROL));
83 if (!(reg & mask))
84 return -ENOENT;
85 }
86 64
87 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 65 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
88 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0)); 66 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
@@ -94,54 +72,67 @@ static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
94 return 0; 72 return 0;
95} 73}
96 74
75static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev) {
76 return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
77}
78
97static int xgpu_ai_poll_ack(struct amdgpu_device *adev) 79static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
98{ 80{
99 int r = 0, timeout = AI_MAILBOX_TIMEDOUT; 81 int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
100 u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK); 82 u8 reg;
101 u32 reg; 83
84 do {
85 reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
86 if (reg & 2)
87 return 0;
102 88
103 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
104 mmBIF_BX_PF0_MAILBOX_CONTROL));
105 while (!(reg & mask)) {
106 if (timeout <= 0) {
107 pr_err("Doesn't get ack from pf.\n");
108 r = -ETIME;
109 break;
110 }
111 mdelay(5); 89 mdelay(5);
112 timeout -= 5; 90 timeout -= 5;
91 } while (timeout > 1);
113 92
114 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 93 pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);
115 mmBIF_BX_PF0_MAILBOX_CONTROL));
116 }
117 94
118 return r; 95 return -ETIME;
119} 96}
120 97
121static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event) 98static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
122{ 99{
123 int r = 0, timeout = AI_MAILBOX_TIMEDOUT; 100 int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;
124
125 r = xgpu_ai_mailbox_rcv_msg(adev, event);
126 while (r) {
127 if (timeout <= 0) {
128 pr_err("Doesn't get msg:%d from pf.\n", event);
129 r = -ETIME;
130 break;
131 }
132 mdelay(5);
133 timeout -= 5;
134 101
102 do {
135 r = xgpu_ai_mailbox_rcv_msg(adev, event); 103 r = xgpu_ai_mailbox_rcv_msg(adev, event);
136 } 104 if (!r)
105 return 0;
137 106
138 return r; 107 msleep(10);
108 timeout -= 10;
109 } while (timeout > 1);
110
111 pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
112
113 return -ETIME;
139} 114}
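
Editor's note: both poll helpers are now plain bounded loops: try, sleep, shrink the budget, and return -ETIME once the budget is spent. A generic standalone sketch of that pattern, with the mailbox read faked so the example is self-contained:

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

/* fake "message arrived" check: succeeds on the 4th poll */
static int rcv_msg(void)
{
	static int calls;
	return (++calls >= 4) ? 0 : -ENOENT;
}

static int poll_msg(int timeout_ms, int step_ms)
{
	do {
		if (rcv_msg() == 0)
			return 0;
		usleep(step_ms * 1000);		/* back off between tries */
		timeout_ms -= step_ms;
	} while (timeout_ms > 1);

	fprintf(stderr, "timed out waiting for message\n");
	return -ETIME;
}

int main(void)
{
	return poll_msg(100, 10) ? 1 : 0;
}
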
140 115
141static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, 116static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
142 enum idh_request req, u32 data1, u32 data2, u32 data3) { 117 enum idh_request req, u32 data1, u32 data2, u32 data3) {
143 u32 reg; 118 u32 reg;
144 int r; 119 int r;
120 uint8_t trn;
121
122 /* IMPORTANT:
                                                                              123 	 * clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK;
                                                                              124 	 * once the host's RCV_MSG_ACK is cleared, hw automatically clears
                                                                              125 	 * the VF's TRN_MSG_ACK as well, otherwise the xgpu_ai_poll_ack() below
                                                                              126 	 * will return immediately
127 */
128 do {
129 xgpu_ai_mailbox_set_valid(adev, false);
130 trn = xgpu_ai_peek_ack(adev);
131 if (trn) {
                                                                              132 			pr_err("trn=%x ACK should not assert! wait again!\n", trn);
133 msleep(1);
134 }
135 } while(trn);
145 136
146 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 137 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
147 mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); 138 mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
@@ -245,15 +236,36 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
245{ 236{
246 struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); 237 struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
247 struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); 238 struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
248 239 int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
249 /* wait until RCV_MSG become 3 */ 240 int locked;
250 if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) { 241
251 pr_err("failed to recieve FLR_CMPL\n"); 242 /* block amdgpu_gpu_recover till msg FLR COMPLETE received,
252 return; 243 * otherwise the mailbox msg will be ruined/reseted by
253 } 244 * the VF FLR.
254 245 *
255 /* Trigger recovery due to world switch failure */ 246 * we can unlock the lock_reset to allow "amdgpu_job_timedout"
256 amdgpu_device_gpu_recover(adev, NULL, false); 247 * to run gpu_recover() after FLR_NOTIFICATION_CMPL received
248 * which means host side had finished this VF's FLR.
249 */
250 locked = mutex_trylock(&adev->lock_reset);
251 if (locked)
252 adev->in_gpu_reset = 1;
253
254 do {
255 if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
256 goto flr_done;
257
258 msleep(10);
259 timeout -= 10;
260 } while (timeout > 1);
261
262flr_done:
263 if (locked)
264 mutex_unlock(&adev->lock_reset);
265
266 /* Trigger recovery for world switch failure if no TDR */
267 if (amdgpu_lockup_timeout == 0)
268 amdgpu_device_gpu_recover(adev, NULL, true);
257} 269}
258 270
259static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, 271static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -274,24 +286,22 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
274 struct amdgpu_irq_src *source, 286 struct amdgpu_irq_src *source,
275 struct amdgpu_iv_entry *entry) 287 struct amdgpu_iv_entry *entry)
276{ 288{
277 int r; 289 enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
278 290
279 /* trigger gpu-reset by hypervisor only if TDR disbaled */ 291 switch (event) {
280 if (!amdgpu_gpu_recovery) { 292 case IDH_FLR_NOTIFICATION:
281 /* see what event we get */ 293 if (amdgpu_sriov_runtime(adev))
282 r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); 294 schedule_work(&adev->virt.flr_work);
283 295 break;
284 /* sometimes the interrupt is delayed to inject to VM, so under such case 296 /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
 285 		 * the IDH_FLR_NOTIFICATION is overwritten by VF FLR from GIM side, thus     297 * it for now since that polling thread will handle it,
286 * above recieve message could be failed, we should schedule the flr_work 298 * other msg like flr complete is not handled here.
287 * anyway
288 */ 299 */
289 if (r) { 300 case IDH_CLR_MSG_BUF:
290 DRM_ERROR("FLR_NOTIFICATION is missed\n"); 301 case IDH_FLR_NOTIFICATION_CMPL:
291 xgpu_ai_mailbox_send_ack(adev); 302 case IDH_READY_TO_ACCESS_GPU:
292 } 303 default:
293 304 break;
294 schedule_work(&adev->virt.flr_work);
295 } 305 }
296 306
297 return 0; 307 return 0;
@@ -319,11 +329,11 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
319{ 329{
320 int r; 330 int r;
321 331
322 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq); 332 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
323 if (r) 333 if (r)
324 return r; 334 return r;
325 335
326 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq); 336 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
327 if (r) { 337 if (r) {
328 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); 338 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
329 return r; 339 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 67e78576a9eb..b4a9ceea334b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -24,7 +24,9 @@
24#ifndef __MXGPU_AI_H__ 24#ifndef __MXGPU_AI_H__
25#define __MXGPU_AI_H__ 25#define __MXGPU_AI_H__
26 26
27#define AI_MAILBOX_TIMEDOUT 12000 27#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
28#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
29#define AI_MAILBOX_POLL_FLR_TIMEDOUT 500
28 30
29enum idh_request { 31enum idh_request {
30 IDH_REQ_GPU_INIT_ACCESS = 1, 32 IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -51,4 +53,7 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
51int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev); 53int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
52void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev); 54void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
53 55
56#define AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4
57#define AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4 + 1
58
54#endif 59#endif
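
Editor's note: the two AI_MAIBOX_CONTROL_*_OFFSET_BYTE macros turn the dword register index of BIF_BX_PF0_MAILBOX_CONTROL into byte addresses for RREG8/WREG8: times four for the register's byte offset, plus one for the RCV control byte. A standalone sketch of the conversion with a made-up register index:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned mailbox_control_dw = 0x1234;		/* hypothetical dword index */
	unsigned trn_byte = mailbox_control_dw * 4;	/* byte 0: TRN_MSG_* bits */
	unsigned rcv_byte = mailbox_control_dw * 4 + 1;	/* byte 1: RCV_MSG_* bits */

	printf("TRN control byte at 0x%x, RCV control byte at 0x%x\n",
	       trn_byte, rcv_byte);
	return 0;
}
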
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index d4da663d5eb0..1cf34248dff4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -53,9 +53,16 @@ static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
53 WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); 53 WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
54} 54}
55 55
56static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev) 56static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
57 struct amdgpu_ring *ring)
57{ 58{
58 WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0); 59 if (!ring || !ring->funcs->emit_wreg)
60 WREG32_SOC15_NO_KIQ(NBIO, 0,
61 mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
62 0);
63 else
64 amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
65 NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
59} 66}
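
Editor's note: hdp_flush() now takes an optional ring: if the ring can emit register writes, the flush is queued on the ring so it stays ordered with the other packets; otherwise the driver falls back to an immediate MMIO write. A minimal sketch of that dispatch with stand-in types (not the amdgpu structures); the nbio_v7_0 variant below uses the same shape.

#include <stdio.h>

struct ring_funcs {
	void (*emit_wreg)(unsigned reg, unsigned val);
};

struct ring {
	const struct ring_funcs *funcs;
};

static void mmio_write(unsigned reg, unsigned val)
{
	printf("MMIO write: reg 0x%x <= 0x%x\n", reg, val);
}

static void ring_emit(unsigned reg, unsigned val)
{
	printf("ring packet: reg 0x%x <= 0x%x\n", reg, val);
}

static void hdp_flush(struct ring *ring, unsigned flush_reg)
{
	if (!ring || !ring->funcs->emit_wreg)
		mmio_write(flush_reg, 0);		/* immediate CPU write */
	else
		ring->funcs->emit_wreg(flush_reg, 0);	/* ordered with ring work */
}

int main(void)
{
	struct ring_funcs funcs = { .emit_wreg = ring_emit };
	struct ring r = { .funcs = &funcs };

	hdp_flush(NULL, 0x80);	/* no ring: direct MMIO path */
	hdp_flush(&r, 0x80);	/* ring path */
	return 0;
}
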
60 67
61static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev) 68static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
@@ -126,7 +133,7 @@ static void nbio_v6_1_ih_control(struct amdgpu_device *adev)
126 u32 interrupt_cntl; 133 u32 interrupt_cntl;
127 134
128 /* setup interrupt control */ 135 /* setup interrupt control */
129 WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); 136 WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
130 interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL); 137 interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
131 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi 138 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
132 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN 139 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 17a9131a4598..df34dc79d444 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -53,9 +53,14 @@ static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
53 WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); 53 WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
54} 54}
55 55
56static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev) 56static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
57 struct amdgpu_ring *ring)
57{ 58{
58 WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); 59 if (!ring || !ring->funcs->emit_wreg)
60 WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
61 else
62 amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
63 NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
59} 64}
60 65
61static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev) 66static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
@@ -203,7 +208,7 @@ static void nbio_v7_0_ih_control(struct amdgpu_device *adev)
203 u32 interrupt_cntl; 208 u32 interrupt_cntl;
204 209
205 /* setup interrupt control */ 210 /* setup interrupt control */
206 WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); 211 WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
207 interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL); 212 interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
208 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi 213 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
209 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN 214 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 5a9fe24697f9..8873d833a7f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -87,7 +87,7 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
87 return 0; 87 return 0;
88} 88}
89 89
90int psp_v10_0_init_microcode(struct psp_context *psp) 90static int psp_v10_0_init_microcode(struct psp_context *psp)
91{ 91{
92 struct amdgpu_device *adev = psp->adev; 92 struct amdgpu_device *adev = psp->adev;
93 const char *chip_name; 93 const char *chip_name;
@@ -133,7 +133,8 @@ out:
133 return err; 133 return err;
134} 134}
135 135
136int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd) 136static int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
137 struct psp_gfx_cmd_resp *cmd)
137{ 138{
138 int ret; 139 int ret;
139 uint64_t fw_mem_mc_addr = ucode->mc_addr; 140 uint64_t fw_mem_mc_addr = ucode->mc_addr;
@@ -152,7 +153,8 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
152 return ret; 153 return ret;
153} 154}
154 155
155int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) 156static int psp_v10_0_ring_init(struct psp_context *psp,
157 enum psp_ring_type ring_type)
156{ 158{
157 int ret = 0; 159 int ret = 0;
158 struct psp_ring *ring; 160 struct psp_ring *ring;
@@ -177,7 +179,8 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
177 return 0; 179 return 0;
178} 180}
179 181
180int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) 182static int psp_v10_0_ring_create(struct psp_context *psp,
183 enum psp_ring_type ring_type)
181{ 184{
182 int ret = 0; 185 int ret = 0;
183 unsigned int psp_ring_reg = 0; 186 unsigned int psp_ring_reg = 0;
@@ -208,7 +211,8 @@ int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
208 return ret; 211 return ret;
209} 212}
210 213
211int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) 214static int psp_v10_0_ring_stop(struct psp_context *psp,
215 enum psp_ring_type ring_type)
212{ 216{
213 int ret = 0; 217 int ret = 0;
214 struct psp_ring *ring; 218 struct psp_ring *ring;
@@ -231,7 +235,8 @@ int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
231 return ret; 235 return ret;
232} 236}
233 237
234int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) 238static int psp_v10_0_ring_destroy(struct psp_context *psp,
239 enum psp_ring_type ring_type)
235{ 240{
236 int ret = 0; 241 int ret = 0;
237 struct psp_ring *ring = &psp->km_ring; 242 struct psp_ring *ring = &psp->km_ring;
@@ -248,10 +253,10 @@ int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type
248 return ret; 253 return ret;
249} 254}
250 255
251int psp_v10_0_cmd_submit(struct psp_context *psp, 256static int psp_v10_0_cmd_submit(struct psp_context *psp,
252 struct amdgpu_firmware_info *ucode, 257 struct amdgpu_firmware_info *ucode,
253 uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, 258 uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
254 int index) 259 int index)
255{ 260{
256 unsigned int psp_write_ptr_reg = 0; 261 unsigned int psp_write_ptr_reg = 0;
257 struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem; 262 struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
@@ -298,9 +303,9 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
298 303
299static int 304static int
300psp_v10_0_sram_map(struct amdgpu_device *adev, 305psp_v10_0_sram_map(struct amdgpu_device *adev,
301 unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, 306 unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
302 unsigned int *sram_data_reg_offset, 307 unsigned int *sram_data_reg_offset,
303 enum AMDGPU_UCODE_ID ucode_id) 308 enum AMDGPU_UCODE_ID ucode_id)
304{ 309{
305 int ret = 0; 310 int ret = 0;
306 311
@@ -383,9 +388,9 @@ psp_v10_0_sram_map(struct amdgpu_device *adev,
383 return ret; 388 return ret;
384} 389}
385 390
386bool psp_v10_0_compare_sram_data(struct psp_context *psp, 391static bool psp_v10_0_compare_sram_data(struct psp_context *psp,
387 struct amdgpu_firmware_info *ucode, 392 struct amdgpu_firmware_info *ucode,
388 enum AMDGPU_UCODE_ID ucode_type) 393 enum AMDGPU_UCODE_ID ucode_type)
389{ 394{
390 int err = 0; 395 int err = 0;
391 unsigned int fw_sram_reg_val = 0; 396 unsigned int fw_sram_reg_val = 0;
@@ -419,8 +424,25 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
419} 424}
420 425
421 426
422int psp_v10_0_mode1_reset(struct psp_context *psp) 427static int psp_v10_0_mode1_reset(struct psp_context *psp)
423{ 428{
424 DRM_INFO("psp mode 1 reset not supported now! \n"); 429 DRM_INFO("psp mode 1 reset not supported now! \n");
425 return -EINVAL; 430 return -EINVAL;
426} 431}
432
433static const struct psp_funcs psp_v10_0_funcs = {
434 .init_microcode = psp_v10_0_init_microcode,
435 .prep_cmd_buf = psp_v10_0_prep_cmd_buf,
436 .ring_init = psp_v10_0_ring_init,
437 .ring_create = psp_v10_0_ring_create,
438 .ring_stop = psp_v10_0_ring_stop,
439 .ring_destroy = psp_v10_0_ring_destroy,
440 .cmd_submit = psp_v10_0_cmd_submit,
441 .compare_sram_data = psp_v10_0_compare_sram_data,
442 .mode1_reset = psp_v10_0_mode1_reset,
443};
444
445void psp_v10_0_set_psp_funcs(struct psp_context *psp)
446{
447 psp->funcs = &psp_v10_0_funcs;
448}
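
Editor's note: all psp_v10_0 entry points become static and are reached only through the const psp_funcs table installed by psp_v10_0_set_psp_funcs(), mirroring the gmc_funcs and pp_funcs changes elsewhere in the series. A stripped-down standalone illustration of that ops-table pattern, with invented names:

#include <stdio.h>

struct psp_like_context;

struct ops {
	int (*ring_init)(struct psp_like_context *ctx);
	int (*mode1_reset)(struct psp_like_context *ctx);
};

struct psp_like_context {
	const struct ops *funcs;
};

/* implementations stay file-local ("static" in the driver) */
static int my_ring_init(struct psp_like_context *ctx)   { (void)ctx; return 0; }
static int my_mode1_reset(struct psp_like_context *ctx) { (void)ctx; return -1; }

static const struct ops my_ops = {
	.ring_init   = my_ring_init,
	.mode1_reset = my_mode1_reset,
};

/* the only exported symbol: install the table */
void set_funcs(struct psp_like_context *ctx)
{
	ctx->funcs = &my_ops;
}

int main(void)
{
	struct psp_like_context ctx;

	set_funcs(&ctx);
	printf("ring_init -> %d, mode1_reset -> %d\n",
	       ctx.funcs->ring_init(&ctx), ctx.funcs->mode1_reset(&ctx));
	return 0;
}
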
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
index 451e8308303f..20c2a94859d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
@@ -27,24 +27,6 @@
27 27
28#include "amdgpu_psp.h" 28#include "amdgpu_psp.h"
29 29
30extern int psp_v10_0_init_microcode(struct psp_context *psp); 30void psp_v10_0_set_psp_funcs(struct psp_context *psp);
31extern int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
32 struct psp_gfx_cmd_resp *cmd);
33extern int psp_v10_0_ring_init(struct psp_context *psp,
34 enum psp_ring_type ring_type);
35extern int psp_v10_0_ring_create(struct psp_context *psp,
36 enum psp_ring_type ring_type);
37extern int psp_v10_0_ring_stop(struct psp_context *psp,
38 enum psp_ring_type ring_type);
39extern int psp_v10_0_ring_destroy(struct psp_context *psp,
40 enum psp_ring_type ring_type);
41extern int psp_v10_0_cmd_submit(struct psp_context *psp,
42 struct amdgpu_firmware_info *ucode,
43 uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
44 int index);
45extern bool psp_v10_0_compare_sram_data(struct psp_context *psp,
46 struct amdgpu_firmware_info *ucode,
47 enum AMDGPU_UCODE_ID ucode_type);
48 31
49extern int psp_v10_0_mode1_reset(struct psp_context *psp);
50#endif 32#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 19bd1934e63d..690b9766d8ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -93,7 +93,7 @@ psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *t
93 return 0; 93 return 0;
94} 94}
95 95
96int psp_v3_1_init_microcode(struct psp_context *psp) 96static int psp_v3_1_init_microcode(struct psp_context *psp)
97{ 97{
98 struct amdgpu_device *adev = psp->adev; 98 struct amdgpu_device *adev = psp->adev;
99 const char *chip_name; 99 const char *chip_name;
@@ -161,7 +161,7 @@ out:
161 return err; 161 return err;
162} 162}
163 163
164int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) 164static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
165{ 165{
166 int ret; 166 int ret;
167 uint32_t psp_gfxdrv_command_reg = 0; 167 uint32_t psp_gfxdrv_command_reg = 0;
@@ -202,7 +202,7 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
202 return ret; 202 return ret;
203} 203}
204 204
205int psp_v3_1_bootloader_load_sos(struct psp_context *psp) 205static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
206{ 206{
207 int ret; 207 int ret;
208 unsigned int psp_gfxdrv_command_reg = 0; 208 unsigned int psp_gfxdrv_command_reg = 0;
@@ -243,7 +243,8 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
243 return ret; 243 return ret;
244} 244}
245 245
246int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd) 246static int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
247 struct psp_gfx_cmd_resp *cmd)
247{ 248{
248 int ret; 249 int ret;
249 uint64_t fw_mem_mc_addr = ucode->mc_addr; 250 uint64_t fw_mem_mc_addr = ucode->mc_addr;
@@ -262,7 +263,8 @@ int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd
262 return ret; 263 return ret;
263} 264}
264 265
265int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) 266static int psp_v3_1_ring_init(struct psp_context *psp,
267 enum psp_ring_type ring_type)
266{ 268{
267 int ret = 0; 269 int ret = 0;
268 struct psp_ring *ring; 270 struct psp_ring *ring;
@@ -287,7 +289,8 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
287 return 0; 289 return 0;
288} 290}
289 291
290int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) 292static int psp_v3_1_ring_create(struct psp_context *psp,
293 enum psp_ring_type ring_type)
291{ 294{
292 int ret = 0; 295 int ret = 0;
293 unsigned int psp_ring_reg = 0; 296 unsigned int psp_ring_reg = 0;
@@ -318,7 +321,8 @@ int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
318 return ret; 321 return ret;
319} 322}
320 323
321int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) 324static int psp_v3_1_ring_stop(struct psp_context *psp,
325 enum psp_ring_type ring_type)
322{ 326{
323 int ret = 0; 327 int ret = 0;
324 struct psp_ring *ring; 328 struct psp_ring *ring;
@@ -341,7 +345,8 @@ int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
341 return ret; 345 return ret;
342} 346}
343 347
344int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) 348static int psp_v3_1_ring_destroy(struct psp_context *psp,
349 enum psp_ring_type ring_type)
345{ 350{
346 int ret = 0; 351 int ret = 0;
347 struct psp_ring *ring = &psp->km_ring; 352 struct psp_ring *ring = &psp->km_ring;
@@ -358,10 +363,10 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
358 return ret; 363 return ret;
359} 364}
360 365
361int psp_v3_1_cmd_submit(struct psp_context *psp, 366static int psp_v3_1_cmd_submit(struct psp_context *psp,
362 struct amdgpu_firmware_info *ucode, 367 struct amdgpu_firmware_info *ucode,
363 uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, 368 uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
364 int index) 369 int index)
365{ 370{
366 unsigned int psp_write_ptr_reg = 0; 371 unsigned int psp_write_ptr_reg = 0;
367 struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem; 372 struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
@@ -410,9 +415,9 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
410 415
411static int 416static int
412psp_v3_1_sram_map(struct amdgpu_device *adev, 417psp_v3_1_sram_map(struct amdgpu_device *adev,
413 unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, 418 unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
414 unsigned int *sram_data_reg_offset, 419 unsigned int *sram_data_reg_offset,
415 enum AMDGPU_UCODE_ID ucode_id) 420 enum AMDGPU_UCODE_ID ucode_id)
416{ 421{
417 int ret = 0; 422 int ret = 0;
418 423
@@ -495,9 +500,9 @@ psp_v3_1_sram_map(struct amdgpu_device *adev,
495 return ret; 500 return ret;
496} 501}
497 502
498bool psp_v3_1_compare_sram_data(struct psp_context *psp, 503static bool psp_v3_1_compare_sram_data(struct psp_context *psp,
499 struct amdgpu_firmware_info *ucode, 504 struct amdgpu_firmware_info *ucode,
500 enum AMDGPU_UCODE_ID ucode_type) 505 enum AMDGPU_UCODE_ID ucode_type)
501{ 506{
502 int err = 0; 507 int err = 0;
503 unsigned int fw_sram_reg_val = 0; 508 unsigned int fw_sram_reg_val = 0;
@@ -530,7 +535,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
530 return true; 535 return true;
531} 536}
532 537
533bool psp_v3_1_smu_reload_quirk(struct psp_context *psp) 538static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
534{ 539{
535 struct amdgpu_device *adev = psp->adev; 540 struct amdgpu_device *adev = psp->adev;
536 uint32_t reg; 541 uint32_t reg;
@@ -541,7 +546,7 @@ bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
541 return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false; 546 return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
542} 547}
543 548
544int psp_v3_1_mode1_reset(struct psp_context *psp) 549static int psp_v3_1_mode1_reset(struct psp_context *psp)
545{ 550{
546 int ret; 551 int ret;
547 uint32_t offset; 552 uint32_t offset;
@@ -574,3 +579,23 @@ int psp_v3_1_mode1_reset(struct psp_context *psp)
574 579
575 return 0; 580 return 0;
576} 581}
582
583static const struct psp_funcs psp_v3_1_funcs = {
584 .init_microcode = psp_v3_1_init_microcode,
585 .bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
586 .bootloader_load_sos = psp_v3_1_bootloader_load_sos,
587 .prep_cmd_buf = psp_v3_1_prep_cmd_buf,
588 .ring_init = psp_v3_1_ring_init,
589 .ring_create = psp_v3_1_ring_create,
590 .ring_stop = psp_v3_1_ring_stop,
591 .ring_destroy = psp_v3_1_ring_destroy,
592 .cmd_submit = psp_v3_1_cmd_submit,
593 .compare_sram_data = psp_v3_1_compare_sram_data,
594 .smu_reload_quirk = psp_v3_1_smu_reload_quirk,
595 .mode1_reset = psp_v3_1_mode1_reset,
596};
597
598void psp_v3_1_set_psp_funcs(struct psp_context *psp)
599{
600 psp->funcs = &psp_v3_1_funcs;
601}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
index b05dbada7751..e411e31ba452 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
@@ -32,26 +32,6 @@ enum { PSP_BINARY_ALIGNMENT = 64 };
32enum { PSP_BOOTLOADER_1_MEG_ALIGNMENT = 0x100000 }; 32enum { PSP_BOOTLOADER_1_MEG_ALIGNMENT = 0x100000 };
33enum { PSP_BOOTLOADER_8_MEM_ALIGNMENT = 0x800000 }; 33enum { PSP_BOOTLOADER_8_MEM_ALIGNMENT = 0x800000 };
34 34
35extern int psp_v3_1_init_microcode(struct psp_context *psp); 35void psp_v3_1_set_psp_funcs(struct psp_context *psp);
36extern int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp); 36
37extern int psp_v3_1_bootloader_load_sos(struct psp_context *psp);
38extern int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
39 struct psp_gfx_cmd_resp *cmd);
40extern int psp_v3_1_ring_init(struct psp_context *psp,
41 enum psp_ring_type ring_type);
42extern int psp_v3_1_ring_create(struct psp_context *psp,
43 enum psp_ring_type ring_type);
44extern int psp_v3_1_ring_stop(struct psp_context *psp,
45 enum psp_ring_type ring_type);
46extern int psp_v3_1_ring_destroy(struct psp_context *psp,
47 enum psp_ring_type ring_type);
48extern int psp_v3_1_cmd_submit(struct psp_context *psp,
49 struct amdgpu_firmware_info *ucode,
50 uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
51 int index);
52extern bool psp_v3_1_compare_sram_data(struct psp_context *psp,
53 struct amdgpu_firmware_info *ucode,
54 enum AMDGPU_UCODE_ID ucode_type);
55extern bool psp_v3_1_smu_reload_quirk(struct psp_context *psp);
56extern int psp_v3_1_mode1_reset(struct psp_context *psp);
57#endif 37#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index d4787ad4d346..6452101c7aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -289,13 +289,6 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
289 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ 289 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
290} 290}
291 291
292static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
293{
294 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
295 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
296 amdgpu_ring_write(ring, mmHDP_DEBUG0);
297 amdgpu_ring_write(ring, 1);
298}
299/** 292/**
300 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring 293 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
301 * 294 *
@@ -346,7 +339,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
346 339
347 if ((adev->mman.buffer_funcs_ring == sdma0) || 340 if ((adev->mman.buffer_funcs_ring == sdma0) ||
348 (adev->mman.buffer_funcs_ring == sdma1)) 341 (adev->mman.buffer_funcs_ring == sdma1))
349 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 342 amdgpu_ttm_set_buffer_funcs_status(adev, false);
350 343
351 for (i = 0; i < adev->sdma.num_instances; i++) { 344 for (i = 0; i < adev->sdma.num_instances; i++) {
352 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); 345 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -491,7 +484,7 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
491 } 484 }
492 485
493 if (adev->mman.buffer_funcs_ring == ring) 486 if (adev->mman.buffer_funcs_ring == ring)
494 amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); 487 amdgpu_ttm_set_buffer_funcs_status(adev, true);
495 } 488 }
496 489
497 return 0; 490 return 0;
@@ -861,20 +854,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
861static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, 854static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
862 unsigned vmid, uint64_t pd_addr) 855 unsigned vmid, uint64_t pd_addr)
863{ 856{
864 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | 857 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
865 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
866 if (vmid < 8) {
867 amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
868 } else {
869 amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
870 }
871 amdgpu_ring_write(ring, pd_addr >> 12);
872
873 /* flush TLB */
874 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
875 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
876 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
877 amdgpu_ring_write(ring, 1 << vmid);
878 858
879 /* wait for flush */ 859 /* wait for flush */
880 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | 860 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
@@ -888,6 +868,15 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
888 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ 868 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
889} 869}
890 870
871static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
872 uint32_t reg, uint32_t val)
873{
874 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
875 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
876 amdgpu_ring_write(ring, reg);
877 amdgpu_ring_write(ring, val);
878}
879
891static int sdma_v2_4_early_init(void *handle) 880static int sdma_v2_4_early_init(void *handle)
892{ 881{
893 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 882 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1203,9 +1192,9 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
1203 .set_wptr = sdma_v2_4_ring_set_wptr, 1192 .set_wptr = sdma_v2_4_ring_set_wptr,
1204 .emit_frame_size = 1193 .emit_frame_size =
1205 6 + /* sdma_v2_4_ring_emit_hdp_flush */ 1194 6 + /* sdma_v2_4_ring_emit_hdp_flush */
1206 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */ 1195 3 + /* hdp invalidate */
1207 6 + /* sdma_v2_4_ring_emit_pipeline_sync */ 1196 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
1208 12 + /* sdma_v2_4_ring_emit_vm_flush */ 1197 VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
1209 10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */ 1198 10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
1210 .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */ 1199 .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
1211 .emit_ib = sdma_v2_4_ring_emit_ib, 1200 .emit_ib = sdma_v2_4_ring_emit_ib,
@@ -1213,11 +1202,11 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
1213 .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync, 1202 .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
1214 .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, 1203 .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
1215 .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush, 1204 .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
1216 .emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
1217 .test_ring = sdma_v2_4_ring_test_ring, 1205 .test_ring = sdma_v2_4_ring_test_ring,
1218 .test_ib = sdma_v2_4_ring_test_ib, 1206 .test_ib = sdma_v2_4_ring_test_ib,
1219 .insert_nop = sdma_v2_4_ring_insert_nop, 1207 .insert_nop = sdma_v2_4_ring_insert_nop,
1220 .pad_ib = sdma_v2_4_ring_pad_ib, 1208 .pad_ib = sdma_v2_4_ring_pad_ib,
1209 .emit_wreg = sdma_v2_4_ring_emit_wreg,
1221}; 1210};
1222 1211
1223static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) 1212static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1316,9 +1305,6 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
1316 .copy_pte = sdma_v2_4_vm_copy_pte, 1305 .copy_pte = sdma_v2_4_vm_copy_pte,
1317 1306
1318 .write_pte = sdma_v2_4_vm_write_pte, 1307 .write_pte = sdma_v2_4_vm_write_pte,
1319
1320 .set_max_nums_pte_pde = 0x1fffff >> 3,
1321 .set_pte_pde_num_dw = 10,
1322 .set_pte_pde = sdma_v2_4_vm_set_pte_pde, 1308 .set_pte_pde = sdma_v2_4_vm_set_pte_pde,
1323}; 1309};
1324 1310
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 521978c40537..ecaef084dab1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -460,14 +460,6 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
460 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ 460 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
461} 461}
462 462
463static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
464{
465 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
466 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
467 amdgpu_ring_write(ring, mmHDP_DEBUG0);
468 amdgpu_ring_write(ring, 1);
469}
470
471/** 463/**
472 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring 464 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
473 * 465 *
@@ -518,7 +510,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
518 510
519 if ((adev->mman.buffer_funcs_ring == sdma0) || 511 if ((adev->mman.buffer_funcs_ring == sdma0) ||
520 (adev->mman.buffer_funcs_ring == sdma1)) 512 (adev->mman.buffer_funcs_ring == sdma1))
521 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 513 amdgpu_ttm_set_buffer_funcs_status(adev, false);
522 514
523 for (i = 0; i < adev->sdma.num_instances; i++) { 515 for (i = 0; i < adev->sdma.num_instances; i++) {
524 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); 516 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -719,14 +711,17 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
719 WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i], 711 WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
720 upper_32_bits(wptr_gpu_addr)); 712 upper_32_bits(wptr_gpu_addr));
721 wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]); 713 wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
722 if (ring->use_pollmem) 714 if (ring->use_pollmem) {
 715 /* wptr polling is not fast enough, directly clear the wptr register */
716 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
723 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, 717 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
724 SDMA0_GFX_RB_WPTR_POLL_CNTL, 718 SDMA0_GFX_RB_WPTR_POLL_CNTL,
725 ENABLE, 1); 719 ENABLE, 1);
726 else 720 } else {
727 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, 721 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
728 SDMA0_GFX_RB_WPTR_POLL_CNTL, 722 SDMA0_GFX_RB_WPTR_POLL_CNTL,
729 ENABLE, 0); 723 ENABLE, 0);
724 }
730 WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl); 725 WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
731 726
732 /* enable DMA RB */ 727 /* enable DMA RB */
@@ -758,7 +753,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
758 } 753 }
759 754
760 if (adev->mman.buffer_funcs_ring == ring) 755 if (adev->mman.buffer_funcs_ring == ring)
761 amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); 756 amdgpu_ttm_set_buffer_funcs_status(adev, true);
762 } 757 }
763 758
764 return 0; 759 return 0;
@@ -1127,20 +1122,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1127static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1122static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1128 unsigned vmid, uint64_t pd_addr) 1123 unsigned vmid, uint64_t pd_addr)
1129{ 1124{
1130 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | 1125 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1131 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1132 if (vmid < 8) {
1133 amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
1134 } else {
1135 amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
1136 }
1137 amdgpu_ring_write(ring, pd_addr >> 12);
1138
1139 /* flush TLB */
1140 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1141 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1142 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
1143 amdgpu_ring_write(ring, 1 << vmid);
1144 1126
1145 /* wait for flush */ 1127 /* wait for flush */
1146 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | 1128 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
@@ -1154,6 +1136,15 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1154 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ 1136 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
1155} 1137}
1156 1138
1139static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
1140 uint32_t reg, uint32_t val)
1141{
1142 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1143 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1144 amdgpu_ring_write(ring, reg);
1145 amdgpu_ring_write(ring, val);
1146}
1147
1157static int sdma_v3_0_early_init(void *handle) 1148static int sdma_v3_0_early_init(void *handle)
1158{ 1149{
1159 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1150 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1637,9 +1628,9 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
1637 .set_wptr = sdma_v3_0_ring_set_wptr, 1628 .set_wptr = sdma_v3_0_ring_set_wptr,
1638 .emit_frame_size = 1629 .emit_frame_size =
1639 6 + /* sdma_v3_0_ring_emit_hdp_flush */ 1630 6 + /* sdma_v3_0_ring_emit_hdp_flush */
1640 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */ 1631 3 + /* hdp invalidate */
1641 6 + /* sdma_v3_0_ring_emit_pipeline_sync */ 1632 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
1642 12 + /* sdma_v3_0_ring_emit_vm_flush */ 1633 VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v3_0_ring_emit_vm_flush */
1643 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */ 1634 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
1644 .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */ 1635 .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
1645 .emit_ib = sdma_v3_0_ring_emit_ib, 1636 .emit_ib = sdma_v3_0_ring_emit_ib,
@@ -1647,11 +1638,11 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
1647 .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync, 1638 .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
1648 .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, 1639 .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
1649 .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush, 1640 .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
1650 .emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,
1651 .test_ring = sdma_v3_0_ring_test_ring, 1641 .test_ring = sdma_v3_0_ring_test_ring,
1652 .test_ib = sdma_v3_0_ring_test_ib, 1642 .test_ib = sdma_v3_0_ring_test_ib,
1653 .insert_nop = sdma_v3_0_ring_insert_nop, 1643 .insert_nop = sdma_v3_0_ring_insert_nop,
1654 .pad_ib = sdma_v3_0_ring_pad_ib, 1644 .pad_ib = sdma_v3_0_ring_pad_ib,
1645 .emit_wreg = sdma_v3_0_ring_emit_wreg,
1655}; 1646};
1656 1647
1657static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) 1648static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1750,10 +1741,6 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
1750 .copy_pte = sdma_v3_0_vm_copy_pte, 1741 .copy_pte = sdma_v3_0_vm_copy_pte,
1751 1742
1752 .write_pte = sdma_v3_0_vm_write_pte, 1743 .write_pte = sdma_v3_0_vm_write_pte,
1753
1754 /* not 0x3fffff due to HW limitation */
1755 .set_max_nums_pte_pde = 0x3fffe0 >> 3,
1756 .set_pte_pde_num_dw = 10,
1757 .set_pte_pde = sdma_v3_0_vm_set_pte_pde, 1744 .set_pte_pde = sdma_v3_0_vm_set_pte_pde,
1758}; 1745};
1759 1746
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e92fb372bc99..9448c45d1b60 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -31,8 +31,6 @@
31#include "sdma0/sdma0_4_0_sh_mask.h" 31#include "sdma0/sdma0_4_0_sh_mask.h"
32#include "sdma1/sdma1_4_0_offset.h" 32#include "sdma1/sdma1_4_0_offset.h"
33#include "sdma1/sdma1_4_0_sh_mask.h" 33#include "sdma1/sdma1_4_0_sh_mask.h"
34#include "mmhub/mmhub_1_0_offset.h"
35#include "mmhub/mmhub_1_0_sh_mask.h"
36#include "hdp/hdp_4_0_offset.h" 34#include "hdp/hdp_4_0_offset.h"
37#include "sdma0/sdma0_4_1_default.h" 35#include "sdma0/sdma0_4_1_default.h"
38 36
@@ -238,31 +236,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
238static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) 236static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
239{ 237{
240 struct amdgpu_device *adev = ring->adev; 238 struct amdgpu_device *adev = ring->adev;
241 u64 *wptr = NULL; 239 u64 wptr;
242 uint64_t local_wptr = 0;
243 240
244 if (ring->use_doorbell) { 241 if (ring->use_doorbell) {
245 /* XXX check if swapping is necessary on BE */ 242 /* XXX check if swapping is necessary on BE */
246 wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); 243 wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
247 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); 244 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
248 *wptr = (*wptr) >> 2;
249 DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
250 } else { 245 } else {
251 u32 lowbit, highbit; 246 u32 lowbit, highbit;
252 int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; 247 int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
253 248
254 wptr = &local_wptr;
255 lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2; 249 lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
256 highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; 250 highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
257 251
258 DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", 252 DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
259 me, highbit, lowbit); 253 me, highbit, lowbit);
260 *wptr = highbit; 254 wptr = highbit;
261 *wptr = (*wptr) << 32; 255 wptr = wptr << 32;
262 *wptr |= lowbit; 256 wptr |= lowbit;
263 } 257 }
264 258
265 return *wptr; 259 return wptr >> 2;
266} 260}
267 261
268/** 262/**
@@ -375,16 +369,6 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
375 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ 369 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
376} 370}
377 371
378static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
379{
380 struct amdgpu_device *adev = ring->adev;
381
382 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
383 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
384 amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
385 amdgpu_ring_write(ring, 1);
386}
387
388/** 372/**
389 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring 373 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
390 * 374 *
@@ -440,7 +424,7 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
440 424
441 if ((adev->mman.buffer_funcs_ring == sdma0) || 425 if ((adev->mman.buffer_funcs_ring == sdma0) ||
442 (adev->mman.buffer_funcs_ring == sdma1)) 426 (adev->mman.buffer_funcs_ring == sdma1))
443 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 427 amdgpu_ttm_set_buffer_funcs_status(adev, false);
444 428
445 for (i = 0; i < adev->sdma.num_instances; i++) { 429 for (i = 0; i < adev->sdma.num_instances; i++) {
446 rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); 430 rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -682,7 +666,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
682 } 666 }
683 667
684 if (adev->mman.buffer_funcs_ring == ring) 668 if (adev->mman.buffer_funcs_ring == ring)
685 amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); 669 amdgpu_ttm_set_buffer_funcs_status(adev, true);
686 670
687 } 671 }
688 672
@@ -1135,38 +1119,28 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1135static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1119static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1136 unsigned vmid, uint64_t pd_addr) 1120 unsigned vmid, uint64_t pd_addr)
1137{ 1121{
1138 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; 1122 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1139 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); 1123}
1140 uint64_t flags = AMDGPU_PTE_VALID;
1141 unsigned eng = ring->vm_inv_eng;
1142
1143 amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
1144 pd_addr |= flags;
1145
1146 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1147 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1148 amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2);
1149 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1150
1151 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1152 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1153 amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vmid * 2);
1154 amdgpu_ring_write(ring, upper_32_bits(pd_addr));
1155 1124
1156 /* flush TLB */ 1125static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
1126 uint32_t reg, uint32_t val)
1127{
1157 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | 1128 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1158 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); 1129 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1159 amdgpu_ring_write(ring, hub->vm_inv_eng0_req + eng); 1130 amdgpu_ring_write(ring, reg);
1160 amdgpu_ring_write(ring, req); 1131 amdgpu_ring_write(ring, val);
1132}
1161 1133
1162 /* wait for flush */ 1134static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1135 uint32_t val, uint32_t mask)
1136{
1163 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | 1137 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1164 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | 1138 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1165 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */ 1139 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1166 amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2); 1140 amdgpu_ring_write(ring, reg << 2);
1167 amdgpu_ring_write(ring, 0); 1141 amdgpu_ring_write(ring, 0);
1168 amdgpu_ring_write(ring, 1 << vmid); /* reference */ 1142 amdgpu_ring_write(ring, val); /* reference */
1169 amdgpu_ring_write(ring, 1 << vmid); /* mask */ 1143 amdgpu_ring_write(ring, mask); /* mask */
1170 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 1144 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1171 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); 1145 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1172} 1146}
@@ -1196,13 +1170,13 @@ static int sdma_v4_0_sw_init(void *handle)
1196 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1170 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1197 1171
1198 /* SDMA trap event */ 1172 /* SDMA trap event */
1199 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA0, 224, 1173 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
1200 &adev->sdma.trap_irq); 1174 &adev->sdma.trap_irq);
1201 if (r) 1175 if (r)
1202 return r; 1176 return r;
1203 1177
1204 /* SDMA trap event */ 1178 /* SDMA trap event */
1205 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA1, 224, 1179 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
1206 &adev->sdma.trap_irq); 1180 &adev->sdma.trap_irq);
1207 if (r) 1181 if (r)
1208 return r; 1182 return r;
@@ -1357,7 +1331,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
1357{ 1331{
1358 DRM_DEBUG("IH: SDMA trap\n"); 1332 DRM_DEBUG("IH: SDMA trap\n");
1359 switch (entry->client_id) { 1333 switch (entry->client_id) {
1360 case AMDGPU_IH_CLIENTID_SDMA0: 1334 case SOC15_IH_CLIENTID_SDMA0:
1361 switch (entry->ring_id) { 1335 switch (entry->ring_id) {
1362 case 0: 1336 case 0:
1363 amdgpu_fence_process(&adev->sdma.instance[0].ring); 1337 amdgpu_fence_process(&adev->sdma.instance[0].ring);
@@ -1373,7 +1347,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
1373 break; 1347 break;
1374 } 1348 }
1375 break; 1349 break;
1376 case AMDGPU_IH_CLIENTID_SDMA1: 1350 case SOC15_IH_CLIENTID_SDMA1:
1377 switch (entry->ring_id) { 1351 switch (entry->ring_id) {
1378 case 0: 1352 case 0:
1379 amdgpu_fence_process(&adev->sdma.instance[1].ring); 1353 amdgpu_fence_process(&adev->sdma.instance[1].ring);
@@ -1423,7 +1397,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
1423 if (def != data) 1397 if (def != data)
1424 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data); 1398 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
1425 1399
1426 if (adev->asic_type == CHIP_VEGA10) { 1400 if (adev->sdma.num_instances > 1) {
1427 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL)); 1401 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
1428 data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK | 1402 data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1429 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK | 1403 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
@@ -1451,7 +1425,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
1451 if (def != data) 1425 if (def != data)
1452 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data); 1426 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
1453 1427
1454 if (adev->asic_type == CHIP_VEGA10) { 1428 if (adev->sdma.num_instances > 1) {
1455 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL)); 1429 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
1456 data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK | 1430 data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1457 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK | 1431 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
@@ -1482,7 +1456,7 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
1482 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data); 1456 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
1483 1457
1484 /* 1-not override: enable sdma1 mem light sleep */ 1458 /* 1-not override: enable sdma1 mem light sleep */
1485 if (adev->asic_type == CHIP_VEGA10) { 1459 if (adev->sdma.num_instances > 1) {
1486 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL)); 1460 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
1487 data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; 1461 data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1488 if (def != data) 1462 if (def != data)
@@ -1496,7 +1470,7 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
1496 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data); 1470 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
1497 1471
1498 /* 0-override: disable sdma1 mem light sleep */ 1472 /* 0-override: disable sdma1 mem light sleep */
1499 if (adev->asic_type == CHIP_VEGA10) { 1473 if (adev->sdma.num_instances > 1) {
1500 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL)); 1474 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
1501 data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; 1475 data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1502 if (def != data) 1476 if (def != data)
@@ -1592,9 +1566,11 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
1592 .set_wptr = sdma_v4_0_ring_set_wptr, 1566 .set_wptr = sdma_v4_0_ring_set_wptr,
1593 .emit_frame_size = 1567 .emit_frame_size =
1594 6 + /* sdma_v4_0_ring_emit_hdp_flush */ 1568 6 + /* sdma_v4_0_ring_emit_hdp_flush */
1595 3 + /* sdma_v4_0_ring_emit_hdp_invalidate */ 1569 3 + /* hdp invalidate */
1596 6 + /* sdma_v4_0_ring_emit_pipeline_sync */ 1570 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
1597 18 + /* sdma_v4_0_ring_emit_vm_flush */ 1571 /* sdma_v4_0_ring_emit_vm_flush */
1572 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1573 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
1598 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */ 1574 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
1599 .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */ 1575 .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
1600 .emit_ib = sdma_v4_0_ring_emit_ib, 1576 .emit_ib = sdma_v4_0_ring_emit_ib,
@@ -1602,11 +1578,12 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
1602 .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync, 1578 .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
1603 .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush, 1579 .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
1604 .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush, 1580 .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
1605 .emit_hdp_invalidate = sdma_v4_0_ring_emit_hdp_invalidate,
1606 .test_ring = sdma_v4_0_ring_test_ring, 1581 .test_ring = sdma_v4_0_ring_test_ring,
1607 .test_ib = sdma_v4_0_ring_test_ib, 1582 .test_ib = sdma_v4_0_ring_test_ib,
1608 .insert_nop = sdma_v4_0_ring_insert_nop, 1583 .insert_nop = sdma_v4_0_ring_insert_nop,
1609 .pad_ib = sdma_v4_0_ring_pad_ib, 1584 .pad_ib = sdma_v4_0_ring_pad_ib,
1585 .emit_wreg = sdma_v4_0_ring_emit_wreg,
1586 .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
1610}; 1587};
1611 1588
1612static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) 1589static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1705,9 +1682,6 @@ static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
1705 .copy_pte = sdma_v4_0_vm_copy_pte, 1682 .copy_pte = sdma_v4_0_vm_copy_pte,
1706 1683
1707 .write_pte = sdma_v4_0_vm_write_pte, 1684 .write_pte = sdma_v4_0_vm_write_pte,
1708
1709 .set_max_nums_pte_pde = 0x400000 >> 3,
1710 .set_pte_pde_num_dw = 10,
1711 .set_pte_pde = sdma_v4_0_vm_set_pte_pde, 1685 .set_pte_pde = sdma_v4_0_vm_set_pte_pde,
1712}; 1686};
1713 1687
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 543101d5a5ed..b154667a8fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -31,7 +31,8 @@
31#include "amdgpu_uvd.h" 31#include "amdgpu_uvd.h"
32#include "amdgpu_vce.h" 32#include "amdgpu_vce.h"
33#include "atom.h" 33#include "atom.h"
34#include "amdgpu_powerplay.h" 34#include "amd_pcie.h"
35#include "si_dpm.h"
35#include "sid.h" 36#include "sid.h"
36#include "si_ih.h" 37#include "si_ih.h"
37#include "gfx_v6_0.h" 38#include "gfx_v6_0.h"
@@ -1230,6 +1231,27 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
1230 adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; 1231 adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
1231} 1232}
1232 1233
1234static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
1235{
1236 if (!ring || !ring->funcs->emit_wreg) {
1237 WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1238 RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
1239 } else {
1240 amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1241 }
1242}
1243
1244static void si_invalidate_hdp(struct amdgpu_device *adev,
1245 struct amdgpu_ring *ring)
1246{
1247 if (!ring || !ring->funcs->emit_wreg) {
1248 WREG32(mmHDP_DEBUG0, 1);
1249 RREG32(mmHDP_DEBUG0);
1250 } else {
1251 amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
1252 }
1253}
1254
1233static const struct amdgpu_asic_funcs si_asic_funcs = 1255static const struct amdgpu_asic_funcs si_asic_funcs =
1234{ 1256{
1235 .read_disabled_bios = &si_read_disabled_bios, 1257 .read_disabled_bios = &si_read_disabled_bios,
@@ -1241,6 +1263,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
1241 .set_uvd_clocks = &si_set_uvd_clocks, 1263 .set_uvd_clocks = &si_set_uvd_clocks,
1242 .set_vce_clocks = NULL, 1264 .set_vce_clocks = NULL,
1243 .get_config_memsize = &si_get_config_memsize, 1265 .get_config_memsize = &si_get_config_memsize,
1266 .flush_hdp = &si_flush_hdp,
1267 .invalidate_hdp = &si_invalidate_hdp,
1244}; 1268};
1245 1269
1246static uint32_t si_get_rev_id(struct amdgpu_device *adev) 1270static uint32_t si_get_rev_id(struct amdgpu_device *adev)
@@ -1461,8 +1485,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
1461{ 1485{
1462 struct pci_dev *root = adev->pdev->bus->self; 1486 struct pci_dev *root = adev->pdev->bus->self;
1463 int bridge_pos, gpu_pos; 1487 int bridge_pos, gpu_pos;
1464 u32 speed_cntl, mask, current_data_rate; 1488 u32 speed_cntl, current_data_rate;
1465 int ret, i; 1489 int i;
1466 u16 tmp16; 1490 u16 tmp16;
1467 1491
1468 if (pci_is_root_bus(adev->pdev->bus)) 1492 if (pci_is_root_bus(adev->pdev->bus))
@@ -1474,23 +1498,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
1474 if (adev->flags & AMD_IS_APU) 1498 if (adev->flags & AMD_IS_APU)
1475 return; 1499 return;
1476 1500
1477 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 1501 if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1478 if (ret != 0) 1502 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
1479 return;
1480
1481 if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
1482 return; 1503 return;
1483 1504
1484 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 1505 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1485 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >> 1506 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
1486 LC_CURRENT_DATA_RATE_SHIFT; 1507 LC_CURRENT_DATA_RATE_SHIFT;
1487 if (mask & DRM_PCIE_SPEED_80) { 1508 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
1488 if (current_data_rate == 2) { 1509 if (current_data_rate == 2) {
1489 DRM_INFO("PCIE gen 3 link speeds already enabled\n"); 1510 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
1490 return; 1511 return;
1491 } 1512 }
1492 DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); 1513 DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
1493 } else if (mask & DRM_PCIE_SPEED_50) { 1514 } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
1494 if (current_data_rate == 1) { 1515 if (current_data_rate == 1) {
1495 DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 1516 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
1496 return; 1517 return;
@@ -1506,7 +1527,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
1506 if (!gpu_pos) 1527 if (!gpu_pos)
1507 return; 1528 return;
1508 1529
1509 if (mask & DRM_PCIE_SPEED_80) { 1530 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
1510 if (current_data_rate != 2) { 1531 if (current_data_rate != 2) {
1511 u16 bridge_cfg, gpu_cfg; 1532 u16 bridge_cfg, gpu_cfg;
1512 u16 bridge_cfg2, gpu_cfg2; 1533 u16 bridge_cfg2, gpu_cfg2;
@@ -1589,9 +1610,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
1589 1610
1590 pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); 1611 pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
1591 tmp16 &= ~0xf; 1612 tmp16 &= ~0xf;
1592 if (mask & DRM_PCIE_SPEED_80) 1613 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1593 tmp16 |= 3; 1614 tmp16 |= 3;
1594 else if (mask & DRM_PCIE_SPEED_50) 1615 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1595 tmp16 |= 2; 1616 tmp16 |= 2;
1596 else 1617 else
1597 tmp16 |= 1; 1618 tmp16 |= 1;
@@ -1962,7 +1983,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
1962 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 1983 amdgpu_device_ip_block_add(adev, &si_common_ip_block);
1963 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 1984 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
1964 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 1985 amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
1965 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1986 amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
1966 if (adev->enable_virtual_display) 1987 if (adev->enable_virtual_display)
1967 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1988 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1968 else 1989 else
@@ -1976,7 +1997,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
1976 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 1997 amdgpu_device_ip_block_add(adev, &si_common_ip_block);
1977 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 1998 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
1978 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 1999 amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
1979 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2000 amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
1980 if (adev->enable_virtual_display) 2001 if (adev->enable_virtual_display)
1981 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2002 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1982 else 2003 else
@@ -1990,7 +2011,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
1990 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 2011 amdgpu_device_ip_block_add(adev, &si_common_ip_block);
1991 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 2012 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
1992 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 2013 amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
1993 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2014 amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
1994 if (adev->enable_virtual_display) 2015 if (adev->enable_virtual_display)
1995 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2016 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1996 amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); 2017 amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 589225080c24..06ed7212a0d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -24,6 +24,8 @@
24#ifndef __SI_H__ 24#ifndef __SI_H__
25#define __SI_H__ 25#define __SI_H__
26 26
27#define SI_FLUSH_GPU_TLB_NUM_WREG 2
28
27void si_srbm_select(struct amdgpu_device *adev, 29void si_srbm_select(struct amdgpu_device *adev,
28 u32 me, u32 pipe, u32 queue, u32 vmid); 30 u32 me, u32 pipe, u32 queue, u32 vmid);
29int si_set_ip_blocks(struct amdgpu_device *adev); 31int si_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 9a29c1399091..b75d901ba3c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -24,6 +24,7 @@
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include "amdgpu.h" 25#include "amdgpu.h"
26#include "amdgpu_trace.h" 26#include "amdgpu_trace.h"
27#include "si.h"
27#include "sid.h" 28#include "sid.h"
28 29
29const u32 sdma_offsets[SDMA_MAX_INSTANCE] = 30const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -74,20 +75,6 @@ static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
74 75
75} 76}
76 77
77static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
78{
79 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
80 amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
81 amdgpu_ring_write(ring, 1);
82}
83
84static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
85{
86 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
87 amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
88 amdgpu_ring_write(ring, 1);
89}
90
91/** 78/**
92 * si_dma_ring_emit_fence - emit a fence on the DMA ring 79 * si_dma_ring_emit_fence - emit a fence on the DMA ring
93 * 80 *
@@ -134,7 +121,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
134 WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl); 121 WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
135 122
136 if (adev->mman.buffer_funcs_ring == ring) 123 if (adev->mman.buffer_funcs_ring == ring)
137 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 124 amdgpu_ttm_set_buffer_funcs_status(adev, false);
138 ring->ready = false; 125 ring->ready = false;
139 } 126 }
140} 127}
@@ -197,7 +184,7 @@ static int si_dma_start(struct amdgpu_device *adev)
197 } 184 }
198 185
199 if (adev->mman.buffer_funcs_ring == ring) 186 if (adev->mman.buffer_funcs_ring == ring)
200 amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); 187 amdgpu_ttm_set_buffer_funcs_status(adev, true);
201 } 188 }
202 189
203 return 0; 190 return 0;
@@ -475,17 +462,7 @@ static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
475static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring, 462static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
476 unsigned vmid, uint64_t pd_addr) 463 unsigned vmid, uint64_t pd_addr)
477{ 464{
478 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); 465 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
479 if (vmid < 8)
480 amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
481 else
482 amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
483 amdgpu_ring_write(ring, pd_addr >> 12);
484
485 /* bits 0-7 are the VM contexts0-7 */
486 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
487 amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
488 amdgpu_ring_write(ring, 1 << vmid);
489 466
490 /* wait for invalidate to complete */ 467 /* wait for invalidate to complete */
491 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); 468 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
@@ -496,6 +473,14 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
496 amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ 473 amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
497} 474}
498 475
476static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
477 uint32_t reg, uint32_t val)
478{
479 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
480 amdgpu_ring_write(ring, (0xf << 16) | reg);
481 amdgpu_ring_write(ring, val);
482}
483
499static int si_dma_early_init(void *handle) 484static int si_dma_early_init(void *handle)
500{ 485{
501 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 486 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -772,22 +757,20 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
772 .get_wptr = si_dma_ring_get_wptr, 757 .get_wptr = si_dma_ring_get_wptr,
773 .set_wptr = si_dma_ring_set_wptr, 758 .set_wptr = si_dma_ring_set_wptr,
774 .emit_frame_size = 759 .emit_frame_size =
775 3 + /* si_dma_ring_emit_hdp_flush */ 760 3 + 3 + /* hdp flush / invalidate */
776 3 + /* si_dma_ring_emit_hdp_invalidate */
777 6 + /* si_dma_ring_emit_pipeline_sync */ 761 6 + /* si_dma_ring_emit_pipeline_sync */
778 12 + /* si_dma_ring_emit_vm_flush */ 762 SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
779 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */ 763 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
780 .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */ 764 .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
781 .emit_ib = si_dma_ring_emit_ib, 765 .emit_ib = si_dma_ring_emit_ib,
782 .emit_fence = si_dma_ring_emit_fence, 766 .emit_fence = si_dma_ring_emit_fence,
783 .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync, 767 .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
784 .emit_vm_flush = si_dma_ring_emit_vm_flush, 768 .emit_vm_flush = si_dma_ring_emit_vm_flush,
785 .emit_hdp_flush = si_dma_ring_emit_hdp_flush,
786 .emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
787 .test_ring = si_dma_ring_test_ring, 769 .test_ring = si_dma_ring_test_ring,
788 .test_ib = si_dma_ring_test_ib, 770 .test_ib = si_dma_ring_test_ib,
789 .insert_nop = amdgpu_ring_insert_nop, 771 .insert_nop = amdgpu_ring_insert_nop,
790 .pad_ib = si_dma_ring_pad_ib, 772 .pad_ib = si_dma_ring_pad_ib,
773 .emit_wreg = si_dma_ring_emit_wreg,
791}; 774};
792 775
793static void si_dma_set_ring_funcs(struct amdgpu_device *adev) 776static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -891,9 +874,6 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
891 .copy_pte = si_dma_vm_copy_pte, 874 .copy_pte = si_dma_vm_copy_pte,
892 875
893 .write_pte = si_dma_vm_write_pte, 876 .write_pte = si_dma_vm_write_pte,
894
895 .set_max_nums_pte_pde = 0xffff8 >> 3,
896 .set_pte_pde_num_dw = 9,
897 .set_pte_pde = si_dma_vm_set_pte_pde, 877 .set_pte_pde = si_dma_vm_set_pte_pde,
898}; 878};
899 879
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index ce675a7f179a..3bfcf0d257ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -26,6 +26,7 @@
26#include "amdgpu_pm.h" 26#include "amdgpu_pm.h"
27#include "amdgpu_dpm.h" 27#include "amdgpu_dpm.h"
28#include "amdgpu_atombios.h" 28#include "amdgpu_atombios.h"
29#include "amd_pcie.h"
29#include "sid.h" 30#include "sid.h"
30#include "r600_dpm.h" 31#include "r600_dpm.h"
31#include "si_dpm.h" 32#include "si_dpm.h"
@@ -66,6 +67,8 @@ MODULE_FIRMWARE("radeon/hainan_smc.bin");
66MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 67MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
67MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); 68MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
68 69
70static const struct amd_pm_funcs si_dpm_funcs;
71
69union power_info { 72union power_info {
70 struct _ATOM_POWERPLAY_INFO info; 73 struct _ATOM_POWERPLAY_INFO info;
71 struct _ATOM_POWERPLAY_INFO_V2 info_2; 74 struct _ATOM_POWERPLAY_INFO_V2 info_2;
@@ -3064,7 +3067,7 @@ static bool si_dpm_vblank_too_short(void *handle)
3064 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3067 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3065 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); 3068 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
3066 /* we never hit the non-gddr5 limit so disable it */ 3069 /* we never hit the non-gddr5 limit so disable it */
3067 u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0; 3070 u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
3068 3071
3069 if (vblank_time < switch_limit) 3072 if (vblank_time < switch_limit)
3070 return true; 3073 return true;
@@ -3331,29 +3334,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
3331 } 3334 }
3332} 3335}
3333 3336
3334static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
3335 u32 sys_mask,
3336 enum amdgpu_pcie_gen asic_gen,
3337 enum amdgpu_pcie_gen default_gen)
3338{
3339 switch (asic_gen) {
3340 case AMDGPU_PCIE_GEN1:
3341 return AMDGPU_PCIE_GEN1;
3342 case AMDGPU_PCIE_GEN2:
3343 return AMDGPU_PCIE_GEN2;
3344 case AMDGPU_PCIE_GEN3:
3345 return AMDGPU_PCIE_GEN3;
3346 default:
3347 if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
3348 return AMDGPU_PCIE_GEN3;
3349 else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
3350 return AMDGPU_PCIE_GEN2;
3351 else
3352 return AMDGPU_PCIE_GEN1;
3353 }
3354 return AMDGPU_PCIE_GEN1;
3355}
3356
3357static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, 3337static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
3358 u32 *p, u32 *u) 3338 u32 *p, u32 *u)
3359{ 3339{
@@ -4350,7 +4330,7 @@ static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
4350 if (mclk <= pi->mclk_strobe_mode_threshold) 4330 if (mclk <= pi->mclk_strobe_mode_threshold)
4351 strobe_mode = true; 4331 strobe_mode = true;
4352 4332
4353 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) 4333 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
4354 result = si_get_mclk_frequency_ratio(mclk, strobe_mode); 4334 result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
4355 else 4335 else
4356 result = si_get_ddr3_mclk_frequency_ratio(mclk); 4336 result = si_get_ddr3_mclk_frequency_ratio(mclk);
@@ -4937,7 +4917,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
4937 table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); 4917 table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
4938 table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen; 4918 table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
4939 4919
4940 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { 4920 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
4941 table->initialState.levels[0].strobeMode = 4921 table->initialState.levels[0].strobeMode =
4942 si_get_strobe_mode_settings(adev, 4922 si_get_strobe_mode_settings(adev,
4943 initial_state->performance_levels[0].mclk); 4923 initial_state->performance_levels[0].mclk);
@@ -5028,10 +5008,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
5028 table->ACPIState.levels[0].vddc.index, 5008 table->ACPIState.levels[0].vddc.index,
5029 &table->ACPIState.levels[0].std_vddc); 5009 &table->ACPIState.levels[0].std_vddc);
5030 } 5010 }
5031 table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev, 5011 table->ACPIState.levels[0].gen2PCIE =
5032 si_pi->sys_pcie_mask, 5012 (u8)amdgpu_get_pcie_gen_support(adev,
5033 si_pi->boot_pcie_gen, 5013 si_pi->sys_pcie_mask,
5034 AMDGPU_PCIE_GEN1); 5014 si_pi->boot_pcie_gen,
5015 AMDGPU_PCIE_GEN1);
5035 5016
5036 if (si_pi->vddc_phase_shed_control) 5017 if (si_pi->vddc_phase_shed_control)
5037 si_populate_phase_shedding_value(adev, 5018 si_populate_phase_shedding_value(adev,
@@ -5208,7 +5189,7 @@ static int si_init_smc_table(struct amdgpu_device *adev)
5208 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) 5189 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
5209 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; 5190 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
5210 5191
5211 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) 5192 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
5212 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 5193 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
5213 5194
5214 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY) 5195 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
@@ -5385,7 +5366,7 @@ static int si_populate_mclk_value(struct amdgpu_device *adev,
5385 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK; 5366 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
5386 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div); 5367 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
5387 5368
5388 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { 5369 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
5389 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK); 5370 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
5390 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) | 5371 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
5391 YCLK_POST_DIV(mpll_param.post_div); 5372 YCLK_POST_DIV(mpll_param.post_div);
@@ -5397,7 +5378,7 @@ static int si_populate_mclk_value(struct amdgpu_device *adev,
5397 u32 tmp; 5378 u32 tmp;
5398 u32 reference_clock = adev->clock.mpll.reference_freq; 5379 u32 reference_clock = adev->clock.mpll.reference_freq;
5399 5380
5400 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) 5381 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
5401 freq_nom = memory_clock * 4; 5382 freq_nom = memory_clock * 4;
5402 else 5383 else
5403 freq_nom = memory_clock * 2; 5384 freq_nom = memory_clock * 2;
@@ -5489,7 +5470,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
5489 level->mcFlags |= SISLANDS_SMC_MC_PG_EN; 5470 level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
5490 } 5471 }
5491 5472
5492 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { 5473 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
5493 if (pl->mclk > pi->mclk_edc_enable_threshold) 5474 if (pl->mclk > pi->mclk_edc_enable_threshold)
5494 level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG; 5475 level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
5495 5476
@@ -5860,12 +5841,12 @@ static int si_set_mc_special_registers(struct amdgpu_device *adev,
5860 table->mc_reg_table_entry[k].mc_data[j] = 5841 table->mc_reg_table_entry[k].mc_data[j] =
5861 (temp_reg & 0xffff0000) | 5842 (temp_reg & 0xffff0000) |
5862 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 5843 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5863 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) 5844 if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
5864 table->mc_reg_table_entry[k].mc_data[j] |= 0x100; 5845 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
5865 } 5846 }
5866 j++; 5847 j++;
5867 5848
5868 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) { 5849 if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
5869 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 5850 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5870 return -EINVAL; 5851 return -EINVAL;
5871 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD; 5852 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
@@ -7168,10 +7149,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
7168 pl->vddc = le16_to_cpu(clock_info->si.usVDDC); 7149 pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
7169 pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); 7150 pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
7170 pl->flags = le32_to_cpu(clock_info->si.ulFlags); 7151 pl->flags = le32_to_cpu(clock_info->si.ulFlags);
7171 pl->pcie_gen = r600_get_pcie_gen_support(adev, 7152 pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
7172 si_pi->sys_pcie_mask, 7153 si_pi->sys_pcie_mask,
7173 si_pi->boot_pcie_gen, 7154 si_pi->boot_pcie_gen,
7174 clock_info->si.ucPCIEGen); 7155 clock_info->si.ucPCIEGen);
7175 7156
7176 /* patch up vddc if necessary */ 7157 /* patch up vddc if necessary */
7177 ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, 7158 ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7326,7 +7307,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
7326 struct si_power_info *si_pi; 7307 struct si_power_info *si_pi;
7327 struct atom_clock_dividers dividers; 7308 struct atom_clock_dividers dividers;
7328 int ret; 7309 int ret;
7329 u32 mask;
7330 7310
7331 si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); 7311 si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
7332 if (si_pi == NULL) 7312 if (si_pi == NULL)
@@ -7336,11 +7316,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
7336 eg_pi = &ni_pi->eg; 7316 eg_pi = &ni_pi->eg;
7337 pi = &eg_pi->rv7xx; 7317 pi = &eg_pi->rv7xx;
7338 7318
7339 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 7319 si_pi->sys_pcie_mask =
7340 if (ret) 7320 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
7341 si_pi->sys_pcie_mask = 0; 7321 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
7342 else
7343 si_pi->sys_pcie_mask = mask;
7344 si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; 7322 si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
7345 si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); 7323 si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
7346 7324
@@ -7938,6 +7916,7 @@ static int si_dpm_early_init(void *handle)
7938 7916
7939 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7917 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7940 7918
7919 adev->powerplay.pp_funcs = &si_dpm_funcs;
7941 si_dpm_set_irq_funcs(adev); 7920 si_dpm_set_irq_funcs(adev);
7942 return 0; 7921 return 0;
7943} 7922}
@@ -8038,7 +8017,7 @@ static int si_dpm_read_sensor(void *handle, int idx,
8038 } 8017 }
8039} 8018}
8040 8019
8041const struct amd_ip_funcs si_dpm_ip_funcs = { 8020static const struct amd_ip_funcs si_dpm_ip_funcs = {
8042 .name = "si_dpm", 8021 .name = "si_dpm",
8043 .early_init = si_dpm_early_init, 8022 .early_init = si_dpm_early_init,
8044 .late_init = si_dpm_late_init, 8023 .late_init = si_dpm_late_init,
@@ -8055,8 +8034,16 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
8055 .set_powergating_state = si_dpm_set_powergating_state, 8034 .set_powergating_state = si_dpm_set_powergating_state,
8056}; 8035};
8057 8036
8058const struct amd_pm_funcs si_dpm_funcs = { 8037const struct amdgpu_ip_block_version si_smu_ip_block =
8059 .get_temperature = &si_dpm_get_temp, 8038{
8039 .type = AMD_IP_BLOCK_TYPE_SMC,
8040 .major = 6,
8041 .minor = 0,
8042 .rev = 0,
8043 .funcs = &si_dpm_ip_funcs,
8044};
8045
8046static const struct amd_pm_funcs si_dpm_funcs = {
8060 .pre_set_power_state = &si_dpm_pre_set_power_state, 8047 .pre_set_power_state = &si_dpm_pre_set_power_state,
8061 .set_power_state = &si_dpm_set_power_state, 8048 .set_power_state = &si_dpm_set_power_state,
8062 .post_set_power_state = &si_dpm_post_set_power_state, 8049 .post_set_power_state = &si_dpm_post_set_power_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
index 9fe343de3477..6b7d292b919f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -245,8 +245,7 @@ enum si_display_gap
245 SI_PM_DISPLAY_GAP_IGNORE = 3, 245 SI_PM_DISPLAY_GAP_IGNORE = 3,
246}; 246};
247 247
248extern const struct amd_ip_funcs si_dpm_ip_funcs; 248extern const struct amdgpu_ip_block_version si_smu_ip_block;
249extern const struct amd_pm_funcs si_dpm_funcs;
250 249
251struct ni_leakage_coeffients 250struct ni_leakage_coeffients
252{ 251{
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a04a033f57de..c6e857325b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -57,7 +57,6 @@
57#include "uvd_v7_0.h" 57#include "uvd_v7_0.h"
58#include "vce_v4_0.h" 58#include "vce_v4_0.h"
59#include "vcn_v1_0.h" 59#include "vcn_v1_0.h"
60#include "amdgpu_powerplay.h"
61#include "dce_virtual.h" 60#include "dce_virtual.h"
62#include "mxgpu_ai.h" 61#include "mxgpu_ai.h"
63 62
@@ -417,12 +416,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
417 416
418 pci_save_state(adev->pdev); 417 pci_save_state(adev->pdev);
419 418
420 for (i = 0; i < AMDGPU_MAX_IP_NUM; i++) { 419 psp_gpu_reset(adev);
421 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP){
422 adev->ip_blocks[i].version->funcs->soft_reset((void *)adev);
423 break;
424 }
425 }
426 420
427 pci_restore_state(adev->pdev); 421 pci_restore_state(adev->pdev);
428 422
@@ -536,10 +530,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
536 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); 530 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
537 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 531 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
538 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 532 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
539 if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1) 533 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
540 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
541 if (!amdgpu_sriov_vf(adev)) 534 if (!amdgpu_sriov_vf(adev))
542 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 535 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
543 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 536 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
544 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 537 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
545#if defined(CONFIG_DRM_AMD_DC) 538#if defined(CONFIG_DRM_AMD_DC)
@@ -558,7 +551,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
558 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 551 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
559 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 552 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
560 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 553 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
561 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 554 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
562 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 555 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
563 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 556 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
564#if defined(CONFIG_DRM_AMD_DC) 557#if defined(CONFIG_DRM_AMD_DC)
@@ -583,6 +576,21 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
583 return adev->nbio_funcs->get_rev_id(adev); 576 return adev->nbio_funcs->get_rev_id(adev);
584} 577}
585 578
579static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
580{
581 adev->nbio_funcs->hdp_flush(adev, ring);
582}
583
584static void soc15_invalidate_hdp(struct amdgpu_device *adev,
585 struct amdgpu_ring *ring)
586{
587 if (!ring || !ring->funcs->emit_wreg)
588 WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
589 else
590 amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
591 HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
592}
593
586static const struct amdgpu_asic_funcs soc15_asic_funcs = 594static const struct amdgpu_asic_funcs soc15_asic_funcs =
587{ 595{
588 .read_disabled_bios = &soc15_read_disabled_bios, 596 .read_disabled_bios = &soc15_read_disabled_bios,
@@ -594,6 +602,8 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
594 .set_uvd_clocks = &soc15_set_uvd_clocks, 602 .set_uvd_clocks = &soc15_set_uvd_clocks,
595 .set_vce_clocks = &soc15_set_vce_clocks, 603 .set_vce_clocks = &soc15_set_vce_clocks,
596 .get_config_memsize = &soc15_get_config_memsize, 604 .get_config_memsize = &soc15_get_config_memsize,
605 .flush_hdp = &soc15_flush_hdp,
606 .invalidate_hdp = &soc15_invalidate_hdp,
597}; 607};
598 608
599static int soc15_common_early_init(void *handle) 609static int soc15_common_early_init(void *handle)
@@ -680,10 +690,6 @@ static int soc15_common_early_init(void *handle)
680 xgpu_ai_mailbox_set_irq_funcs(adev); 690 xgpu_ai_mailbox_set_irq_funcs(adev);
681 } 691 }
682 692
683 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
684
685 amdgpu_device_get_pcie_info(adev);
686
687 return 0; 693 return 0;
688} 694}
689 695
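
The two new soc15 callbacks give generic code a single entry point for HDP maintenance: with a ring, the flush goes through nbio_funcs->hdp_flush() and the invalidate is emitted in-band as a register write; without a ring, soc15_invalidate_hdp() falls back to a direct MMIO write. A hedged usage sketch, assuming callers dispatch through adev->asic_funcs (the exact wrapper macros in amdgpu.h are not shown in this diff):

        /* illustrative only -- not part of the patch */
        static void example_hdp_maintenance(struct amdgpu_device *adev,
                                            struct amdgpu_ring *ring)
        {
                /* ring may be NULL; the invalidate then takes the
                 * WREG32_SOC15_NO_KIQ() path instead of emitting on the ring */
                adev->asic_funcs->flush_hdp(adev, ring);
                adev->asic_funcs->invalidate_hdp(adev, ring);
        }
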
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 26b3feac5d06..f70da8a29f86 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -27,6 +27,9 @@
27#include "nbio_v6_1.h" 27#include "nbio_v6_1.h"
28#include "nbio_v7_0.h" 28#include "nbio_v7_0.h"
29 29
30#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4
31#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1
32
30extern const struct amd_ip_funcs soc15_common_ip_funcs; 33extern const struct amd_ip_funcs soc15_common_ip_funcs;
31 34
32struct soc15_reg_golden { 35struct soc15_reg_golden {
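
These two constants let each ring's emit_frame_size account for the generic TLB flush instead of carrying a hand-counted value. Worked example for the UVD 7.0 VM ring later in this diff, where one register write costs 6 dwords and one register wait costs 8: 6 + 6 (hdp flush/invalidate) + 4 * 6 (SOC15_FLUSH_GPU_TLB_NUM_WREG writes) + 1 * 8 (SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT wait) + 8 (the ring's own wait for the page-table write) + 14 + 14 (two VM fences) = 80 dwords reserved per frame.
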
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 5995ffc183de..52853d8a8fdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -107,7 +107,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
107 tonga_ih_disable_interrupts(adev); 107 tonga_ih_disable_interrupts(adev);
108 108
109 /* setup interrupt control */ 109 /* setup interrupt control */
110 WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); 110 WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
111 interrupt_cntl = RREG32(mmINTERRUPT_CNTL); 111 interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
112 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi 112 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
113 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN 113 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -271,7 +271,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
271 entry->src_data[0] = dw[1] & 0xfffffff; 271 entry->src_data[0] = dw[1] & 0xfffffff;
272 entry->ring_id = dw[2] & 0xff; 272 entry->ring_id = dw[2] & 0xff;
273 entry->vmid = (dw[2] >> 8) & 0xff; 273 entry->vmid = (dw[2] >> 8) & 0xff;
274 entry->pas_id = (dw[2] >> 16) & 0xffff; 274 entry->pasid = (dw[2] >> 16) & 0xffff;
275 275
276 /* wptr/rptr are in bytes! */ 276 /* wptr/rptr are in bytes! */
277 adev->irq.ih.rptr += 16; 277 adev->irq.ih.rptr += 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 8ab10c220910..948bb9437757 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -464,32 +464,6 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
464} 464}
465 465
466/** 466/**
467 * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
468 *
469 * @ring: amdgpu_ring pointer
470 *
471 * Emits an hdp flush.
472 */
473static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
474{
475 amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
476 amdgpu_ring_write(ring, 0);
477}
478
479/**
480 * uvd_v4_2_ring_hdp_invalidate - emit an hdp invalidate
481 *
482 * @ring: amdgpu_ring pointer
483 *
484 * Emits an hdp invalidate.
485 */
486static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
487{
488 amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
489 amdgpu_ring_write(ring, 1);
490}
491
492/**
493 * uvd_v4_2_ring_test_ring - register write test 467 * uvd_v4_2_ring_test_ring - register write test
494 * 468 *
495 * @ring: amdgpu_ring pointer 469 * @ring: amdgpu_ring pointer
@@ -765,14 +739,10 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
765 .set_wptr = uvd_v4_2_ring_set_wptr, 739 .set_wptr = uvd_v4_2_ring_set_wptr,
766 .parse_cs = amdgpu_uvd_ring_parse_cs, 740 .parse_cs = amdgpu_uvd_ring_parse_cs,
767 .emit_frame_size = 741 .emit_frame_size =
768 2 + /* uvd_v4_2_ring_emit_hdp_flush */
769 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
770 14, /* uvd_v4_2_ring_emit_fence x1 no user fence */ 742 14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
771 .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */ 743 .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
772 .emit_ib = uvd_v4_2_ring_emit_ib, 744 .emit_ib = uvd_v4_2_ring_emit_ib,
773 .emit_fence = uvd_v4_2_ring_emit_fence, 745 .emit_fence = uvd_v4_2_ring_emit_fence,
774 .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
775 .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
776 .test_ring = uvd_v4_2_ring_test_ring, 746 .test_ring = uvd_v4_2_ring_test_ring,
777 .test_ib = amdgpu_uvd_ring_test_ib, 747 .test_ib = amdgpu_uvd_ring_test_ib,
778 .insert_nop = amdgpu_ring_insert_nop, 748 .insert_nop = amdgpu_ring_insert_nop,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index c1fe30cdba32..6445d55e7d5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -479,32 +479,6 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
479} 479}
480 480
481/** 481/**
482 * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
483 *
484 * @ring: amdgpu_ring pointer
485 *
486 * Emits an hdp flush.
487 */
488static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
489{
490 amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
491 amdgpu_ring_write(ring, 0);
492}
493
494/**
495 * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate
496 *
497 * @ring: amdgpu_ring pointer
498 *
499 * Emits an hdp invalidate.
500 */
501static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
502{
503 amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
504 amdgpu_ring_write(ring, 1);
505}
506
507/**
508 * uvd_v5_0_ring_test_ring - register write test 482 * uvd_v5_0_ring_test_ring - register write test
509 * 483 *
510 * @ring: amdgpu_ring pointer 484 * @ring: amdgpu_ring pointer
@@ -873,14 +847,10 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
873 .set_wptr = uvd_v5_0_ring_set_wptr, 847 .set_wptr = uvd_v5_0_ring_set_wptr,
874 .parse_cs = amdgpu_uvd_ring_parse_cs, 848 .parse_cs = amdgpu_uvd_ring_parse_cs,
875 .emit_frame_size = 849 .emit_frame_size =
876 2 + /* uvd_v5_0_ring_emit_hdp_flush */
877 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
878 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */ 850 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
879 .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */ 851 .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
880 .emit_ib = uvd_v5_0_ring_emit_ib, 852 .emit_ib = uvd_v5_0_ring_emit_ib,
881 .emit_fence = uvd_v5_0_ring_emit_fence, 853 .emit_fence = uvd_v5_0_ring_emit_fence,
882 .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
883 .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
884 .test_ring = uvd_v5_0_ring_test_ring, 854 .test_ring = uvd_v5_0_ring_test_ring,
885 .test_ib = amdgpu_uvd_ring_test_ib, 855 .test_ib = amdgpu_uvd_ring_test_ib,
886 .insert_nop = amdgpu_ring_insert_nop, 856 .insert_nop = amdgpu_ring_insert_nop,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index b2bfedaf57f1..f26f515db2fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -964,32 +964,6 @@ static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
964} 964}
965 965
966/** 966/**
967 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
968 *
969 * @ring: amdgpu_ring pointer
970 *
971 * Emits an hdp flush.
972 */
973static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
974{
975 amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
976 amdgpu_ring_write(ring, 0);
977}
978
979/**
980 * uvd_v6_0_ring_hdp_invalidate - emit an hdp invalidate
981 *
982 * @ring: amdgpu_ring pointer
983 *
984 * Emits an hdp invalidate.
985 */
986static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
987{
988 amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
989 amdgpu_ring_write(ring, 1);
990}
991
992/**
993 * uvd_v6_0_ring_test_ring - register write test 967 * uvd_v6_0_ring_test_ring - register write test
994 * 968 *
995 * @ring: amdgpu_ring pointer 969 * @ring: amdgpu_ring pointer
@@ -1072,29 +1046,21 @@ static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1072 amdgpu_ring_write(ring, ib->length_dw); 1046 amdgpu_ring_write(ring, ib->length_dw);
1073} 1047}
1074 1048
1075static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1049static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
1076 unsigned vmid, uint64_t pd_addr) 1050 uint32_t reg, uint32_t val)
1077{ 1051{
1078 uint32_t reg;
1079
1080 if (vmid < 8)
1081 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
1082 else
1083 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
1084
1085 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); 1052 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
1086 amdgpu_ring_write(ring, reg << 2); 1053 amdgpu_ring_write(ring, reg << 2);
1087 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); 1054 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
1088 amdgpu_ring_write(ring, pd_addr >> 12); 1055 amdgpu_ring_write(ring, val);
1089 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); 1056 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
1090 amdgpu_ring_write(ring, 0x8); 1057 amdgpu_ring_write(ring, 0x8);
1058}
1091 1059
1092 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); 1060static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1093 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); 1061 unsigned vmid, uint64_t pd_addr)
1094 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); 1062{
1095 amdgpu_ring_write(ring, 1 << vmid); 1063 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1096 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
1097 amdgpu_ring_write(ring, 0x8);
1098 1064
1099 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); 1065 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
1100 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); 1066 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
@@ -1140,7 +1106,7 @@ static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1140} 1106}
1141 1107
1142static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, 1108static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1143 unsigned int vmid, uint64_t pd_addr) 1109 unsigned int vmid, uint64_t pd_addr)
1144{ 1110{
1145 amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB); 1111 amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
1146 amdgpu_ring_write(ring, vmid); 1112 amdgpu_ring_write(ring, vmid);
@@ -1562,21 +1528,19 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
1562 .set_wptr = uvd_v6_0_ring_set_wptr, 1528 .set_wptr = uvd_v6_0_ring_set_wptr,
1563 .parse_cs = amdgpu_uvd_ring_parse_cs, 1529 .parse_cs = amdgpu_uvd_ring_parse_cs,
1564 .emit_frame_size = 1530 .emit_frame_size =
1565 2 + /* uvd_v6_0_ring_emit_hdp_flush */ 1531 6 + 6 + /* hdp flush / invalidate */
1566 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
1567 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ 1532 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
1568 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */ 1533 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
1569 .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */ 1534 .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
1570 .emit_ib = uvd_v6_0_ring_emit_ib, 1535 .emit_ib = uvd_v6_0_ring_emit_ib,
1571 .emit_fence = uvd_v6_0_ring_emit_fence, 1536 .emit_fence = uvd_v6_0_ring_emit_fence,
1572 .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1573 .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
1574 .test_ring = uvd_v6_0_ring_test_ring, 1537 .test_ring = uvd_v6_0_ring_test_ring,
1575 .test_ib = amdgpu_uvd_ring_test_ib, 1538 .test_ib = amdgpu_uvd_ring_test_ib,
1576 .insert_nop = amdgpu_ring_insert_nop, 1539 .insert_nop = amdgpu_ring_insert_nop,
1577 .pad_ib = amdgpu_ring_generic_pad_ib, 1540 .pad_ib = amdgpu_ring_generic_pad_ib,
1578 .begin_use = amdgpu_uvd_ring_begin_use, 1541 .begin_use = amdgpu_uvd_ring_begin_use,
1579 .end_use = amdgpu_uvd_ring_end_use, 1542 .end_use = amdgpu_uvd_ring_end_use,
1543 .emit_wreg = uvd_v6_0_ring_emit_wreg,
1580}; 1544};
1581 1545
1582static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { 1546static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
@@ -1588,24 +1552,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
1588 .get_wptr = uvd_v6_0_ring_get_wptr, 1552 .get_wptr = uvd_v6_0_ring_get_wptr,
1589 .set_wptr = uvd_v6_0_ring_set_wptr, 1553 .set_wptr = uvd_v6_0_ring_set_wptr,
1590 .emit_frame_size = 1554 .emit_frame_size =
1591 2 + /* uvd_v6_0_ring_emit_hdp_flush */ 1555 6 + 6 + /* hdp flush / invalidate */
1592 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
1593 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ 1556 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
1594 20 + /* uvd_v6_0_ring_emit_vm_flush */ 1557 VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
1595 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */ 1558 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
1596 .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */ 1559 .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
1597 .emit_ib = uvd_v6_0_ring_emit_ib, 1560 .emit_ib = uvd_v6_0_ring_emit_ib,
1598 .emit_fence = uvd_v6_0_ring_emit_fence, 1561 .emit_fence = uvd_v6_0_ring_emit_fence,
1599 .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, 1562 .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
1600 .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync, 1563 .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
1601 .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1602 .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
1603 .test_ring = uvd_v6_0_ring_test_ring, 1564 .test_ring = uvd_v6_0_ring_test_ring,
1604 .test_ib = amdgpu_uvd_ring_test_ib, 1565 .test_ib = amdgpu_uvd_ring_test_ib,
1605 .insert_nop = amdgpu_ring_insert_nop, 1566 .insert_nop = amdgpu_ring_insert_nop,
1606 .pad_ib = amdgpu_ring_generic_pad_ib, 1567 .pad_ib = amdgpu_ring_generic_pad_ib,
1607 .begin_use = amdgpu_uvd_ring_begin_use, 1568 .begin_use = amdgpu_uvd_ring_begin_use,
1608 .end_use = amdgpu_uvd_ring_end_use, 1569 .end_use = amdgpu_uvd_ring_end_use,
1570 .emit_wreg = uvd_v6_0_ring_emit_wreg,
1609}; 1571};
1610 1572
1611static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = { 1573static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
@@ -1618,7 +1580,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
1618 .set_wptr = uvd_v6_0_enc_ring_set_wptr, 1580 .set_wptr = uvd_v6_0_enc_ring_set_wptr,
1619 .emit_frame_size = 1581 .emit_frame_size =
1620 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */ 1582 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
1621 6 + /* uvd_v6_0_enc_ring_emit_vm_flush */ 1583 5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
1622 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */ 1584 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
1623 1, /* uvd_v6_0_enc_ring_insert_end */ 1585 1, /* uvd_v6_0_enc_ring_insert_end */
1624 .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */ 1586 .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
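
With .emit_wreg in place, the open-coded page-table and invalidate writes above collapse into amdgpu_gmc_emit_flush_gpu_tlb(). A rough sketch of the VI-side work that helper can now express through the callback, using only the registers visible in the code removed above; the real gmc_v8 implementation is not part of this diff and reserves VI_FLUSH_GPU_TLB_NUM_WREG (3) writes, so treat this as an approximation:

        /* illustrative only -- not part of the patch */
        static uint64_t example_vi_flush_gpu_tlb(struct amdgpu_ring *ring,
                                                 unsigned vmid, uint64_t pd_addr)
        {
                uint32_t reg = vmid < 8 ?
                        mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid :
                        mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;

                /* the same two writes uvd_v6_0_ring_emit_vm_flush() used to
                 * emit by hand, now routed through the generic ring callback */
                amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
                amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

                return pd_addr;
        }
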
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 6b95f4f344b5..eddc57f3b72a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -25,6 +25,7 @@
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "amdgpu.h" 26#include "amdgpu.h"
27#include "amdgpu_uvd.h" 27#include "amdgpu_uvd.h"
28#include "soc15.h"
28#include "soc15d.h" 29#include "soc15d.h"
29#include "soc15_common.h" 30#include "soc15_common.h"
30#include "mmsch_v1_0.h" 31#include "mmsch_v1_0.h"
@@ -389,13 +390,13 @@ static int uvd_v7_0_sw_init(void *handle)
389 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 390 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
390 391
391 /* UVD TRAP */ 392 /* UVD TRAP */
392 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, 124, &adev->uvd.irq); 393 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
393 if (r) 394 if (r)
394 return r; 395 return r;
395 396
396 /* UVD ENC TRAP */ 397 /* UVD ENC TRAP */
397 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { 398 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
398 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq); 399 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
399 if (r) 400 if (r)
400 return r; 401 return r;
401 } 402 }
@@ -1135,37 +1136,6 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1135} 1136}
1136 1137
1137/** 1138/**
1138 * uvd_v7_0_ring_emit_hdp_flush - emit an hdp flush
1139 *
1140 * @ring: amdgpu_ring pointer
1141 *
1142 * Emits an hdp flush.
1143 */
1144static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1145{
1146 struct amdgpu_device *adev = ring->adev;
1147
1148 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
1149 mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
1150 amdgpu_ring_write(ring, 0);
1151}
1152
1153/**
1154 * uvd_v7_0_ring_hdp_invalidate - emit an hdp invalidate
1155 *
1156 * @ring: amdgpu_ring pointer
1157 *
1158 * Emits an hdp invalidate.
1159 */
1160static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
1161{
1162 struct amdgpu_device *adev = ring->adev;
1163
1164 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
1165 amdgpu_ring_write(ring, 1);
1166}
1167
1168/**
1169 * uvd_v7_0_ring_test_ring - register write test 1139 * uvd_v7_0_ring_test_ring - register write test
1170 * 1140 *
1171 * @ring: amdgpu_ring pointer 1141 * @ring: amdgpu_ring pointer
@@ -1255,33 +1225,33 @@ static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1255 amdgpu_ring_write(ring, ib->length_dw); 1225 amdgpu_ring_write(ring, ib->length_dw);
1256} 1226}
1257 1227
1258static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring, 1228static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1259 uint32_t data0, uint32_t data1) 1229 uint32_t reg, uint32_t val)
1260{ 1230{
1261 struct amdgpu_device *adev = ring->adev; 1231 struct amdgpu_device *adev = ring->adev;
1262 1232
1263 amdgpu_ring_write(ring, 1233 amdgpu_ring_write(ring,
1264 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); 1234 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1265 amdgpu_ring_write(ring, data0); 1235 amdgpu_ring_write(ring, reg << 2);
1266 amdgpu_ring_write(ring, 1236 amdgpu_ring_write(ring,
1267 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); 1237 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1268 amdgpu_ring_write(ring, data1); 1238 amdgpu_ring_write(ring, val);
1269 amdgpu_ring_write(ring, 1239 amdgpu_ring_write(ring,
1270 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); 1240 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1271 amdgpu_ring_write(ring, 8); 1241 amdgpu_ring_write(ring, 8);
1272} 1242}
1273 1243
1274static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring, 1244static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1275 uint32_t data0, uint32_t data1, uint32_t mask) 1245 uint32_t val, uint32_t mask)
1276{ 1246{
1277 struct amdgpu_device *adev = ring->adev; 1247 struct amdgpu_device *adev = ring->adev;
1278 1248
1279 amdgpu_ring_write(ring, 1249 amdgpu_ring_write(ring,
1280 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); 1250 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1281 amdgpu_ring_write(ring, data0); 1251 amdgpu_ring_write(ring, reg << 2);
1282 amdgpu_ring_write(ring, 1252 amdgpu_ring_write(ring,
1283 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); 1253 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1284 amdgpu_ring_write(ring, data1); 1254 amdgpu_ring_write(ring, val);
1285 amdgpu_ring_write(ring, 1255 amdgpu_ring_write(ring,
1286 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0)); 1256 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1287 amdgpu_ring_write(ring, mask); 1257 amdgpu_ring_write(ring, mask);
@@ -1294,37 +1264,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1294 unsigned vmid, uint64_t pd_addr) 1264 unsigned vmid, uint64_t pd_addr)
1295{ 1265{
1296 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; 1266 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1297 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
1298 uint64_t flags = AMDGPU_PTE_VALID;
1299 unsigned eng = ring->vm_inv_eng;
1300 uint32_t data0, data1, mask; 1267 uint32_t data0, data1, mask;
1301 1268
1302 amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags); 1269 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1303 pd_addr |= flags;
1304
1305 data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
1306 data1 = upper_32_bits(pd_addr);
1307 uvd_v7_0_vm_reg_write(ring, data0, data1);
1308
1309 data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
1310 data1 = lower_32_bits(pd_addr);
1311 uvd_v7_0_vm_reg_write(ring, data0, data1);
1312 1270
1313 data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2; 1271 /* wait for reg writes */
1272 data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
1314 data1 = lower_32_bits(pd_addr); 1273 data1 = lower_32_bits(pd_addr);
1315 mask = 0xffffffff; 1274 mask = 0xffffffff;
1316 uvd_v7_0_vm_reg_wait(ring, data0, data1, mask); 1275 uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1317
1318 /* flush TLB */
1319 data0 = (hub->vm_inv_eng0_req + eng) << 2;
1320 data1 = req;
1321 uvd_v7_0_vm_reg_write(ring, data0, data1);
1322
1323 /* wait for flush */
1324 data0 = (hub->vm_inv_eng0_ack + eng) << 2;
1325 data1 = 1 << vmid;
1326 mask = 1 << vmid;
1327 uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
1328} 1276}
1329 1277
1330static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) 1278static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@@ -1342,40 +1290,34 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1342 amdgpu_ring_write(ring, HEVC_ENC_CMD_END); 1290 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1343} 1291}
1344 1292
1293static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1294 uint32_t reg, uint32_t val,
1295 uint32_t mask)
1296{
1297 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1298 amdgpu_ring_write(ring, reg << 2);
1299 amdgpu_ring_write(ring, mask);
1300 amdgpu_ring_write(ring, val);
1301}
1302
1345static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, 1303static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1346 unsigned int vmid, uint64_t pd_addr) 1304 unsigned int vmid, uint64_t pd_addr)
1347{ 1305{
1348 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; 1306 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1349 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
1350 uint64_t flags = AMDGPU_PTE_VALID;
1351 unsigned eng = ring->vm_inv_eng;
1352 1307
1353 amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags); 1308 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1354 pd_addr |= flags;
1355 1309
1356 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); 1310 /* wait for reg writes */
1357 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2); 1311 uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1358 amdgpu_ring_write(ring, upper_32_bits(pd_addr)); 1312 lower_32_bits(pd_addr), 0xffffffff);
1359 1313}
1360 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1361 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
1362 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1363
1364 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1365 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
1366 amdgpu_ring_write(ring, 0xffffffff);
1367 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1368 1314
1369 /* flush TLB */ 1315static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1316 uint32_t reg, uint32_t val)
1317{
1370 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); 1318 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1371 amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2); 1319 amdgpu_ring_write(ring, reg << 2);
1372 amdgpu_ring_write(ring, req); 1320 amdgpu_ring_write(ring, val);
1373
1374 /* wait for flush */
1375 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1376 amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
1377 amdgpu_ring_write(ring, 1 << vmid);
1378 amdgpu_ring_write(ring, 1 << vmid);
1379} 1321}
1380 1322
1381#if 0 1323#if 0
@@ -1712,22 +1654,23 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1712 .get_wptr = uvd_v7_0_ring_get_wptr, 1654 .get_wptr = uvd_v7_0_ring_get_wptr,
1713 .set_wptr = uvd_v7_0_ring_set_wptr, 1655 .set_wptr = uvd_v7_0_ring_set_wptr,
1714 .emit_frame_size = 1656 .emit_frame_size =
1715 2 + /* uvd_v7_0_ring_emit_hdp_flush */ 1657 6 + 6 + /* hdp flush / invalidate */
1716 2 + /* uvd_v7_0_ring_emit_hdp_invalidate */ 1658 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1717 34 + /* uvd_v7_0_ring_emit_vm_flush */ 1659 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1660 8 + /* uvd_v7_0_ring_emit_vm_flush */
1718 14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */ 1661 14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1719 .emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */ 1662 .emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1720 .emit_ib = uvd_v7_0_ring_emit_ib, 1663 .emit_ib = uvd_v7_0_ring_emit_ib,
1721 .emit_fence = uvd_v7_0_ring_emit_fence, 1664 .emit_fence = uvd_v7_0_ring_emit_fence,
1722 .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush, 1665 .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1723 .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1724 .emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
1725 .test_ring = uvd_v7_0_ring_test_ring, 1666 .test_ring = uvd_v7_0_ring_test_ring,
1726 .test_ib = amdgpu_uvd_ring_test_ib, 1667 .test_ib = amdgpu_uvd_ring_test_ib,
1727 .insert_nop = uvd_v7_0_ring_insert_nop, 1668 .insert_nop = uvd_v7_0_ring_insert_nop,
1728 .pad_ib = amdgpu_ring_generic_pad_ib, 1669 .pad_ib = amdgpu_ring_generic_pad_ib,
1729 .begin_use = amdgpu_uvd_ring_begin_use, 1670 .begin_use = amdgpu_uvd_ring_begin_use,
1730 .end_use = amdgpu_uvd_ring_end_use, 1671 .end_use = amdgpu_uvd_ring_end_use,
1672 .emit_wreg = uvd_v7_0_ring_emit_wreg,
1673 .emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1731}; 1674};
1732 1675
1733static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { 1676static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
@@ -1740,7 +1683,10 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1740 .get_wptr = uvd_v7_0_enc_ring_get_wptr, 1683 .get_wptr = uvd_v7_0_enc_ring_get_wptr,
1741 .set_wptr = uvd_v7_0_enc_ring_set_wptr, 1684 .set_wptr = uvd_v7_0_enc_ring_set_wptr,
1742 .emit_frame_size = 1685 .emit_frame_size =
1743 17 + /* uvd_v7_0_enc_ring_emit_vm_flush */ 1686 3 + 3 + /* hdp flush / invalidate */
1687 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1688 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1689 4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1744 5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */ 1690 5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1745 1, /* uvd_v7_0_enc_ring_insert_end */ 1691 1, /* uvd_v7_0_enc_ring_insert_end */
1746 .emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */ 1692 .emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
@@ -1754,6 +1700,8 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1754 .pad_ib = amdgpu_ring_generic_pad_ib, 1700 .pad_ib = amdgpu_ring_generic_pad_ib,
1755 .begin_use = amdgpu_uvd_ring_begin_use, 1701 .begin_use = amdgpu_uvd_ring_begin_use,
1756 .end_use = amdgpu_uvd_ring_end_use, 1702 .end_use = amdgpu_uvd_ring_end_use,
1703 .emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1704 .emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1757}; 1705};
1758 1706
1759static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev) 1707static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
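
The new .emit_reg_wait hook is what lets generic code poll a register from inside the ring instead of each IP open-coding the sequence. A hedged sketch of the invalidation-ack wait the removed uvd_v7_0_ring_emit_vm_flush() used to emit by hand, expressed through the callback (the vmhub field names are taken from the removed code; how the generic helper obtains them is not shown here):

        /* illustrative only -- not part of the patch */
        static void example_wait_inv_ack(struct amdgpu_ring *ring, unsigned vmid)
        {
                struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
                unsigned eng = ring->vm_inv_eng;

                /* ack bit for this vmid: value == mask == 1 << vmid */
                ring->funcs->emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
                                           1 << vmid, 1 << vmid);
        }
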
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index a5355eb689f1..428d1928e44e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -844,7 +844,7 @@ static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
844} 844}
845 845
846static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring, 846static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
847 unsigned int vmid, uint64_t pd_addr) 847 unsigned int vmid, uint64_t pd_addr)
848{ 848{
849 amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB); 849 amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
850 amdgpu_ring_write(ring, vmid); 850 amdgpu_ring_write(ring, vmid);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 7cf2eef68cf2..73fd48d6c756 100755
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -28,6 +28,7 @@
28#include <drm/drmP.h> 28#include <drm/drmP.h>
29#include "amdgpu.h" 29#include "amdgpu.h"
30#include "amdgpu_vce.h" 30#include "amdgpu_vce.h"
31#include "soc15.h"
31#include "soc15d.h" 32#include "soc15d.h"
32#include "soc15_common.h" 33#include "soc15_common.h"
33#include "mmsch_v1_0.h" 34#include "mmsch_v1_0.h"
@@ -419,7 +420,7 @@ static int vce_v4_0_sw_init(void *handle)
419 unsigned size; 420 unsigned size;
420 int r, i; 421 int r, i;
421 422
422 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCE0, 167, &adev->vce.irq); 423 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
423 if (r) 424 if (r)
424 return r; 425 return r;
425 426
@@ -964,40 +965,33 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
964 amdgpu_ring_write(ring, VCE_CMD_END); 965 amdgpu_ring_write(ring, VCE_CMD_END);
965} 966}
966 967
968static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
969 uint32_t val, uint32_t mask)
970{
971 amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
972 amdgpu_ring_write(ring, reg << 2);
973 amdgpu_ring_write(ring, mask);
974 amdgpu_ring_write(ring, val);
975}
976
967static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, 977static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
968 unsigned int vmid, uint64_t pd_addr) 978 unsigned int vmid, uint64_t pd_addr)
969{ 979{
970 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; 980 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
971 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
972 uint64_t flags = AMDGPU_PTE_VALID;
973 unsigned eng = ring->vm_inv_eng;
974 981
975 amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags); 982 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
976 pd_addr |= flags;
977 983
978 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE); 984 /* wait for reg writes */
979 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2); 985 vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
980 amdgpu_ring_write(ring, upper_32_bits(pd_addr)); 986 lower_32_bits(pd_addr), 0xffffffff);
981 987}
982 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
983 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
984 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
985
986 amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
987 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
988 amdgpu_ring_write(ring, 0xffffffff);
989 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
990 988
991 /* flush TLB */ 989static void vce_v4_0_emit_wreg(struct amdgpu_ring *ring,
990 uint32_t reg, uint32_t val)
991{
992 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE); 992 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
993 amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2); 993 amdgpu_ring_write(ring, reg << 2);
994 amdgpu_ring_write(ring, req); 994 amdgpu_ring_write(ring, val);
995
996 /* wait for flush */
997 amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
998 amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
999 amdgpu_ring_write(ring, 1 << vmid);
1000 amdgpu_ring_write(ring, 1 << vmid);
1001} 995}
1002 996
1003static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev, 997static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -1069,7 +1063,9 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
1069 .set_wptr = vce_v4_0_ring_set_wptr, 1063 .set_wptr = vce_v4_0_ring_set_wptr,
1070 .parse_cs = amdgpu_vce_ring_parse_cs_vm, 1064 .parse_cs = amdgpu_vce_ring_parse_cs_vm,
1071 .emit_frame_size = 1065 .emit_frame_size =
1072 17 + /* vce_v4_0_emit_vm_flush */ 1066 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1067 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1068 4 + /* vce_v4_0_emit_vm_flush */
1073 5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */ 1069 5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
1074 1, /* vce_v4_0_ring_insert_end */ 1070 1, /* vce_v4_0_ring_insert_end */
1075 .emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */ 1071 .emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
@@ -1083,6 +1079,8 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
1083 .pad_ib = amdgpu_ring_generic_pad_ib, 1079 .pad_ib = amdgpu_ring_generic_pad_ib,
1084 .begin_use = amdgpu_vce_ring_begin_use, 1080 .begin_use = amdgpu_vce_ring_begin_use,
1085 .end_use = amdgpu_vce_ring_end_use, 1081 .end_use = amdgpu_vce_ring_end_use,
1082 .emit_wreg = vce_v4_0_emit_wreg,
1083 .emit_reg_wait = vce_v4_0_emit_reg_wait,
1086}; 1084};
1087 1085
1088static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev) 1086static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
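
VCE encodes a register write as VCE_CMD_REG_WRITE plus offset and value (3 dwords) and a register wait as VCE_CMD_REG_WAIT plus offset, mask and value (4 dwords), which is why the frame-size terms here use * 3 and * 4 where UVD uses * 6 and * 8. The VM ring frame therefore reserves 4 * 3 + 1 * 4 + 4 (vce_v4_0_emit_vm_flush) + 5 + 5 (two fences) + 1 (insert_end) = 31 dwords.
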
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index b99e15c43e45..8c132673bc79 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -25,6 +25,7 @@
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "amdgpu.h" 26#include "amdgpu.h"
27#include "amdgpu_vcn.h" 27#include "amdgpu_vcn.h"
28#include "soc15.h"
28#include "soc15d.h" 29#include "soc15d.h"
29#include "soc15_common.h" 30#include "soc15_common.h"
30 31
@@ -74,13 +75,13 @@ static int vcn_v1_0_sw_init(void *handle)
74 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 75 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
75 76
76 /* VCN DEC TRAP */ 77 /* VCN DEC TRAP */
77 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq); 78 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
78 if (r) 79 if (r)
79 return r; 80 return r;
80 81
81 /* VCN ENC TRAP */ 82 /* VCN ENC TRAP */
82 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 83 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
83 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, i + 119, 84 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
84 &adev->vcn.irq); 85 &adev->vcn.irq);
85 if (r) 86 if (r)
86 return r; 87 return r;
@@ -809,21 +810,6 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
809} 810}
810 811
811/** 812/**
812 * vcn_v1_0_dec_ring_hdp_invalidate - emit an hdp invalidate
813 *
814 * @ring: amdgpu_ring pointer
815 *
816 * Emits an hdp invalidate.
817 */
818static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
819{
820 struct amdgpu_device *adev = ring->adev;
821
822 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
823 amdgpu_ring_write(ring, 1);
824}
825
826/**
827 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer 813 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
828 * 814 *
829 * @ring: amdgpu_ring pointer 815 * @ring: amdgpu_ring pointer
@@ -852,33 +838,18 @@ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
852 amdgpu_ring_write(ring, ib->length_dw); 838 amdgpu_ring_write(ring, ib->length_dw);
853} 839}
854 840
855static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring, 841static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
856 uint32_t data0, uint32_t data1) 842 uint32_t reg, uint32_t val,
843 uint32_t mask)
857{ 844{
858 struct amdgpu_device *adev = ring->adev; 845 struct amdgpu_device *adev = ring->adev;
859 846
860 amdgpu_ring_write(ring, 847 amdgpu_ring_write(ring,
861 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); 848 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
862 amdgpu_ring_write(ring, data0); 849 amdgpu_ring_write(ring, reg << 2);
863 amdgpu_ring_write(ring, 850 amdgpu_ring_write(ring,
864 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); 851 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
865 amdgpu_ring_write(ring, data1); 852 amdgpu_ring_write(ring, val);
866 amdgpu_ring_write(ring,
867 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
868 amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
869}
870
871static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
872 uint32_t data0, uint32_t data1, uint32_t mask)
873{
874 struct amdgpu_device *adev = ring->adev;
875
876 amdgpu_ring_write(ring,
877 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
878 amdgpu_ring_write(ring, data0);
879 amdgpu_ring_write(ring,
880 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
881 amdgpu_ring_write(ring, data1);
882 amdgpu_ring_write(ring, 853 amdgpu_ring_write(ring,
883 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0)); 854 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
884 amdgpu_ring_write(ring, mask); 855 amdgpu_ring_write(ring, mask);
@@ -888,40 +859,34 @@ static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
888} 859}
889 860
890static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, 861static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
891 unsigned vmid, uint64_t pd_addr) 862 unsigned vmid, uint64_t pd_addr)
892{ 863{
893 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; 864 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
894 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
895 uint64_t flags = AMDGPU_PTE_VALID;
896 unsigned eng = ring->vm_inv_eng;
897 uint32_t data0, data1, mask; 865 uint32_t data0, data1, mask;
898 866
899 amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags); 867 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
900 pd_addr |= flags;
901
902 data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
903 data1 = upper_32_bits(pd_addr);
904 vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
905
906 data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
907 data1 = lower_32_bits(pd_addr);
908 vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
909 868
910 data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2; 869 /* wait for register write */
870 data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
911 data1 = lower_32_bits(pd_addr); 871 data1 = lower_32_bits(pd_addr);
912 mask = 0xffffffff; 872 mask = 0xffffffff;
913 vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask); 873 vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
914 874}
915 /* flush TLB */ 875
916 data0 = (hub->vm_inv_eng0_req + eng) << 2; 876static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
917 data1 = req; 877 uint32_t reg, uint32_t val)
918 vcn_v1_0_dec_vm_reg_write(ring, data0, data1); 878{
919 879 struct amdgpu_device *adev = ring->adev;
920 /* wait for flush */ 880
921 data0 = (hub->vm_inv_eng0_ack + eng) << 2; 881 amdgpu_ring_write(ring,
922 data1 = 1 << vmid; 882 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
923 mask = 1 << vmid; 883 amdgpu_ring_write(ring, reg << 2);
924 vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask); 884 amdgpu_ring_write(ring,
885 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
886 amdgpu_ring_write(ring, val);
887 amdgpu_ring_write(ring,
888 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
889 amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
925} 890}
926 891
927/** 892/**
@@ -1020,43 +985,34 @@ static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1020 amdgpu_ring_write(ring, ib->length_dw); 985 amdgpu_ring_write(ring, ib->length_dw);
1021} 986}
1022 987
988static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
989 uint32_t reg, uint32_t val,
990 uint32_t mask)
991{
992 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
993 amdgpu_ring_write(ring, reg << 2);
994 amdgpu_ring_write(ring, mask);
995 amdgpu_ring_write(ring, val);
996}
997
1023static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, 998static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1024 unsigned int vmid, uint64_t pd_addr) 999 unsigned int vmid, uint64_t pd_addr)
1025{ 1000{
1026 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; 1001 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1027 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
1028 uint64_t flags = AMDGPU_PTE_VALID;
1029 unsigned eng = ring->vm_inv_eng;
1030
1031 amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
1032 pd_addr |= flags;
1033 1002
1034 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); 1003 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1035 amdgpu_ring_write(ring,
1036 (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
1037 amdgpu_ring_write(ring, upper_32_bits(pd_addr));
1038
1039 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1040 amdgpu_ring_write(ring,
1041 (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
1042 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1043 1004
1044 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); 1005 /* wait for reg writes */
1045 amdgpu_ring_write(ring, 1006 vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1046 (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); 1007 lower_32_bits(pd_addr), 0xffffffff);
1047 amdgpu_ring_write(ring, 0xffffffff); 1008}
1048 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1049 1009
1050 /* flush TLB */ 1010static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1011 uint32_t reg, uint32_t val)
1012{
1051 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); 1013 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1052 amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2); 1014 amdgpu_ring_write(ring, reg << 2);
1053 amdgpu_ring_write(ring, req); 1015 amdgpu_ring_write(ring, val);
1054
1055 /* wait for flush */
1056 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1057 amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
1058 amdgpu_ring_write(ring, 1 << vmid);
1059 amdgpu_ring_write(ring, 1 << vmid);
1060} 1016}
1061 1017
1062static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev, 1018static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -1133,15 +1089,16 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
1133 .get_wptr = vcn_v1_0_dec_ring_get_wptr, 1089 .get_wptr = vcn_v1_0_dec_ring_get_wptr,
1134 .set_wptr = vcn_v1_0_dec_ring_set_wptr, 1090 .set_wptr = vcn_v1_0_dec_ring_set_wptr,
1135 .emit_frame_size = 1091 .emit_frame_size =
1136 2 + /* vcn_v1_0_dec_ring_emit_hdp_invalidate */ 1092 6 + 6 + /* hdp invalidate / flush */
1137 34 + /* vcn_v1_0_dec_ring_emit_vm_flush */ 1093 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1094 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1095 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
1138 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */ 1096 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
1139 6, 1097 6,
1140 .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */ 1098 .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
1141 .emit_ib = vcn_v1_0_dec_ring_emit_ib, 1099 .emit_ib = vcn_v1_0_dec_ring_emit_ib,
1142 .emit_fence = vcn_v1_0_dec_ring_emit_fence, 1100 .emit_fence = vcn_v1_0_dec_ring_emit_fence,
1143 .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush, 1101 .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
1144 .emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
1145 .test_ring = amdgpu_vcn_dec_ring_test_ring, 1102 .test_ring = amdgpu_vcn_dec_ring_test_ring,
1146 .test_ib = amdgpu_vcn_dec_ring_test_ib, 1103 .test_ib = amdgpu_vcn_dec_ring_test_ib,
1147 .insert_nop = vcn_v1_0_ring_insert_nop, 1104 .insert_nop = vcn_v1_0_ring_insert_nop,
@@ -1150,6 +1107,8 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
1150 .pad_ib = amdgpu_ring_generic_pad_ib, 1107 .pad_ib = amdgpu_ring_generic_pad_ib,
1151 .begin_use = amdgpu_vcn_ring_begin_use, 1108 .begin_use = amdgpu_vcn_ring_begin_use,
1152 .end_use = amdgpu_vcn_ring_end_use, 1109 .end_use = amdgpu_vcn_ring_end_use,
1110 .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
1111 .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
1153}; 1112};
1154 1113
1155static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { 1114static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
@@ -1162,7 +1121,9 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
1162 .get_wptr = vcn_v1_0_enc_ring_get_wptr, 1121 .get_wptr = vcn_v1_0_enc_ring_get_wptr,
1163 .set_wptr = vcn_v1_0_enc_ring_set_wptr, 1122 .set_wptr = vcn_v1_0_enc_ring_set_wptr,
1164 .emit_frame_size = 1123 .emit_frame_size =
1165 17 + /* vcn_v1_0_enc_ring_emit_vm_flush */ 1124 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1125 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1126 4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
1166 5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */ 1127 5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
1167 1, /* vcn_v1_0_enc_ring_insert_end */ 1128 1, /* vcn_v1_0_enc_ring_insert_end */
1168 .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */ 1129 .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
@@ -1176,6 +1137,8 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
1176 .pad_ib = amdgpu_ring_generic_pad_ib, 1137 .pad_ib = amdgpu_ring_generic_pad_ib,
1177 .begin_use = amdgpu_vcn_ring_begin_use, 1138 .begin_use = amdgpu_vcn_ring_begin_use,
1178 .end_use = amdgpu_vcn_ring_end_use, 1139 .end_use = amdgpu_vcn_ring_end_use,
1140 .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
1141 .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
1179}; 1142};
1180 1143
1181static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) 1144static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index ee14d78be2a9..5ae5ed2e62d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -245,8 +245,8 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
245 * some faults get cleared. 245 * some faults get cleared.
246 */ 246 */
247 switch (dw0 & 0xff) { 247 switch (dw0 & 0xff) {
248 case AMDGPU_IH_CLIENTID_VMC: 248 case SOC15_IH_CLIENTID_VMC:
249 case AMDGPU_IH_CLIENTID_UTCL2: 249 case SOC15_IH_CLIENTID_UTCL2:
250 break; 250 break;
251 default: 251 default:
252 /* Not a VM fault */ 252 /* Not a VM fault */
@@ -333,7 +333,7 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
333 entry->vmid_src = (dw[0] >> 31); 333 entry->vmid_src = (dw[0] >> 31);
334 entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32); 334 entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
335 entry->timestamp_src = dw[2] >> 31; 335 entry->timestamp_src = dw[2] >> 31;
336 entry->pas_id = dw[3] & 0xffff; 336 entry->pasid = dw[3] & 0xffff;
337 entry->pasid_src = dw[3] >> 31; 337 entry->pasid_src = dw[3] >> 31;
338 entry->src_data[0] = dw[4]; 338 entry->src_data[0] = dw[4];
339 entry->src_data[1] = dw[5]; 339 entry->src_data[1] = dw[5];
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index b7bdd04793d6..4c45db7f1157 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -24,7 +24,8 @@
24#include "soc15.h" 24#include "soc15.h"
25 25
26#include "soc15_common.h" 26#include "soc15_common.h"
27#include "soc15ip.h" 27#include "soc15_hw_ip.h"
28#include "vega10_ip_offset.h"
28 29
29int vega10_reg_base_init(struct amdgpu_device *adev) 30int vega10_reg_base_init(struct amdgpu_device *adev)
30{ 31{
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 1e3e05a11f7a..e7fb165cc9db 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -71,7 +71,6 @@
71#include "uvd_v5_0.h" 71#include "uvd_v5_0.h"
72#include "uvd_v6_0.h" 72#include "uvd_v6_0.h"
73#include "vce_v3_0.h" 73#include "vce_v3_0.h"
74#include "amdgpu_powerplay.h"
75#if defined(CONFIG_DRM_AMD_ACP) 74#if defined(CONFIG_DRM_AMD_ACP)
76#include "amdgpu_acp.h" 75#include "amdgpu_acp.h"
77#endif 76#endif
@@ -856,6 +855,27 @@ static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
856 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT; 855 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
857} 856}
858 857
858static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
859{
860 if (!ring || !ring->funcs->emit_wreg) {
861 WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
862 RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
863 } else {
864 amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
865 }
866}
867
868static void vi_invalidate_hdp(struct amdgpu_device *adev,
869 struct amdgpu_ring *ring)
870{
871 if (!ring || !ring->funcs->emit_wreg) {
872 WREG32(mmHDP_DEBUG0, 1);
873 RREG32(mmHDP_DEBUG0);
874 } else {
875 amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
876 }
877}
878
859static const struct amdgpu_asic_funcs vi_asic_funcs = 879static const struct amdgpu_asic_funcs vi_asic_funcs =
860{ 880{
861 .read_disabled_bios = &vi_read_disabled_bios, 881 .read_disabled_bios = &vi_read_disabled_bios,
@@ -867,6 +887,8 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
867 .set_uvd_clocks = &vi_set_uvd_clocks, 887 .set_uvd_clocks = &vi_set_uvd_clocks,
868 .set_vce_clocks = &vi_set_vce_clocks, 888 .set_vce_clocks = &vi_set_vce_clocks,
869 .get_config_memsize = &vi_get_config_memsize, 889 .get_config_memsize = &vi_get_config_memsize,
890 .flush_hdp = &vi_flush_hdp,
891 .invalidate_hdp = &vi_invalidate_hdp,
870}; 892};
871 893
872#define CZ_REV_BRISTOL(rev) \ 894#define CZ_REV_BRISTOL(rev) \
@@ -1074,11 +1096,6 @@ static int vi_common_early_init(void *handle)
1074 xgpu_vi_mailbox_set_irq_funcs(adev); 1096 xgpu_vi_mailbox_set_irq_funcs(adev);
1075 } 1097 }
1076 1098
1077 /* vi use smc load by default */
1078 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1079
1080 amdgpu_device_get_pcie_info(adev);
1081
1082 return 0; 1099 return 0;
1083} 1100}
1084 1101
@@ -1493,7 +1510,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1493 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1510 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1494 amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block); 1511 amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
1495 amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block); 1512 amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
1496 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1513 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1497 if (adev->enable_virtual_display) 1514 if (adev->enable_virtual_display)
1498 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1515 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1499 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); 1516 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1503,7 +1520,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1503 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1520 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1504 amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block); 1521 amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
1505 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1522 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1506 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1523 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1507 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 1524 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1508 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1525 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1509#if defined(CONFIG_DRM_AMD_DC) 1526#if defined(CONFIG_DRM_AMD_DC)
@@ -1523,7 +1540,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1523 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1540 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1524 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1541 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1525 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1542 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1526 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1543 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1527 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 1544 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1528 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1545 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1529#if defined(CONFIG_DRM_AMD_DC) 1546#if defined(CONFIG_DRM_AMD_DC)
@@ -1545,7 +1562,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1545 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1562 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1546 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block); 1563 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
1547 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1564 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1548 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1565 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1549 if (adev->enable_virtual_display) 1566 if (adev->enable_virtual_display)
1550 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1567 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1551#if defined(CONFIG_DRM_AMD_DC) 1568#if defined(CONFIG_DRM_AMD_DC)
@@ -1563,7 +1580,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1563 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1580 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1564 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1581 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1565 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); 1582 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1566 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1583 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1567 if (adev->enable_virtual_display) 1584 if (adev->enable_virtual_display)
1568 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1585 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1569#if defined(CONFIG_DRM_AMD_DC) 1586#if defined(CONFIG_DRM_AMD_DC)
@@ -1584,7 +1601,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1584 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1601 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1585 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1602 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1586 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); 1603 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1587 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1604 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1588 if (adev->enable_virtual_display) 1605 if (adev->enable_virtual_display)
1589 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1606 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1590#if defined(CONFIG_DRM_AMD_DC) 1607#if defined(CONFIG_DRM_AMD_DC)
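The six vi_set_ip_blocks() hunks above make the same substitution for every VI variant: the old amdgpu_pp_ip_block powerplay entry is replaced by pp_smu_ip_block. As a reminder of what these calls do, here is a minimal sketch of an IP-block descriptor and its registration; the field values and the pp_ip_funcs name are assumptions for illustration, not the definitions from this tree.

/* Illustrative only: an SMU/powerplay IP-block descriptor.  The real
 * pp_smu_ip_block is provided by the powerplay code and may differ.
 */
static const struct amdgpu_ip_block_version pp_smu_ip_block_sketch = {
	.type  = AMD_IP_BLOCK_TYPE_SMC,	/* assumed block type for the SMU */
	.major = 1,
	.minor = 0,
	.rev   = 0,
	.funcs = &pp_ip_funcs,		/* assumed name of the amd_ip_funcs table */
};

static int example_add_smu_block(struct amdgpu_device *adev)
{
	/* Registration appends the descriptor to adev->ip_blocks; the later
	 * init/fini passes walk that array in order, which is why the SMU
	 * block is added before the GFX, SDMA and display blocks.
	 */
	return amdgpu_device_ip_block_add(adev, &pp_smu_ip_block_sketch);
}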
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 575d7aed5d32..0429fe332269 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -24,6 +24,8 @@
24#ifndef __VI_H__ 24#ifndef __VI_H__
25#define __VI_H__ 25#define __VI_H__
26 26
27#define VI_FLUSH_GPU_TLB_NUM_WREG 3
28
27void vi_srbm_select(struct amdgpu_device *adev, 29void vi_srbm_select(struct amdgpu_device *adev,
28 u32 me, u32 pipe, u32 queue, u32 vmid); 30 u32 me, u32 pipe, u32 queue, u32 vmid);
29int vi_set_ip_blocks(struct amdgpu_device *adev); 31int vi_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index bc5a2945bd2b..ed2f06c9f346 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,6 +4,7 @@
4 4
5config HSA_AMD 5config HSA_AMD
6 tristate "HSA kernel driver for AMD GPU devices" 6 tristate "HSA kernel driver for AMD GPU devices"
7 depends on DRM_AMDGPU && AMD_IOMMU_V2 && X86_64 7 depends on DRM_AMDGPU && X86_64
8 imply AMD_IOMMU_V2
8 help 9 help
9 Enable this if you want to use HSA features on AMD GPU devices. 10 Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index a317e76ffb5e..0d0242240c47 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -37,6 +37,10 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
37 kfd_interrupt.o kfd_events.o cik_event_interrupt.o \ 37 kfd_interrupt.o kfd_events.o cik_event_interrupt.o \
38 kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o 38 kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o
39 39
40ifneq ($(CONFIG_AMD_IOMMU_V2),)
41amdkfd-y += kfd_iommu.o
42endif
43
40amdkfd-$(CONFIG_DEBUG_FS) += kfd_debugfs.o 44amdkfd-$(CONFIG_DEBUG_FS) += kfd_debugfs.o
41 45
42obj-$(CONFIG_HSA_AMD) += amdkfd.o 46obj-$(CONFIG_HSA_AMD) += amdkfd.o
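The Kconfig and Makefile changes above relax the hard AMD_IOMMU_V2 dependency: HSA_AMD now only implies it, and kfd_iommu.o is compiled solely when the IOMMUv2 driver is configured. For the rest of amdkfd to keep building either way, the new kfd_iommu.h has to fall back to inline no-ops; a minimal sketch of that stub pattern, assuming the guard style used later in kfd_device.c (the exact stub set in the tree may differ):

/* Sketch of the header-level fallback this build change relies on. */
#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
#define KFD_SUPPORT_IOMMU_V2

int kfd_iommu_check_device(struct kfd_dev *kfd);
int kfd_iommu_device_init(struct kfd_dev *kfd);
#else
static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
{
	return -ENODEV;		/* no IOMMUv2: only the dGPU paths are usable */
}

static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
{
	return 0;		/* nothing to initialize */
}
#endif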
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 62c3d9cd6ef1..6fe24964540b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -901,7 +901,8 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
901 901
902 mutex_unlock(&p->mutex); 902 mutex_unlock(&p->mutex);
903 903
904 if (sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0) 904 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
905 pdd->qpd.vmid != 0)
905 dev->kfd2kgd->set_scratch_backing_va( 906 dev->kfd2kgd->set_scratch_backing_va(
906 dev->kgd, args->va_addr, pdd->qpd.vmid); 907 dev->kgd, args->va_addr, pdd->qpd.vmid);
907 908
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 2bc2816767a7..7493f47e7fe1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -22,10 +22,10 @@
22 22
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/acpi.h> 24#include <linux/acpi.h>
25#include <linux/amd-iommu.h>
26#include "kfd_crat.h" 25#include "kfd_crat.h"
27#include "kfd_priv.h" 26#include "kfd_priv.h"
28#include "kfd_topology.h" 27#include "kfd_topology.h"
28#include "kfd_iommu.h"
29 29
30/* GPU Processor ID base for dGPUs for which VCRAT needs to be created. 30/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
31 * GPU processor ID are expressed with Bit[31]=1. 31 * GPU processor ID are expressed with Bit[31]=1.
@@ -1037,15 +1037,11 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
1037 struct crat_subtype_generic *sub_type_hdr; 1037 struct crat_subtype_generic *sub_type_hdr;
1038 struct crat_subtype_computeunit *cu; 1038 struct crat_subtype_computeunit *cu;
1039 struct kfd_cu_info cu_info; 1039 struct kfd_cu_info cu_info;
1040 struct amd_iommu_device_info iommu_info;
1041 int avail_size = *size; 1040 int avail_size = *size;
1042 uint32_t total_num_of_cu; 1041 uint32_t total_num_of_cu;
1043 int num_of_cache_entries = 0; 1042 int num_of_cache_entries = 0;
1044 int cache_mem_filled = 0; 1043 int cache_mem_filled = 0;
1045 int ret = 0; 1044 int ret = 0;
1046 const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
1047 AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
1048 AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
1049 struct kfd_local_mem_info local_mem_info; 1045 struct kfd_local_mem_info local_mem_info;
1050 1046
1051 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU) 1047 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
@@ -1106,12 +1102,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
1106 /* Check if this node supports IOMMU. During parsing this flag will 1102 /* Check if this node supports IOMMU. During parsing this flag will
1107 * translate to HSA_CAP_ATS_PRESENT 1103 * translate to HSA_CAP_ATS_PRESENT
1108 */ 1104 */
1109 iommu_info.flags = 0; 1105 if (!kfd_iommu_check_device(kdev))
1110 if (amd_iommu_device_info(kdev->pdev, &iommu_info) == 0) { 1106 cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
1111 if ((iommu_info.flags & required_iommu_flags) ==
1112 required_iommu_flags)
1113 cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
1114 }
1115 1107
1116 crat_table->length += sub_type_hdr->length; 1108 crat_table->length += sub_type_hdr->length;
1117 crat_table->total_entries++; 1109 crat_table->total_entries++;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 3da25f7bda6b..9d4af961c5d1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -33,6 +33,7 @@
33#include "kfd_pm4_headers_diq.h" 33#include "kfd_pm4_headers_diq.h"
34#include "kfd_dbgmgr.h" 34#include "kfd_dbgmgr.h"
35#include "kfd_dbgdev.h" 35#include "kfd_dbgdev.h"
36#include "kfd_device_queue_manager.h"
36 37
37static DEFINE_MUTEX(kfd_dbgmgr_mutex); 38static DEFINE_MUTEX(kfd_dbgmgr_mutex);
38 39
@@ -83,7 +84,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
83 } 84 }
84 85
85 /* get actual type of DBGDevice cpsch or not */ 86 /* get actual type of DBGDevice cpsch or not */
86 if (sched_policy == KFD_SCHED_POLICY_NO_HWS) 87 if (pdev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
87 type = DBGDEV_TYPE_NODIQ; 88 type = DBGDEV_TYPE_NODIQ;
88 89
89 kfd_dbgdev_init(new_buff->dbgdev, pdev, type); 90 kfd_dbgdev_init(new_buff->dbgdev, pdev, type);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a8fa33a08de3..3346699960dd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -20,7 +20,9 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
23#include <linux/amd-iommu.h> 24#include <linux/amd-iommu.h>
25#endif
24#include <linux/bsearch.h> 26#include <linux/bsearch.h>
25#include <linux/pci.h> 27#include <linux/pci.h>
26#include <linux/slab.h> 28#include <linux/slab.h>
@@ -28,9 +30,12 @@
28#include "kfd_device_queue_manager.h" 30#include "kfd_device_queue_manager.h"
29#include "kfd_pm4_headers_vi.h" 31#include "kfd_pm4_headers_vi.h"
30#include "cwsr_trap_handler_gfx8.asm" 32#include "cwsr_trap_handler_gfx8.asm"
33#include "kfd_iommu.h"
31 34
32#define MQD_SIZE_ALIGNED 768 35#define MQD_SIZE_ALIGNED 768
36static atomic_t kfd_device_suspended = ATOMIC_INIT(0);
33 37
38#ifdef KFD_SUPPORT_IOMMU_V2
34static const struct kfd_device_info kaveri_device_info = { 39static const struct kfd_device_info kaveri_device_info = {
35 .asic_family = CHIP_KAVERI, 40 .asic_family = CHIP_KAVERI,
36 .max_pasid_bits = 16, 41 .max_pasid_bits = 16,
@@ -41,6 +46,8 @@ static const struct kfd_device_info kaveri_device_info = {
41 .num_of_watch_points = 4, 46 .num_of_watch_points = 4,
42 .mqd_size_aligned = MQD_SIZE_ALIGNED, 47 .mqd_size_aligned = MQD_SIZE_ALIGNED,
43 .supports_cwsr = false, 48 .supports_cwsr = false,
49 .needs_iommu_device = true,
50 .needs_pci_atomics = false,
44}; 51};
45 52
46static const struct kfd_device_info carrizo_device_info = { 53static const struct kfd_device_info carrizo_device_info = {
@@ -53,15 +60,125 @@ static const struct kfd_device_info carrizo_device_info = {
53 .num_of_watch_points = 4, 60 .num_of_watch_points = 4,
54 .mqd_size_aligned = MQD_SIZE_ALIGNED, 61 .mqd_size_aligned = MQD_SIZE_ALIGNED,
55 .supports_cwsr = true, 62 .supports_cwsr = true,
63 .needs_iommu_device = true,
64 .needs_pci_atomics = false,
56}; 65};
66#endif
67
68static const struct kfd_device_info hawaii_device_info = {
69 .asic_family = CHIP_HAWAII,
70 .max_pasid_bits = 16,
71 /* max num of queues for Hawaii. TODO: should be a dynamic value */
72 .max_no_of_hqd = 24,
73 .ih_ring_entry_size = 4 * sizeof(uint32_t),
74 .event_interrupt_class = &event_interrupt_class_cik,
75 .num_of_watch_points = 4,
76 .mqd_size_aligned = MQD_SIZE_ALIGNED,
77 .supports_cwsr = false,
78 .needs_iommu_device = false,
79 .needs_pci_atomics = false,
80};
81
82static const struct kfd_device_info tonga_device_info = {
83 .asic_family = CHIP_TONGA,
84 .max_pasid_bits = 16,
85 .max_no_of_hqd = 24,
86 .ih_ring_entry_size = 4 * sizeof(uint32_t),
87 .event_interrupt_class = &event_interrupt_class_cik,
88 .num_of_watch_points = 4,
89 .mqd_size_aligned = MQD_SIZE_ALIGNED,
90 .supports_cwsr = false,
91 .needs_iommu_device = false,
92 .needs_pci_atomics = true,
93};
94
95static const struct kfd_device_info tonga_vf_device_info = {
96 .asic_family = CHIP_TONGA,
97 .max_pasid_bits = 16,
98 .max_no_of_hqd = 24,
99 .ih_ring_entry_size = 4 * sizeof(uint32_t),
100 .event_interrupt_class = &event_interrupt_class_cik,
101 .num_of_watch_points = 4,
102 .mqd_size_aligned = MQD_SIZE_ALIGNED,
103 .supports_cwsr = false,
104 .needs_iommu_device = false,
105 .needs_pci_atomics = false,
106};
107
108static const struct kfd_device_info fiji_device_info = {
109 .asic_family = CHIP_FIJI,
110 .max_pasid_bits = 16,
111 .max_no_of_hqd = 24,
112 .ih_ring_entry_size = 4 * sizeof(uint32_t),
113 .event_interrupt_class = &event_interrupt_class_cik,
114 .num_of_watch_points = 4,
115 .mqd_size_aligned = MQD_SIZE_ALIGNED,
116 .supports_cwsr = true,
117 .needs_iommu_device = false,
118 .needs_pci_atomics = true,
119};
120
121static const struct kfd_device_info fiji_vf_device_info = {
122 .asic_family = CHIP_FIJI,
123 .max_pasid_bits = 16,
124 .max_no_of_hqd = 24,
125 .ih_ring_entry_size = 4 * sizeof(uint32_t),
126 .event_interrupt_class = &event_interrupt_class_cik,
127 .num_of_watch_points = 4,
128 .mqd_size_aligned = MQD_SIZE_ALIGNED,
129 .supports_cwsr = true,
130 .needs_iommu_device = false,
131 .needs_pci_atomics = false,
132};
133
134
135static const struct kfd_device_info polaris10_device_info = {
136 .asic_family = CHIP_POLARIS10,
137 .max_pasid_bits = 16,
138 .max_no_of_hqd = 24,
139 .ih_ring_entry_size = 4 * sizeof(uint32_t),
140 .event_interrupt_class = &event_interrupt_class_cik,
141 .num_of_watch_points = 4,
142 .mqd_size_aligned = MQD_SIZE_ALIGNED,
143 .supports_cwsr = true,
144 .needs_iommu_device = false,
145 .needs_pci_atomics = true,
146};
147
148static const struct kfd_device_info polaris10_vf_device_info = {
149 .asic_family = CHIP_POLARIS10,
150 .max_pasid_bits = 16,
151 .max_no_of_hqd = 24,
152 .ih_ring_entry_size = 4 * sizeof(uint32_t),
153 .event_interrupt_class = &event_interrupt_class_cik,
154 .num_of_watch_points = 4,
155 .mqd_size_aligned = MQD_SIZE_ALIGNED,
156 .supports_cwsr = true,
157 .needs_iommu_device = false,
158 .needs_pci_atomics = false,
159};
160
161static const struct kfd_device_info polaris11_device_info = {
162 .asic_family = CHIP_POLARIS11,
163 .max_pasid_bits = 16,
164 .max_no_of_hqd = 24,
165 .ih_ring_entry_size = 4 * sizeof(uint32_t),
166 .event_interrupt_class = &event_interrupt_class_cik,
167 .num_of_watch_points = 4,
168 .mqd_size_aligned = MQD_SIZE_ALIGNED,
169 .supports_cwsr = true,
170 .needs_iommu_device = false,
171 .needs_pci_atomics = true,
172};
173
57 174
58struct kfd_deviceid { 175struct kfd_deviceid {
59 unsigned short did; 176 unsigned short did;
60 const struct kfd_device_info *device_info; 177 const struct kfd_device_info *device_info;
61}; 178};
62 179
63/* Please keep this sorted by increasing device id. */
64static const struct kfd_deviceid supported_devices[] = { 180static const struct kfd_deviceid supported_devices[] = {
181#ifdef KFD_SUPPORT_IOMMU_V2
65 { 0x1304, &kaveri_device_info }, /* Kaveri */ 182 { 0x1304, &kaveri_device_info }, /* Kaveri */
66 { 0x1305, &kaveri_device_info }, /* Kaveri */ 183 { 0x1305, &kaveri_device_info }, /* Kaveri */
67 { 0x1306, &kaveri_device_info }, /* Kaveri */ 184 { 0x1306, &kaveri_device_info }, /* Kaveri */
@@ -88,7 +205,51 @@ static const struct kfd_deviceid supported_devices[] = {
88 { 0x9874, &carrizo_device_info }, /* Carrizo */ 205 { 0x9874, &carrizo_device_info }, /* Carrizo */
89 { 0x9875, &carrizo_device_info }, /* Carrizo */ 206 { 0x9875, &carrizo_device_info }, /* Carrizo */
90 { 0x9876, &carrizo_device_info }, /* Carrizo */ 207 { 0x9876, &carrizo_device_info }, /* Carrizo */
91 { 0x9877, &carrizo_device_info } /* Carrizo */ 208 { 0x9877, &carrizo_device_info }, /* Carrizo */
209#endif
210 { 0x67A0, &hawaii_device_info }, /* Hawaii */
211 { 0x67A1, &hawaii_device_info }, /* Hawaii */
212 { 0x67A2, &hawaii_device_info }, /* Hawaii */
213 { 0x67A8, &hawaii_device_info }, /* Hawaii */
214 { 0x67A9, &hawaii_device_info }, /* Hawaii */
215 { 0x67AA, &hawaii_device_info }, /* Hawaii */
216 { 0x67B0, &hawaii_device_info }, /* Hawaii */
217 { 0x67B1, &hawaii_device_info }, /* Hawaii */
218 { 0x67B8, &hawaii_device_info }, /* Hawaii */
219 { 0x67B9, &hawaii_device_info }, /* Hawaii */
220 { 0x67BA, &hawaii_device_info }, /* Hawaii */
221 { 0x67BE, &hawaii_device_info }, /* Hawaii */
222 { 0x6920, &tonga_device_info }, /* Tonga */
223 { 0x6921, &tonga_device_info }, /* Tonga */
224 { 0x6928, &tonga_device_info }, /* Tonga */
225 { 0x6929, &tonga_device_info }, /* Tonga */
226 { 0x692B, &tonga_device_info }, /* Tonga */
227 { 0x692F, &tonga_vf_device_info }, /* Tonga vf */
228 { 0x6938, &tonga_device_info }, /* Tonga */
229 { 0x6939, &tonga_device_info }, /* Tonga */
230 { 0x7300, &fiji_device_info }, /* Fiji */
231 { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
232 { 0x67C0, &polaris10_device_info }, /* Polaris10 */
233 { 0x67C1, &polaris10_device_info }, /* Polaris10 */
234 { 0x67C2, &polaris10_device_info }, /* Polaris10 */
235 { 0x67C4, &polaris10_device_info }, /* Polaris10 */
236 { 0x67C7, &polaris10_device_info }, /* Polaris10 */
237 { 0x67C8, &polaris10_device_info }, /* Polaris10 */
238 { 0x67C9, &polaris10_device_info }, /* Polaris10 */
239 { 0x67CA, &polaris10_device_info }, /* Polaris10 */
240 { 0x67CC, &polaris10_device_info }, /* Polaris10 */
241 { 0x67CF, &polaris10_device_info }, /* Polaris10 */
242 { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
243 { 0x67DF, &polaris10_device_info }, /* Polaris10 */
244 { 0x67E0, &polaris11_device_info }, /* Polaris11 */
245 { 0x67E1, &polaris11_device_info }, /* Polaris11 */
246 { 0x67E3, &polaris11_device_info }, /* Polaris11 */
247 { 0x67E7, &polaris11_device_info }, /* Polaris11 */
248 { 0x67E8, &polaris11_device_info }, /* Polaris11 */
249 { 0x67E9, &polaris11_device_info }, /* Polaris11 */
250 { 0x67EB, &polaris11_device_info }, /* Polaris11 */
251 { 0x67EF, &polaris11_device_info }, /* Polaris11 */
252 { 0x67FF, &polaris11_device_info }, /* Polaris11 */
92}; 253};
93 254
94static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 255static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
@@ -127,6 +288,21 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
127 return NULL; 288 return NULL;
128 } 289 }
129 290
291 if (device_info->needs_pci_atomics) {
292 /* Allow BIF to recode atomics to PCIe 3.0
293 * AtomicOps. 32 and 64-bit requests are possible and
294 * must be supported.
295 */
296 if (pci_enable_atomic_ops_to_root(pdev,
297 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
298 PCI_EXP_DEVCAP2_ATOMIC_COMP64) < 0) {
299 dev_info(kfd_device,
300 "skipped device %x:%x, PCI rejects atomics",
301 pdev->vendor, pdev->device);
302 return NULL;
303 }
304 }
305
130 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); 306 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
131 if (!kfd) 307 if (!kfd)
132 return NULL; 308 return NULL;
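This probe-time check is where the new needs_pci_atomics flag bites: the dGPU entries that rely on PCIe AtomicOps (Tonga, Fiji and Polaris in the tables above) are skipped when atomics cannot be routed to the root complex, while Hawaii and the virtual-function entries leave the flag false. A minimal restatement of the gating, using the same PCI core helper the patch calls (the helper and capability flags are real PCI core symbols; the wrapper is illustrative):

/* Require 32- and 64-bit AtomicOp completion at the root port, or refuse
 * to bind KFD to this device.  pci_enable_atomic_ops_to_root() returns 0
 * only when every bridge on the path can route AtomicOps and the root
 * port advertises the requested completer widths.
 */
static bool example_pci_atomics_ok(struct pci_dev *pdev)
{
	return pci_enable_atomic_ops_to_root(pdev,
			PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
			PCI_EXP_DEVCAP2_ATOMIC_COMP64) == 0;
}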
@@ -144,77 +320,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
144 return kfd; 320 return kfd;
145} 321}
146 322
147static bool device_iommu_pasid_init(struct kfd_dev *kfd)
148{
149 const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
150 AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
151 AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
152
153 struct amd_iommu_device_info iommu_info;
154 unsigned int pasid_limit;
155 int err;
156
157 err = amd_iommu_device_info(kfd->pdev, &iommu_info);
158 if (err < 0) {
159 dev_err(kfd_device,
160 "error getting iommu info. is the iommu enabled?\n");
161 return false;
162 }
163
164 if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
165 dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
166 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
167 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
168 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
169 != 0);
170 return false;
171 }
172
173 pasid_limit = min_t(unsigned int,
174 (unsigned int)(1 << kfd->device_info->max_pasid_bits),
175 iommu_info.max_pasids);
176
177 if (!kfd_set_pasid_limit(pasid_limit)) {
178 dev_err(kfd_device, "error setting pasid limit\n");
179 return false;
180 }
181
182 return true;
183}
184
185static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
186{
187 struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
188
189 if (dev)
190 kfd_process_iommu_unbind_callback(dev, pasid);
191}
192
193/*
194 * This function called by IOMMU driver on PPR failure
195 */
196static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
197 unsigned long address, u16 flags)
198{
199 struct kfd_dev *dev;
200
201 dev_warn(kfd_device,
202 "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
203 PCI_BUS_NUM(pdev->devfn),
204 PCI_SLOT(pdev->devfn),
205 PCI_FUNC(pdev->devfn),
206 pasid,
207 address,
208 flags);
209
210 dev = kfd_device_by_pci_dev(pdev);
211 if (!WARN_ON(!dev))
212 kfd_signal_iommu_event(dev, pasid, address,
213 flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
214
215 return AMD_IOMMU_INV_PRI_RSP_INVALID;
216}
217
218static void kfd_cwsr_init(struct kfd_dev *kfd) 323static void kfd_cwsr_init(struct kfd_dev *kfd)
219{ 324{
220 if (cwsr_enable && kfd->device_info->supports_cwsr) { 325 if (cwsr_enable && kfd->device_info->supports_cwsr) {
@@ -304,11 +409,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
304 goto device_queue_manager_error; 409 goto device_queue_manager_error;
305 } 410 }
306 411
307 if (!device_iommu_pasid_init(kfd)) { 412 if (kfd_iommu_device_init(kfd)) {
308 dev_err(kfd_device, 413 dev_err(kfd_device, "Error initializing iommuv2\n");
309 "Error initializing iommuv2 for device %x:%x\n", 414 goto device_iommu_error;
310 kfd->pdev->vendor, kfd->pdev->device);
311 goto device_iommu_pasid_error;
312 } 415 }
313 416
314 kfd_cwsr_init(kfd); 417 kfd_cwsr_init(kfd);
@@ -323,12 +426,12 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
323 kfd->pdev->device); 426 kfd->pdev->device);
324 427
325 pr_debug("Starting kfd with the following scheduling policy %d\n", 428 pr_debug("Starting kfd with the following scheduling policy %d\n",
326 sched_policy); 429 kfd->dqm->sched_policy);
327 430
328 goto out; 431 goto out;
329 432
330kfd_resume_error: 433kfd_resume_error:
331device_iommu_pasid_error: 434device_iommu_error:
332 device_queue_manager_uninit(kfd->dqm); 435 device_queue_manager_uninit(kfd->dqm);
333device_queue_manager_error: 436device_queue_manager_error:
334 kfd_interrupt_exit(kfd); 437 kfd_interrupt_exit(kfd);
@@ -367,40 +470,45 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
367 if (!kfd->init_complete) 470 if (!kfd->init_complete)
368 return; 471 return;
369 472
370 kfd->dqm->ops.stop(kfd->dqm); 473 /* For first KFD device suspend all the KFD processes */
474 if (atomic_inc_return(&kfd_device_suspended) == 1)
475 kfd_suspend_all_processes();
371 476
372 kfd_unbind_processes_from_device(kfd); 477 kfd->dqm->ops.stop(kfd->dqm);
373 478
374 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL); 479 kfd_iommu_suspend(kfd);
375 amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
376 amd_iommu_free_device(kfd->pdev);
377} 480}
378 481
379int kgd2kfd_resume(struct kfd_dev *kfd) 482int kgd2kfd_resume(struct kfd_dev *kfd)
380{ 483{
484 int ret, count;
485
381 if (!kfd->init_complete) 486 if (!kfd->init_complete)
382 return 0; 487 return 0;
383 488
384 return kfd_resume(kfd); 489 ret = kfd_resume(kfd);
490 if (ret)
491 return ret;
492
493 count = atomic_dec_return(&kfd_device_suspended);
494 WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
495 if (count == 0)
496 ret = kfd_resume_all_processes();
385 497
498 return ret;
386} 499}
387 500
388static int kfd_resume(struct kfd_dev *kfd) 501static int kfd_resume(struct kfd_dev *kfd)
389{ 502{
390 int err = 0; 503 int err = 0;
391 unsigned int pasid_limit = kfd_get_pasid_limit();
392 504
393 err = amd_iommu_init_device(kfd->pdev, pasid_limit); 505 err = kfd_iommu_resume(kfd);
394 if (err) 506 if (err) {
395 return -ENXIO; 507 dev_err(kfd_device,
396 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, 508 "Failed to resume IOMMU for device %x:%x\n",
397 iommu_pasid_shutdown_callback); 509 kfd->pdev->vendor, kfd->pdev->device);
398 amd_iommu_set_invalid_ppr_cb(kfd->pdev, 510 return err;
399 iommu_invalid_ppr_cb); 511 }
400
401 err = kfd_bind_processes_to_device(kfd);
402 if (err)
403 goto processes_bind_error;
404 512
405 err = kfd->dqm->ops.start(kfd->dqm); 513 err = kfd->dqm->ops.start(kfd->dqm);
406 if (err) { 514 if (err) {
@@ -413,9 +521,7 @@ static int kfd_resume(struct kfd_dev *kfd)
413 return err; 521 return err;
414 522
415dqm_start_error: 523dqm_start_error:
416processes_bind_error: 524 kfd_iommu_suspend(kfd);
417 amd_iommu_free_device(kfd->pdev);
418
419 return err; 525 return err;
420} 526}
421 527
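The suspend/resume hunks above replace the per-device IOMMU teardown with two pieces: a per-device kfd_iommu_suspend()/kfd_iommu_resume() pair, and a global kfd_device_suspended counter that makes process eviction happen exactly once across all devices. A compressed sketch of just the counting discipline (per-device error handling elided; the kfd_* helpers are the ones referenced by the patch):

static atomic_t example_suspended = ATOMIC_INIT(0);

static void example_suspend(struct kfd_dev *kfd)
{
	/* 0 -> 1 transition: the first suspending device evicts every
	 * KFD process; subsequent devices only stop their own queues.
	 */
	if (atomic_inc_return(&example_suspended) == 1)
		kfd_suspend_all_processes();

	kfd->dqm->ops.stop(kfd->dqm);
	kfd_iommu_suspend(kfd);
}

static int example_resume(struct kfd_dev *kfd)
{
	int count;

	kfd_iommu_resume(kfd);		/* error handling elided */
	kfd->dqm->ops.start(kfd->dqm);	/* error handling elided */

	count = atomic_dec_return(&example_suspended);
	WARN_ONCE(count < 0, "unbalanced KFD suspend/resume");

	/* back to 0: the last resuming device restores all processes */
	return count == 0 ? kfd_resume_all_processes() : 0;
}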
@@ -435,6 +541,54 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
435 spin_unlock(&kfd->interrupt_lock); 541 spin_unlock(&kfd->interrupt_lock);
436} 542}
437 543
 544/** kgd2kfd_schedule_evict_and_restore_process - Schedule delayed work that
 545 * prepares for safe eviction of KFD BOs belonging to the specified
 546 * process.
547 *
548 * @mm: mm_struct that identifies the specified KFD process
549 * @fence: eviction fence attached to KFD process BOs
550 *
551 */
552int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
553 struct dma_fence *fence)
554{
555 struct kfd_process *p;
556 unsigned long active_time;
557 unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
558
559 if (!fence)
560 return -EINVAL;
561
562 if (dma_fence_is_signaled(fence))
563 return 0;
564
565 p = kfd_lookup_process_by_mm(mm);
566 if (!p)
567 return -ENODEV;
568
569 if (fence->seqno == p->last_eviction_seqno)
570 goto out;
571
572 p->last_eviction_seqno = fence->seqno;
573
574 /* Avoid KFD process starvation. Wait for at least
575 * PROCESS_ACTIVE_TIME_MS before evicting the process again
576 */
577 active_time = get_jiffies_64() - p->last_restore_timestamp;
578 if (delay_jiffies > active_time)
579 delay_jiffies -= active_time;
580 else
581 delay_jiffies = 0;
582
583 /* During process initialization eviction_work.dwork is initialized
584 * to kfd_evict_bo_worker
585 */
586 schedule_delayed_work(&p->eviction_work, delay_jiffies);
587out:
588 kfd_unref_process(p);
589 return 0;
590}
591
438static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 592static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
439 unsigned int chunk_size) 593 unsigned int chunk_size)
440{ 594{
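The delay computed in kgd2kfd_schedule_evict_and_restore_process() is just the unexpired remainder of the minimum activity window, clamped at zero. A worked restatement, taking 10 ms as a purely illustrative value for PROCESS_ACTIVE_TIME_MS: a process restored 3 ms ago would be evicted after a further 7 ms, while one restored 15 ms ago would be evicted immediately.

/* delay = max(0, PROCESS_ACTIVE_TIME_MS - time since last restore) */
static unsigned long example_evict_delay(struct kfd_process *p)
{
	unsigned long window = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
	unsigned long active = get_jiffies_64() - p->last_restore_timestamp;

	return active < window ? window - active : 0;
}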
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b21285afa4ea..b3b6dab71638 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -21,10 +21,11 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/ratelimit.h>
25#include <linux/printk.h>
24#include <linux/slab.h> 26#include <linux/slab.h>
25#include <linux/list.h> 27#include <linux/list.h>
26#include <linux/types.h> 28#include <linux/types.h>
27#include <linux/printk.h>
28#include <linux/bitops.h> 29#include <linux/bitops.h>
29#include <linux/sched.h> 30#include <linux/sched.h>
30#include "kfd_priv.h" 31#include "kfd_priv.h"
@@ -118,9 +119,8 @@ static int allocate_vmid(struct device_queue_manager *dqm,
118 if (dqm->vmid_bitmap == 0) 119 if (dqm->vmid_bitmap == 0)
119 return -ENOMEM; 120 return -ENOMEM;
120 121
121 bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, 122 bit = ffs(dqm->vmid_bitmap) - 1;
122 dqm->dev->vm_info.vmid_num_kfd); 123 dqm->vmid_bitmap &= ~(1 << bit);
123 clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
124 124
125 allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd; 125 allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
126 pr_debug("vmid allocation %d\n", allocated_vmid); 126 pr_debug("vmid allocation %d\n", allocated_vmid);
@@ -130,6 +130,15 @@ static int allocate_vmid(struct device_queue_manager *dqm,
130 set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid); 130 set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
131 program_sh_mem_settings(dqm, qpd); 131 program_sh_mem_settings(dqm, qpd);
132 132
133 /* qpd->page_table_base is set earlier when register_process()
134 * is called, i.e. when the first queue is created.
135 */
136 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
137 qpd->vmid,
138 qpd->page_table_base);
139 /* invalidate the VM context after pasid and vmid mapping is set up */
140 kfd_flush_tlb(qpd_to_pdd(qpd));
141
133 return 0; 142 return 0;
134} 143}
135 144
@@ -139,10 +148,12 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
139{ 148{
140 int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd; 149 int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
141 150
151 kfd_flush_tlb(qpd_to_pdd(qpd));
152
142 /* Release the vmid mapping */ 153 /* Release the vmid mapping */
143 set_pasid_vmid_mapping(dqm, 0, qpd->vmid); 154 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
144 155
145 set_bit(bit, (unsigned long *)&dqm->vmid_bitmap); 156 dqm->vmid_bitmap |= (1 << bit);
146 qpd->vmid = 0; 157 qpd->vmid = 0;
147 q->properties.vmid = 0; 158 q->properties.vmid = 0;
148} 159}
@@ -170,6 +181,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
170 goto out_unlock; 181 goto out_unlock;
171 } 182 }
172 q->properties.vmid = qpd->vmid; 183 q->properties.vmid = qpd->vmid;
184 /*
185 * Eviction state logic: we only mark active queues as evicted
186 * to avoid the overhead of restoring inactive queues later
187 */
188 if (qpd->evicted)
189 q->properties.is_evicted = (q->properties.queue_size > 0 &&
190 q->properties.queue_percent > 0 &&
191 q->properties.queue_address != 0);
173 192
174 q->properties.tba_addr = qpd->tba_addr; 193 q->properties.tba_addr = qpd->tba_addr;
175 q->properties.tma_addr = qpd->tma_addr; 194 q->properties.tma_addr = qpd->tma_addr;
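The same three-part test shows up in all three creation/update paths touched in this file (here, in update_queue() and in create_queue_cpsch()): when the process is already evicted, a new or updated queue is flagged is_evicted only if it would otherwise be runnable, so the later restore pass has nothing spurious to reload. Restated as a helper (the name is illustrative; the fields are the ones the patch tests):

/* A queue is worth evicting/restoring only if it could actually run. */
static bool example_queue_is_runnable(const struct queue_properties *p)
{
	return p->queue_size > 0 &&
	       p->queue_percent > 0 &&
	       p->queue_address != 0;
}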
@@ -223,12 +242,8 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
223 continue; 242 continue;
224 243
225 if (dqm->allocated_queues[pipe] != 0) { 244 if (dqm->allocated_queues[pipe] != 0) {
226 bit = find_first_bit( 245 bit = ffs(dqm->allocated_queues[pipe]) - 1;
227 (unsigned long *)&dqm->allocated_queues[pipe], 246 dqm->allocated_queues[pipe] &= ~(1 << bit);
228 get_queues_per_pipe(dqm));
229
230 clear_bit(bit,
231 (unsigned long *)&dqm->allocated_queues[pipe]);
232 q->pipe = pipe; 247 q->pipe = pipe;
233 q->queue = bit; 248 q->queue = bit;
234 set = true; 249 set = true;
@@ -249,7 +264,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
249static inline void deallocate_hqd(struct device_queue_manager *dqm, 264static inline void deallocate_hqd(struct device_queue_manager *dqm,
250 struct queue *q) 265 struct queue *q)
251{ 266{
252 set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]); 267 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
253} 268}
254 269
255static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, 270static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
@@ -371,21 +386,35 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
371{ 386{
372 int retval; 387 int retval;
373 struct mqd_manager *mqd; 388 struct mqd_manager *mqd;
389 struct kfd_process_device *pdd;
374 bool prev_active = false; 390 bool prev_active = false;
375 391
376 mutex_lock(&dqm->lock); 392 mutex_lock(&dqm->lock);
393 pdd = kfd_get_process_device_data(q->device, q->process);
394 if (!pdd) {
395 retval = -ENODEV;
396 goto out_unlock;
397 }
377 mqd = dqm->ops.get_mqd_manager(dqm, 398 mqd = dqm->ops.get_mqd_manager(dqm,
378 get_mqd_type_from_queue_type(q->properties.type)); 399 get_mqd_type_from_queue_type(q->properties.type));
379 if (!mqd) { 400 if (!mqd) {
380 retval = -ENOMEM; 401 retval = -ENOMEM;
381 goto out_unlock; 402 goto out_unlock;
382 } 403 }
404 /*
405 * Eviction state logic: we only mark active queues as evicted
406 * to avoid the overhead of restoring inactive queues later
407 */
408 if (pdd->qpd.evicted)
409 q->properties.is_evicted = (q->properties.queue_size > 0 &&
410 q->properties.queue_percent > 0 &&
411 q->properties.queue_address != 0);
383 412
384 /* Save previous activity state for counters */ 413 /* Save previous activity state for counters */
385 prev_active = q->properties.is_active; 414 prev_active = q->properties.is_active;
386 415
387 /* Make sure the queue is unmapped before updating the MQD */ 416 /* Make sure the queue is unmapped before updating the MQD */
388 if (sched_policy != KFD_SCHED_POLICY_NO_HWS) { 417 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
389 retval = unmap_queues_cpsch(dqm, 418 retval = unmap_queues_cpsch(dqm,
390 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); 419 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
391 if (retval) { 420 if (retval) {
@@ -417,7 +446,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
417 else if (!q->properties.is_active && prev_active) 446 else if (!q->properties.is_active && prev_active)
418 dqm->queue_count--; 447 dqm->queue_count--;
419 448
420 if (sched_policy != KFD_SCHED_POLICY_NO_HWS) 449 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
421 retval = map_queues_cpsch(dqm); 450 retval = map_queues_cpsch(dqm);
422 else if (q->properties.is_active && 451 else if (q->properties.is_active &&
423 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || 452 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
@@ -451,10 +480,193 @@ static struct mqd_manager *get_mqd_manager(
451 return mqd; 480 return mqd;
452} 481}
453 482
483static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
484 struct qcm_process_device *qpd)
485{
486 struct queue *q;
487 struct mqd_manager *mqd;
488 struct kfd_process_device *pdd;
489 int retval = 0;
490
491 mutex_lock(&dqm->lock);
492 if (qpd->evicted++ > 0) /* already evicted, do nothing */
493 goto out;
494
495 pdd = qpd_to_pdd(qpd);
496 pr_info_ratelimited("Evicting PASID %u queues\n",
497 pdd->process->pasid);
498
499 /* deactivate all active queues on the qpd */
500 list_for_each_entry(q, &qpd->queues_list, list) {
501 if (!q->properties.is_active)
502 continue;
503 mqd = dqm->ops.get_mqd_manager(dqm,
504 get_mqd_type_from_queue_type(q->properties.type));
505 if (!mqd) { /* should not be here */
506 pr_err("Cannot evict queue, mqd mgr is NULL\n");
507 retval = -ENOMEM;
508 goto out;
509 }
510 q->properties.is_evicted = true;
511 q->properties.is_active = false;
512 retval = mqd->destroy_mqd(mqd, q->mqd,
513 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
514 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
515 if (retval)
516 goto out;
517 dqm->queue_count--;
518 }
519
520out:
521 mutex_unlock(&dqm->lock);
522 return retval;
523}
524
525static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
526 struct qcm_process_device *qpd)
527{
528 struct queue *q;
529 struct kfd_process_device *pdd;
530 int retval = 0;
531
532 mutex_lock(&dqm->lock);
533 if (qpd->evicted++ > 0) /* already evicted, do nothing */
534 goto out;
535
536 pdd = qpd_to_pdd(qpd);
537 pr_info_ratelimited("Evicting PASID %u queues\n",
538 pdd->process->pasid);
539
540 /* deactivate all active queues on the qpd */
541 list_for_each_entry(q, &qpd->queues_list, list) {
542 if (!q->properties.is_active)
543 continue;
544 q->properties.is_evicted = true;
545 q->properties.is_active = false;
546 dqm->queue_count--;
547 }
548 retval = execute_queues_cpsch(dqm,
549 qpd->is_debug ?
550 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
551 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
552
553out:
554 mutex_unlock(&dqm->lock);
555 return retval;
556}
557
558static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
559 struct qcm_process_device *qpd)
560{
561 struct queue *q;
562 struct mqd_manager *mqd;
563 struct kfd_process_device *pdd;
564 uint32_t pd_base;
565 int retval = 0;
566
567 pdd = qpd_to_pdd(qpd);
568 /* Retrieve PD base */
569 pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
570
571 mutex_lock(&dqm->lock);
572 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
573 goto out;
574 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
575 qpd->evicted--;
576 goto out;
577 }
578
579 pr_info_ratelimited("Restoring PASID %u queues\n",
580 pdd->process->pasid);
581
582 /* Update PD Base in QPD */
583 qpd->page_table_base = pd_base;
584 pr_debug("Updated PD address to 0x%08x\n", pd_base);
585
586 if (!list_empty(&qpd->queues_list)) {
587 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
588 dqm->dev->kgd,
589 qpd->vmid,
590 qpd->page_table_base);
591 kfd_flush_tlb(pdd);
592 }
593
594 /* re-activate all evicted queues on the qpd */
595 list_for_each_entry(q, &qpd->queues_list, list) {
596 if (!q->properties.is_evicted)
597 continue;
598 mqd = dqm->ops.get_mqd_manager(dqm,
599 get_mqd_type_from_queue_type(q->properties.type));
600 if (!mqd) { /* should not be here */
601 pr_err("Cannot restore queue, mqd mgr is NULL\n");
602 retval = -ENOMEM;
603 goto out;
604 }
605 q->properties.is_evicted = false;
606 q->properties.is_active = true;
607 retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
608 q->queue, &q->properties,
609 q->process->mm);
610 if (retval)
611 goto out;
612 dqm->queue_count++;
613 }
614 qpd->evicted = 0;
615out:
616 mutex_unlock(&dqm->lock);
617 return retval;
618}
619
620static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
621 struct qcm_process_device *qpd)
622{
623 struct queue *q;
624 struct kfd_process_device *pdd;
625 uint32_t pd_base;
626 int retval = 0;
627
628 pdd = qpd_to_pdd(qpd);
629 /* Retrieve PD base */
630 pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
631
632 mutex_lock(&dqm->lock);
633 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
634 goto out;
635 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
636 qpd->evicted--;
637 goto out;
638 }
639
640 pr_info_ratelimited("Restoring PASID %u queues\n",
641 pdd->process->pasid);
642
643 /* Update PD Base in QPD */
644 qpd->page_table_base = pd_base;
645 pr_debug("Updated PD address to 0x%08x\n", pd_base);
646
647 /* re-activate all evicted queues on the qpd */
648 list_for_each_entry(q, &qpd->queues_list, list) {
649 if (!q->properties.is_evicted)
650 continue;
651 q->properties.is_evicted = false;
652 q->properties.is_active = true;
653 dqm->queue_count++;
654 }
655 retval = execute_queues_cpsch(dqm,
656 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
657 if (!retval)
658 qpd->evicted = 0;
659out:
660 mutex_unlock(&dqm->lock);
661 return retval;
662}
663
454static int register_process(struct device_queue_manager *dqm, 664static int register_process(struct device_queue_manager *dqm,
455 struct qcm_process_device *qpd) 665 struct qcm_process_device *qpd)
456{ 666{
457 struct device_process_node *n; 667 struct device_process_node *n;
668 struct kfd_process_device *pdd;
669 uint32_t pd_base;
458 int retval; 670 int retval;
459 671
460 n = kzalloc(sizeof(*n), GFP_KERNEL); 672 n = kzalloc(sizeof(*n), GFP_KERNEL);
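Both the no-HWS and HWS eviction paths above gate the real work on the new qpd->evicted counter, which acts as a per-process-device suspend count: queues are deactivated only on the 0 to 1 transition, and reloaded only when the last eviction reason goes away (the patch clears the count to zero only after every queue has been reloaded successfully). A compressed sketch of that counting discipline, with the queue unmap/remap and locking elided:

static int example_evict(struct qcm_process_device *qpd)
{
	if (qpd->evicted++ > 0)
		return 0;		/* already evicted by another caller */

	/* ... deactivate the process's queues here (0 -> 1) ... */
	return 0;
}

static int example_restore(struct qcm_process_device *qpd)
{
	if (WARN_ON_ONCE(!qpd->evicted))
		return 0;		/* unbalanced restore */

	if (qpd->evicted > 1) {
		qpd->evicted--;		/* still evicted for another reason */
		return 0;
	}

	/* ... reload the queues, then drop the final reference ... */
	qpd->evicted = 0;
	return 0;
}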
@@ -463,9 +675,16 @@ static int register_process(struct device_queue_manager *dqm,
463 675
464 n->qpd = qpd; 676 n->qpd = qpd;
465 677
678 pdd = qpd_to_pdd(qpd);
679 /* Retrieve PD base */
680 pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
681
466 mutex_lock(&dqm->lock); 682 mutex_lock(&dqm->lock);
467 list_add(&n->list, &dqm->queues); 683 list_add(&n->list, &dqm->queues);
468 684
685 /* Update PD Base in QPD */
686 qpd->page_table_base = pd_base;
687
469 retval = dqm->asic_ops.update_qpd(dqm, qpd); 688 retval = dqm->asic_ops.update_qpd(dqm, qpd);
470 689
471 dqm->processes_count++; 690 dqm->processes_count++;
@@ -589,10 +808,8 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
589 if (dqm->sdma_bitmap == 0) 808 if (dqm->sdma_bitmap == 0)
590 return -ENOMEM; 809 return -ENOMEM;
591 810
592 bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap, 811 bit = ffs(dqm->sdma_bitmap) - 1;
593 CIK_SDMA_QUEUES); 812 dqm->sdma_bitmap &= ~(1 << bit);
594
595 clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
596 *sdma_queue_id = bit; 813 *sdma_queue_id = bit;
597 814
598 return 0; 815 return 0;
@@ -603,7 +820,7 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
603{ 820{
604 if (sdma_queue_id >= CIK_SDMA_QUEUES) 821 if (sdma_queue_id >= CIK_SDMA_QUEUES)
605 return; 822 return;
606 set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap); 823 dqm->sdma_bitmap |= (1 << sdma_queue_id);
607} 824}
608 825
609static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, 826static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
@@ -840,6 +1057,14 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
840 retval = -ENOMEM; 1057 retval = -ENOMEM;
841 goto out; 1058 goto out;
842 } 1059 }
1060 /*
1061 * Eviction state logic: we only mark active queues as evicted
1062 * to avoid the overhead of restoring inactive queues later
1063 */
1064 if (qpd->evicted)
1065 q->properties.is_evicted = (q->properties.queue_size > 0 &&
1066 q->properties.queue_percent > 0 &&
1067 q->properties.queue_address != 0);
843 1068
844 dqm->asic_ops.init_sdma_vm(dqm, q, qpd); 1069 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
845 1070
@@ -1097,7 +1322,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1097 alternate_aperture_base, 1322 alternate_aperture_base,
1098 alternate_aperture_size); 1323 alternate_aperture_size);
1099 1324
1100 if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0)) 1325 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
1101 program_sh_mem_settings(dqm, qpd); 1326 program_sh_mem_settings(dqm, qpd);
1102 1327
1103 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n", 1328 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
@@ -1242,8 +1467,24 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1242 if (!dqm) 1467 if (!dqm)
1243 return NULL; 1468 return NULL;
1244 1469
1470 switch (dev->device_info->asic_family) {
1471 /* HWS is not available on Hawaii. */
1472 case CHIP_HAWAII:
1473 /* HWS depends on CWSR for timely dequeue. CWSR is not
1474 * available on Tonga.
1475 *
1476 * FIXME: This argument also applies to Kaveri.
1477 */
1478 case CHIP_TONGA:
1479 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
1480 break;
1481 default:
1482 dqm->sched_policy = sched_policy;
1483 break;
1484 }
1485
1245 dqm->dev = dev; 1486 dqm->dev = dev;
1246 switch (sched_policy) { 1487 switch (dqm->sched_policy) {
1247 case KFD_SCHED_POLICY_HWS: 1488 case KFD_SCHED_POLICY_HWS:
1248 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: 1489 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1249 /* initialize dqm for cp scheduling */ 1490 /* initialize dqm for cp scheduling */
@@ -1262,6 +1503,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1262 dqm->ops.set_cache_memory_policy = set_cache_memory_policy; 1503 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1263 dqm->ops.set_trap_handler = set_trap_handler; 1504 dqm->ops.set_trap_handler = set_trap_handler;
1264 dqm->ops.process_termination = process_termination_cpsch; 1505 dqm->ops.process_termination = process_termination_cpsch;
1506 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
1507 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
1265 break; 1508 break;
1266 case KFD_SCHED_POLICY_NO_HWS: 1509 case KFD_SCHED_POLICY_NO_HWS:
1267 /* initialize dqm for no cp scheduling */ 1510 /* initialize dqm for no cp scheduling */
@@ -1278,9 +1521,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1278 dqm->ops.set_cache_memory_policy = set_cache_memory_policy; 1521 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1279 dqm->ops.set_trap_handler = set_trap_handler; 1522 dqm->ops.set_trap_handler = set_trap_handler;
1280 dqm->ops.process_termination = process_termination_nocpsch; 1523 dqm->ops.process_termination = process_termination_nocpsch;
1524 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
1525 dqm->ops.restore_process_queues =
1526 restore_process_queues_nocpsch;
1281 break; 1527 break;
1282 default: 1528 default:
1283 pr_err("Invalid scheduling policy %d\n", sched_policy); 1529 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
1284 goto out_free; 1530 goto out_free;
1285 } 1531 }
1286 1532
@@ -1292,6 +1538,17 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1292 case CHIP_KAVERI: 1538 case CHIP_KAVERI:
1293 device_queue_manager_init_cik(&dqm->asic_ops); 1539 device_queue_manager_init_cik(&dqm->asic_ops);
1294 break; 1540 break;
1541
1542 case CHIP_HAWAII:
1543 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
1544 break;
1545
1546 case CHIP_TONGA:
1547 case CHIP_FIJI:
1548 case CHIP_POLARIS10:
1549 case CHIP_POLARIS11:
1550 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
1551 break;
1295 default: 1552 default:
1296 WARN(1, "Unexpected ASIC family %u", 1553 WARN(1, "Unexpected ASIC family %u",
1297 dev->device_info->asic_family); 1554 dev->device_info->asic_family);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c61b693bfa8c..412beff3281d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -79,6 +79,10 @@ struct device_process_node {
79 * 79 *
80 * @process_termination: Clears all process queues belongs to that device. 80 * @process_termination: Clears all process queues belongs to that device.
81 * 81 *
82 * @evict_process_queues: Evict all active queues of a process
83 *
84 * @restore_process_queues: Restore all evicted queues of a process
85 *
82 */ 86 */
83 87
84struct device_queue_manager_ops { 88struct device_queue_manager_ops {
@@ -129,6 +133,11 @@ struct device_queue_manager_ops {
129 133
130 int (*process_termination)(struct device_queue_manager *dqm, 134 int (*process_termination)(struct device_queue_manager *dqm,
131 struct qcm_process_device *qpd); 135 struct qcm_process_device *qpd);
136
137 int (*evict_process_queues)(struct device_queue_manager *dqm,
138 struct qcm_process_device *qpd);
139 int (*restore_process_queues)(struct device_queue_manager *dqm,
140 struct qcm_process_device *qpd);
132}; 141};
133 142
134struct device_queue_manager_asic_ops { 143struct device_queue_manager_asic_ops {
@@ -180,12 +189,17 @@ struct device_queue_manager {
180 unsigned int *fence_addr; 189 unsigned int *fence_addr;
181 struct kfd_mem_obj *fence_mem; 190 struct kfd_mem_obj *fence_mem;
182 bool active_runlist; 191 bool active_runlist;
192 int sched_policy;
183}; 193};
184 194
185void device_queue_manager_init_cik( 195void device_queue_manager_init_cik(
186 struct device_queue_manager_asic_ops *asic_ops); 196 struct device_queue_manager_asic_ops *asic_ops);
197void device_queue_manager_init_cik_hawaii(
198 struct device_queue_manager_asic_ops *asic_ops);
187void device_queue_manager_init_vi( 199void device_queue_manager_init_vi(
188 struct device_queue_manager_asic_ops *asic_ops); 200 struct device_queue_manager_asic_ops *asic_ops);
201void device_queue_manager_init_vi_tonga(
202 struct device_queue_manager_asic_ops *asic_ops);
189void program_sh_mem_settings(struct device_queue_manager *dqm, 203void program_sh_mem_settings(struct device_queue_manager *dqm,
190 struct qcm_process_device *qpd); 204 struct qcm_process_device *qpd);
191unsigned int get_queues_num(struct device_queue_manager *dqm); 205unsigned int get_queues_num(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 28e48c90c596..aed4c21417bf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -34,8 +34,13 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
34 uint64_t alternate_aperture_size); 34 uint64_t alternate_aperture_size);
35static int update_qpd_cik(struct device_queue_manager *dqm, 35static int update_qpd_cik(struct device_queue_manager *dqm,
36 struct qcm_process_device *qpd); 36 struct qcm_process_device *qpd);
37static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
38 struct qcm_process_device *qpd);
37static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, 39static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
38 struct qcm_process_device *qpd); 40 struct qcm_process_device *qpd);
41static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
42 struct queue *q,
43 struct qcm_process_device *qpd);
39 44
40void device_queue_manager_init_cik( 45void device_queue_manager_init_cik(
41 struct device_queue_manager_asic_ops *asic_ops) 46 struct device_queue_manager_asic_ops *asic_ops)
@@ -45,6 +50,14 @@ void device_queue_manager_init_cik(
45 asic_ops->init_sdma_vm = init_sdma_vm; 50 asic_ops->init_sdma_vm = init_sdma_vm;
46} 51}
47 52
53void device_queue_manager_init_cik_hawaii(
54 struct device_queue_manager_asic_ops *asic_ops)
55{
56 asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
57 asic_ops->update_qpd = update_qpd_cik_hawaii;
58 asic_ops->init_sdma_vm = init_sdma_vm_hawaii;
59}
60
48static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) 61static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
49{ 62{
50 /* In 64-bit mode, we can only control the top 3 bits of the LDS, 63 /* In 64-bit mode, we can only control the top 3 bits of the LDS,
@@ -132,6 +145,36 @@ static int update_qpd_cik(struct device_queue_manager *dqm,
132 return 0; 145 return 0;
133} 146}
134 147
148static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
149 struct qcm_process_device *qpd)
150{
151 struct kfd_process_device *pdd;
152 unsigned int temp;
153
154 pdd = qpd_to_pdd(qpd);
155
156 /* check if sh_mem_config register already configured */
157 if (qpd->sh_mem_config == 0) {
158 qpd->sh_mem_config =
159 ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
160 DEFAULT_MTYPE(MTYPE_NONCACHED) |
161 APE1_MTYPE(MTYPE_NONCACHED);
162 qpd->sh_mem_ape1_limit = 0;
163 qpd->sh_mem_ape1_base = 0;
164 }
165
166 /* On dGPU we're always in GPUVM64 addressing mode with 64-bit
167 * aperture addresses.
168 */
169 temp = get_sh_mem_bases_nybble_64(pdd);
170 qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
171
172 pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
173 qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
174
175 return 0;
176}
177
135static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, 178static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
136 struct qcm_process_device *qpd) 179 struct qcm_process_device *qpd)
137{ 180{
@@ -147,3 +190,16 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
147 190
148 q->properties.sdma_vm_addr = value; 191 q->properties.sdma_vm_addr = value;
149} 192}
193
194static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
195 struct queue *q,
196 struct qcm_process_device *qpd)
197{
198 /* On dGPU we're always in GPUVM64 addressing mode with 64-bit
199 * aperture addresses.
200 */
201 q->properties.sdma_vm_addr =
202 ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
203 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
204 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
205}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 2fbce57a2f21..fd60a116be37 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -33,10 +33,21 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
33 enum cache_policy alternate_policy, 33 enum cache_policy alternate_policy,
34 void __user *alternate_aperture_base, 34 void __user *alternate_aperture_base,
35 uint64_t alternate_aperture_size); 35 uint64_t alternate_aperture_size);
36static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
37 struct qcm_process_device *qpd,
38 enum cache_policy default_policy,
39 enum cache_policy alternate_policy,
40 void __user *alternate_aperture_base,
41 uint64_t alternate_aperture_size);
36static int update_qpd_vi(struct device_queue_manager *dqm, 42static int update_qpd_vi(struct device_queue_manager *dqm,
37 struct qcm_process_device *qpd); 43 struct qcm_process_device *qpd);
44static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
45 struct qcm_process_device *qpd);
38static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, 46static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
39 struct qcm_process_device *qpd); 47 struct qcm_process_device *qpd);
48static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
49 struct queue *q,
50 struct qcm_process_device *qpd);
40 51
41void device_queue_manager_init_vi( 52void device_queue_manager_init_vi(
42 struct device_queue_manager_asic_ops *asic_ops) 53 struct device_queue_manager_asic_ops *asic_ops)
@@ -46,6 +57,14 @@ void device_queue_manager_init_vi(
46 asic_ops->init_sdma_vm = init_sdma_vm; 57 asic_ops->init_sdma_vm = init_sdma_vm;
47} 58}
48 59
60void device_queue_manager_init_vi_tonga(
61 struct device_queue_manager_asic_ops *asic_ops)
62{
63 asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
64 asic_ops->update_qpd = update_qpd_vi_tonga;
65 asic_ops->init_sdma_vm = init_sdma_vm_tonga;
66}
67
49static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) 68static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
50{ 69{
51 /* In 64-bit mode, we can only control the top 3 bits of the LDS, 70 /* In 64-bit mode, we can only control the top 3 bits of the LDS,
@@ -103,6 +122,33 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
103 return true; 122 return true;
104} 123}
105 124
125static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
126 struct qcm_process_device *qpd,
127 enum cache_policy default_policy,
128 enum cache_policy alternate_policy,
129 void __user *alternate_aperture_base,
130 uint64_t alternate_aperture_size)
131{
132 uint32_t default_mtype;
133 uint32_t ape1_mtype;
134
135 default_mtype = (default_policy == cache_policy_coherent) ?
136 MTYPE_UC :
137 MTYPE_NC;
138
139 ape1_mtype = (alternate_policy == cache_policy_coherent) ?
140 MTYPE_UC :
141 MTYPE_NC;
142
143 qpd->sh_mem_config =
144 SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
145 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
146 default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
147 ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
148
149 return true;
150}
151
106static int update_qpd_vi(struct device_queue_manager *dqm, 152static int update_qpd_vi(struct device_queue_manager *dqm,
107 struct qcm_process_device *qpd) 153 struct qcm_process_device *qpd)
108{ 154{
@@ -144,6 +190,40 @@ static int update_qpd_vi(struct device_queue_manager *dqm,
144 return 0; 190 return 0;
145} 191}
146 192
193static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
194 struct qcm_process_device *qpd)
195{
196 struct kfd_process_device *pdd;
197 unsigned int temp;
198
199 pdd = qpd_to_pdd(qpd);
200
201 /* check if sh_mem_config register already configured */
202 if (qpd->sh_mem_config == 0) {
203 qpd->sh_mem_config =
204 SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
205 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
206 MTYPE_UC <<
207 SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
208 MTYPE_UC <<
209 SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
210
211 qpd->sh_mem_ape1_limit = 0;
212 qpd->sh_mem_ape1_base = 0;
213 }
214
215 /* On dGPU we're always in GPUVM64 addressing mode with 64-bit
216 * aperture addresses.
217 */
218 temp = get_sh_mem_bases_nybble_64(pdd);
219 qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
220
221 pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
222 temp, qpd->sh_mem_bases);
223
224 return 0;
225}
226
147static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, 227static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
148 struct qcm_process_device *qpd) 228 struct qcm_process_device *qpd)
149{ 229{
@@ -159,3 +239,16 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
159 239
160 q->properties.sdma_vm_addr = value; 240 q->properties.sdma_vm_addr = value;
161} 241}
242
243static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
244 struct queue *q,
245 struct qcm_process_device *qpd)
246{
247 /* On dGPU we're always in GPUVM64 addressing mode with 64-bit
248 * aperture addresses.
249 */
250 q->properties.sdma_vm_addr =
251 ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
252 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
253 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
254}
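set_cache_memory_policy_vi_tonga() maps the requested KFD cache policy straight onto a VI MTYPE: on the dGPU path a coherent request becomes uncached (MTYPE_UC), a non-coherent one becomes MTYPE_NC, and update_qpd_vi_tonga() hard-codes the same uncached defaults. The mapping in isolation (the helper name is illustrative; the enum and MTYPE symbols are the ones used in the patch):

/* dGPU (Tonga-style) cache-policy to VI MTYPE mapping. */
static uint32_t example_policy_to_mtype(enum cache_policy policy)
{
	return (policy == cache_policy_coherent) ? MTYPE_UC : MTYPE_NC;
}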
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 93aae5c1e78b..6fb9c0d46d63 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -30,6 +30,7 @@
30#include <linux/memory.h> 30#include <linux/memory.h>
31#include "kfd_priv.h" 31#include "kfd_priv.h"
32#include "kfd_events.h" 32#include "kfd_events.h"
33#include "kfd_iommu.h"
33#include <linux/device.h> 34#include <linux/device.h>
34 35
35/* 36/*
@@ -837,6 +838,7 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
837 } 838 }
838} 839}
839 840
841#ifdef KFD_SUPPORT_IOMMU_V2
840void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, 842void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
841 unsigned long address, bool is_write_requested, 843 unsigned long address, bool is_write_requested,
842 bool is_execute_requested) 844 bool is_execute_requested)
@@ -905,6 +907,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
905 mutex_unlock(&p->event_mutex); 907 mutex_unlock(&p->event_mutex);
906 kfd_unref_process(p); 908 kfd_unref_process(p);
907} 909}
910#endif /* KFD_SUPPORT_IOMMU_V2 */
908 911
909void kfd_signal_hw_exception_event(unsigned int pasid) 912void kfd_signal_hw_exception_event(unsigned int pasid)
910{ 913{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
new file mode 100644
index 000000000000..c71817963eea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -0,0 +1,357 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/printk.h>
24#include <linux/device.h>
25#include <linux/slab.h>
26#include <linux/pci.h>
27#include <linux/amd-iommu.h>
28#include "kfd_priv.h"
29#include "kfd_dbgmgr.h"
30#include "kfd_topology.h"
31#include "kfd_iommu.h"
32
33static const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
34 AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
35 AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
36
37/** kfd_iommu_check_device - Check whether IOMMU is available for device
38 */
39int kfd_iommu_check_device(struct kfd_dev *kfd)
40{
41 struct amd_iommu_device_info iommu_info;
42 int err;
43
44 if (!kfd->device_info->needs_iommu_device)
45 return -ENODEV;
46
47 iommu_info.flags = 0;
48 err = amd_iommu_device_info(kfd->pdev, &iommu_info);
49 if (err)
50 return err;
51
52 if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags)
53 return -ENODEV;
54
55 return 0;
56}
57
58/** kfd_iommu_device_init - Initialize IOMMU for device
59 */
60int kfd_iommu_device_init(struct kfd_dev *kfd)
61{
62 struct amd_iommu_device_info iommu_info;
63 unsigned int pasid_limit;
64 int err;
65
66 if (!kfd->device_info->needs_iommu_device)
67 return 0;
68
69 iommu_info.flags = 0;
70 err = amd_iommu_device_info(kfd->pdev, &iommu_info);
71 if (err < 0) {
72 dev_err(kfd_device,
73 "error getting iommu info. is the iommu enabled?\n");
74 return -ENODEV;
75 }
76
77 if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
78 dev_err(kfd_device,
79 "error required iommu flags ats %i, pri %i, pasid %i\n",
80 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
81 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
82 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
83 != 0);
84 return -ENODEV;
85 }
86
87 pasid_limit = min_t(unsigned int,
88 (unsigned int)(1 << kfd->device_info->max_pasid_bits),
89 iommu_info.max_pasids);
90
91 if (!kfd_set_pasid_limit(pasid_limit)) {
92 dev_err(kfd_device, "error setting pasid limit\n");
93 return -EBUSY;
94 }
95
96 return 0;
97}
98
99/** kfd_iommu_bind_process_to_device - Have the IOMMU bind a process
100 *
101 * Binds the given process to the given device using its PASID. This
102 * enables IOMMUv2 address translation for the process on the device.
103 *
104 * This function assumes that the process mutex is held.
105 */
106int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
107{
108 struct kfd_dev *dev = pdd->dev;
109 struct kfd_process *p = pdd->process;
110 int err;
111
112 if (!dev->device_info->needs_iommu_device || pdd->bound == PDD_BOUND)
113 return 0;
114
115 if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
116 pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
117 return -EINVAL;
118 }
119
120 err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
121 if (!err)
122 pdd->bound = PDD_BOUND;
123
124 return err;
125}
126
127/** kfd_iommu_unbind_process - Unbind process from all devices
128 *
129 * This removes all IOMMU device bindings of the process. To be used
130 * before process termination.
131 */
132void kfd_iommu_unbind_process(struct kfd_process *p)
133{
134 struct kfd_process_device *pdd;
135
136 list_for_each_entry(pdd, &p->per_device_data, per_device_list)
137 if (pdd->bound == PDD_BOUND)
138 amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
139}
140
141/* Callback for process shutdown invoked by the IOMMU driver */
142static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
143{
144 struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
145 struct kfd_process *p;
146 struct kfd_process_device *pdd;
147
148 if (!dev)
149 return;
150
151 /*
152 * Look for the process that matches the pasid. If there is no such
153 * process, we either released it in amdkfd's own notifier, or there
154 * is a bug. Unfortunately, there is no way to tell...
155 */
156 p = kfd_lookup_process_by_pasid(pasid);
157 if (!p)
158 return;
159
160 pr_debug("Unbinding process %d from IOMMU\n", pasid);
161
162 mutex_lock(kfd_get_dbgmgr_mutex());
163
164 if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
165 if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
166 kfd_dbgmgr_destroy(dev->dbgmgr);
167 dev->dbgmgr = NULL;
168 }
169 }
170
171 mutex_unlock(kfd_get_dbgmgr_mutex());
172
173 mutex_lock(&p->mutex);
174
175 pdd = kfd_get_process_device_data(dev, p);
176 if (pdd)
 177	/* For GPUs relying on the IOMMU, we need to dequeue here
 178	 * while the PASID is still bound.
179 */
180 kfd_process_dequeue_from_device(pdd);
181
182 mutex_unlock(&p->mutex);
183
184 kfd_unref_process(p);
185}
186
 187/* This function is called by the IOMMU driver on PPR failure */
188static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
189 unsigned long address, u16 flags)
190{
191 struct kfd_dev *dev;
192
193 dev_warn(kfd_device,
194 "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
195 PCI_BUS_NUM(pdev->devfn),
196 PCI_SLOT(pdev->devfn),
197 PCI_FUNC(pdev->devfn),
198 pasid,
199 address,
200 flags);
201
202 dev = kfd_device_by_pci_dev(pdev);
203 if (!WARN_ON(!dev))
204 kfd_signal_iommu_event(dev, pasid, address,
205 flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
206
207 return AMD_IOMMU_INV_PRI_RSP_INVALID;
208}
209
210/*
 211 * Bind processes to the device that have been temporarily unbound
212 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
213 */
214static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
215{
216 struct kfd_process_device *pdd;
217 struct kfd_process *p;
218 unsigned int temp;
219 int err = 0;
220
221 int idx = srcu_read_lock(&kfd_processes_srcu);
222
223 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
224 mutex_lock(&p->mutex);
225 pdd = kfd_get_process_device_data(kfd, p);
226
227 if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) {
228 mutex_unlock(&p->mutex);
229 continue;
230 }
231
232 err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
233 p->lead_thread);
234 if (err < 0) {
235 pr_err("Unexpected pasid %d binding failure\n",
236 p->pasid);
237 mutex_unlock(&p->mutex);
238 break;
239 }
240
241 pdd->bound = PDD_BOUND;
242 mutex_unlock(&p->mutex);
243 }
244
245 srcu_read_unlock(&kfd_processes_srcu, idx);
246
247 return err;
248}
249
250/*
251 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
252 * processes will be restored to PDD_BOUND state in
253 * kfd_bind_processes_to_device.
254 */
255static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
256{
257 struct kfd_process_device *pdd;
258 struct kfd_process *p;
259 unsigned int temp;
260
261 int idx = srcu_read_lock(&kfd_processes_srcu);
262
263 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
264 mutex_lock(&p->mutex);
265 pdd = kfd_get_process_device_data(kfd, p);
266
267 if (WARN_ON(!pdd)) {
268 mutex_unlock(&p->mutex);
269 continue;
270 }
271
272 if (pdd->bound == PDD_BOUND)
273 pdd->bound = PDD_BOUND_SUSPENDED;
274 mutex_unlock(&p->mutex);
275 }
276
277 srcu_read_unlock(&kfd_processes_srcu, idx);
278}
279
280/** kfd_iommu_suspend - Prepare IOMMU for suspend
281 *
282 * This unbinds processes from the device and disables the IOMMU for
283 * the device.
284 */
285void kfd_iommu_suspend(struct kfd_dev *kfd)
286{
287 if (!kfd->device_info->needs_iommu_device)
288 return;
289
290 kfd_unbind_processes_from_device(kfd);
291
292 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
293 amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
294 amd_iommu_free_device(kfd->pdev);
295}
296
297/** kfd_iommu_resume - Restore IOMMU after resume
298 *
299 * This reinitializes the IOMMU for the device and re-binds previously
300 * suspended processes to the device.
301 */
302int kfd_iommu_resume(struct kfd_dev *kfd)
303{
304 unsigned int pasid_limit;
305 int err;
306
307 if (!kfd->device_info->needs_iommu_device)
308 return 0;
309
310 pasid_limit = kfd_get_pasid_limit();
311
312 err = amd_iommu_init_device(kfd->pdev, pasid_limit);
313 if (err)
314 return -ENXIO;
315
316 amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
317 iommu_pasid_shutdown_callback);
318 amd_iommu_set_invalid_ppr_cb(kfd->pdev,
319 iommu_invalid_ppr_cb);
320
321 err = kfd_bind_processes_to_device(kfd);
322 if (err) {
323 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
324 amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
325 amd_iommu_free_device(kfd->pdev);
326 return err;
327 }
328
329 return 0;
330}
331
332extern bool amd_iommu_pc_supported(void);
333extern u8 amd_iommu_pc_get_max_banks(u16 devid);
334extern u8 amd_iommu_pc_get_max_counters(u16 devid);
335
336/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
337 */
338int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
339{
340 struct kfd_perf_properties *props;
341
342 if (!(kdev->node_props.capability & HSA_CAP_ATS_PRESENT))
343 return 0;
344
345 if (!amd_iommu_pc_supported())
346 return 0;
347
348 props = kfd_alloc_struct(props);
349 if (!props)
350 return -ENOMEM;
351 strcpy(props->block_name, "iommu");
352 props->max_concurrent = amd_iommu_pc_get_max_banks(0) *
353 amd_iommu_pc_get_max_counters(0); /* assume one iommu */
354 list_add_tail(&props->list, &kdev->perf_props);
355
356 return 0;
357}
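
One detail of kfd_iommu_device_init() worth calling out: the PASID limit is simply the smaller of what the KFD device can encode (1 << max_pasid_bits) and what the IOMMU reports. A standalone sketch of that calculation follows, with invented numbers standing in for real amd_iommu_device_info() output:

#include <stdio.h>

static unsigned int kfd_pasid_limit(unsigned int max_pasid_bits,
				    unsigned int iommu_max_pasids)
{
	unsigned int dev_limit = 1u << max_pasid_bits;	/* device-side ceiling */

	return dev_limit < iommu_max_pasids ? dev_limit : iommu_max_pasids;
}

int main(void)
{
	/* e.g. 16 PASID bits on the device, IOMMU advertising 32768 PASIDs */
	printf("pasid limit = %u\n", kfd_pasid_limit(16, 32768));
	return 0;
}
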
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
new file mode 100644
index 000000000000..dd23d9fdf6a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __KFD_IOMMU_H__
24#define __KFD_IOMMU_H__
25
26#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
27
28#define KFD_SUPPORT_IOMMU_V2
29
30int kfd_iommu_check_device(struct kfd_dev *kfd);
31int kfd_iommu_device_init(struct kfd_dev *kfd);
32
33int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd);
34void kfd_iommu_unbind_process(struct kfd_process *p);
35
36void kfd_iommu_suspend(struct kfd_dev *kfd);
37int kfd_iommu_resume(struct kfd_dev *kfd);
38
39int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev);
40
41#else
42
43static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
44{
45 return -ENODEV;
46}
47static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
48{
49 return 0;
50}
51
52static inline int kfd_iommu_bind_process_to_device(
53 struct kfd_process_device *pdd)
54{
55 return 0;
56}
57static inline void kfd_iommu_unbind_process(struct kfd_process *p)
58{
59 /* empty */
60}
61
62static inline void kfd_iommu_suspend(struct kfd_dev *kfd)
63{
64 /* empty */
65}
66static inline int kfd_iommu_resume(struct kfd_dev *kfd)
67{
68 return 0;
69}
70
71static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
72{
73 return 0;
74}
75
76#endif /* defined(CONFIG_AMD_IOMMU_V2) */
77
78#endif /* __KFD_IOMMU_H__ */
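
kfd_iommu.h follows the usual kernel convention of compiling down to inline no-op stubs when IOMMU v2 support is absent, so callers such as kfd_events.c and kfd_topology.c need no #ifdefs of their own. A generic, self-contained sketch of that pattern is below; FEATURE_FOO and struct foo_dev are invented names used only to show the shape.

#include <stdio.h>

struct foo_dev { int id; };

#ifdef CONFIG_FEATURE_FOO
/* Real implementations would live in a separate foo.c */
int foo_init(struct foo_dev *dev);
void foo_fini(struct foo_dev *dev);
#else
/* Feature compiled out: callers still build and simply get no-ops */
static inline int foo_init(struct foo_dev *dev)
{
	(void)dev;
	return 0;
}
static inline void foo_fini(struct foo_dev *dev)
{
	(void)dev;
}
#endif

int main(void)
{
	struct foo_dev d = { .id = 0 };

	if (foo_init(&d))	/* same call site with or without the feature */
		return 1;
	foo_fini(&d);
	return 0;
}
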
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 5dc6567d4a13..69f496485331 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -297,10 +297,15 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
297 297
298 switch (dev->device_info->asic_family) { 298 switch (dev->device_info->asic_family) {
299 case CHIP_CARRIZO: 299 case CHIP_CARRIZO:
300 case CHIP_TONGA:
301 case CHIP_FIJI:
302 case CHIP_POLARIS10:
303 case CHIP_POLARIS11:
300 kernel_queue_init_vi(&kq->ops_asic_specific); 304 kernel_queue_init_vi(&kq->ops_asic_specific);
301 break; 305 break;
302 306
303 case CHIP_KAVERI: 307 case CHIP_KAVERI:
308 case CHIP_HAWAII:
304 kernel_queue_init_cik(&kq->ops_asic_specific); 309 kernel_queue_init_cik(&kq->ops_asic_specific);
305 break; 310 break;
306 default: 311 default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 3ac72bed4f31..65574c6a10b3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -43,6 +43,8 @@ static const struct kgd2kfd_calls kgd2kfd = {
43 .interrupt = kgd2kfd_interrupt, 43 .interrupt = kgd2kfd_interrupt,
44 .suspend = kgd2kfd_suspend, 44 .suspend = kgd2kfd_suspend,
45 .resume = kgd2kfd_resume, 45 .resume = kgd2kfd_resume,
46 .schedule_evict_and_restore_process =
47 kgd2kfd_schedule_evict_and_restore_process,
46}; 48};
47 49
48int sched_policy = KFD_SCHED_POLICY_HWS; 50int sched_policy = KFD_SCHED_POLICY_HWS;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index dfd260ef81ff..ee7061e1c466 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -29,8 +29,15 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
29 switch (dev->device_info->asic_family) { 29 switch (dev->device_info->asic_family) {
30 case CHIP_KAVERI: 30 case CHIP_KAVERI:
31 return mqd_manager_init_cik(type, dev); 31 return mqd_manager_init_cik(type, dev);
32 case CHIP_HAWAII:
33 return mqd_manager_init_cik_hawaii(type, dev);
32 case CHIP_CARRIZO: 34 case CHIP_CARRIZO:
33 return mqd_manager_init_vi(type, dev); 35 return mqd_manager_init_vi(type, dev);
36 case CHIP_TONGA:
37 case CHIP_FIJI:
38 case CHIP_POLARIS10:
39 case CHIP_POLARIS11:
40 return mqd_manager_init_vi_tonga(type, dev);
34 default: 41 default:
35 WARN(1, "Unexpected ASIC family %u", 42 WARN(1, "Unexpected ASIC family %u",
36 dev->device_info->asic_family); 43 dev->device_info->asic_family);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index f8ef4a051e08..c00c325ed3c9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -170,14 +170,19 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
170 mms); 170 mms);
171} 171}
172 172
173static int update_mqd(struct mqd_manager *mm, void *mqd, 173static int __update_mqd(struct mqd_manager *mm, void *mqd,
174 struct queue_properties *q) 174 struct queue_properties *q, unsigned int atc_bit)
175{ 175{
176 struct cik_mqd *m; 176 struct cik_mqd *m;
177 177
178 m = get_mqd(mqd); 178 m = get_mqd(mqd);
179 m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE | 179 m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
180 DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN; 180 DEFAULT_MIN_AVAIL_SIZE;
181 m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
182 if (atc_bit) {
183 m->cp_hqd_pq_control |= PQ_ATC_EN;
184 m->cp_hqd_ib_control |= IB_ATC_EN;
185 }
181 186
182 /* 187 /*
183 * Calculating queue size which is log base 2 of actual queue size -1 188 * Calculating queue size which is log base 2 of actual queue size -1
@@ -197,11 +202,24 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
197 202
198 q->is_active = (q->queue_size > 0 && 203 q->is_active = (q->queue_size > 0 &&
199 q->queue_address != 0 && 204 q->queue_address != 0 &&
200 q->queue_percent > 0); 205 q->queue_percent > 0 &&
206 !q->is_evicted);
201 207
202 return 0; 208 return 0;
203} 209}
204 210
211static int update_mqd(struct mqd_manager *mm, void *mqd,
212 struct queue_properties *q)
213{
214 return __update_mqd(mm, mqd, q, 1);
215}
216
217static int update_mqd_hawaii(struct mqd_manager *mm, void *mqd,
218 struct queue_properties *q)
219{
220 return __update_mqd(mm, mqd, q, 0);
221}
222
205static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, 223static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
206 struct queue_properties *q) 224 struct queue_properties *q)
207{ 225{
@@ -228,7 +246,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
228 246
229 q->is_active = (q->queue_size > 0 && 247 q->is_active = (q->queue_size > 0 &&
230 q->queue_address != 0 && 248 q->queue_address != 0 &&
231 q->queue_percent > 0); 249 q->queue_percent > 0 &&
250 !q->is_evicted);
232 251
233 return 0; 252 return 0;
234} 253}
@@ -360,7 +379,8 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
360 379
361 q->is_active = (q->queue_size > 0 && 380 q->is_active = (q->queue_size > 0 &&
362 q->queue_address != 0 && 381 q->queue_address != 0 &&
363 q->queue_percent > 0); 382 q->queue_percent > 0 &&
383 !q->is_evicted);
364 384
365 return 0; 385 return 0;
366} 386}
@@ -441,3 +461,15 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
441 return mqd; 461 return mqd;
442} 462}
443 463
464struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
465 struct kfd_dev *dev)
466{
467 struct mqd_manager *mqd;
468
469 mqd = mqd_manager_init_cik(type, dev);
470 if (!mqd)
471 return NULL;
472 if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
473 mqd->update_mqd = update_mqd_hawaii;
474 return mqd;
475}
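
mqd_manager_init_cik_hawaii() above (and mqd_manager_init_vi_tonga() further down) reuse the base initializer and then replace a single function pointer, which is how the dGPU variants are slotted in without duplicating the whole manager. A small self-contained sketch of that override pattern, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct mgr {
	int (*update)(void);	/* the hook the variant replaces */
};

static int update_with_atc(void)	{ return 1; }
static int update_without_atc(void)	{ return 0; }

static struct mgr *mgr_init_base(void)
{
	struct mgr *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;
	m->update = update_with_atc;
	return m;
}

static struct mgr *mgr_init_variant(void)
{
	struct mgr *m = mgr_init_base();	/* reuse the base setup ... */

	if (!m)
		return NULL;
	m->update = update_without_atc;		/* ... then swap the one hook */
	return m;
}

int main(void)
{
	struct mgr *m = mgr_init_variant();

	printf("%d\n", m ? m->update() : -1);	/* prints 0 */
	free(m);
	return 0;
}
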
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 971aec0637dc..89e4242e43e7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -151,6 +151,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
151 151
152 m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); 152 m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
153 m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); 153 m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
154 m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
155 m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
154 156
155 m->cp_hqd_pq_doorbell_control = 157 m->cp_hqd_pq_doorbell_control =
156 q->doorbell_off << 158 q->doorbell_off <<
@@ -196,7 +198,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
196 198
197 q->is_active = (q->queue_size > 0 && 199 q->is_active = (q->queue_size > 0 &&
198 q->queue_address != 0 && 200 q->queue_address != 0 &&
199 q->queue_percent > 0); 201 q->queue_percent > 0 &&
202 !q->is_evicted);
200 203
201 return 0; 204 return 0;
202} 205}
@@ -208,6 +211,12 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
208 return __update_mqd(mm, mqd, q, MTYPE_CC, 1); 211 return __update_mqd(mm, mqd, q, MTYPE_CC, 1);
209} 212}
210 213
214static int update_mqd_tonga(struct mqd_manager *mm, void *mqd,
215 struct queue_properties *q)
216{
217 return __update_mqd(mm, mqd, q, MTYPE_UC, 0);
218}
219
211static int destroy_mqd(struct mqd_manager *mm, void *mqd, 220static int destroy_mqd(struct mqd_manager *mm, void *mqd,
212 enum kfd_preempt_type type, 221 enum kfd_preempt_type type,
213 unsigned int timeout, uint32_t pipe_id, 222 unsigned int timeout, uint32_t pipe_id,
@@ -334,7 +343,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
334 343
335 q->is_active = (q->queue_size > 0 && 344 q->is_active = (q->queue_size > 0 &&
336 q->queue_address != 0 && 345 q->queue_address != 0 &&
337 q->queue_percent > 0); 346 q->queue_percent > 0 &&
347 !q->is_evicted);
338 348
339 return 0; 349 return 0;
340} 350}
@@ -432,3 +442,16 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
432 442
433 return mqd; 443 return mqd;
434} 444}
445
446struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
447 struct kfd_dev *dev)
448{
449 struct mqd_manager *mqd;
450
451 mqd = mqd_manager_init_vi(type, dev);
452 if (!mqd)
453 return NULL;
454 if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
455 mqd->update_mqd = update_mqd_tonga;
456 return mqd;
457}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 0bedcf9cc08c..cac7aa258162 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -158,6 +158,8 @@ struct kfd_device_info {
158 uint8_t num_of_watch_points; 158 uint8_t num_of_watch_points;
159 uint16_t mqd_size_aligned; 159 uint16_t mqd_size_aligned;
160 bool supports_cwsr; 160 bool supports_cwsr;
161 bool needs_iommu_device;
162 bool needs_pci_atomics;
161}; 163};
162 164
163struct kfd_mem_obj { 165struct kfd_mem_obj {
@@ -333,7 +335,11 @@ enum kfd_queue_format {
333 * @is_interop: Defines if this is a interop queue. Interop queue means that 335 * @is_interop: Defines if this is a interop queue. Interop queue means that
334 * the queue can access both graphics and compute resources. 336 * the queue can access both graphics and compute resources.
335 * 337 *
336 * @is_active: Defines if the queue is active or not. 338 * @is_evicted: Defines if the queue is evicted. Only active queues
339 * are evicted, rendering them inactive.
340 *
341 * @is_active: Defines if the queue is active or not. @is_active and
342 * @is_evicted are protected by the DQM lock.
337 * 343 *
338 * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid 344 * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
339 * of the queue. 345 * of the queue.
@@ -355,6 +361,7 @@ struct queue_properties {
355 uint32_t __iomem *doorbell_ptr; 361 uint32_t __iomem *doorbell_ptr;
356 uint32_t doorbell_off; 362 uint32_t doorbell_off;
357 bool is_interop; 363 bool is_interop;
364 bool is_evicted;
358 bool is_active; 365 bool is_active;
359 /* Not relevant for user mode queues in cp scheduling */ 366 /* Not relevant for user mode queues in cp scheduling */
360 unsigned int vmid; 367 unsigned int vmid;
@@ -458,6 +465,7 @@ struct qcm_process_device {
458 unsigned int queue_count; 465 unsigned int queue_count;
459 unsigned int vmid; 466 unsigned int vmid;
460 bool is_debug; 467 bool is_debug;
468 unsigned int evicted; /* eviction counter, 0=active */
461 469
462 /* This flag tells if we should reset all wavefronts on 470 /* This flag tells if we should reset all wavefronts on
463 * process termination 471 * process termination
@@ -484,6 +492,17 @@ struct qcm_process_device {
484 uint64_t tma_addr; 492 uint64_t tma_addr;
485}; 493};
486 494
495/* KFD Memory Eviction */
496
497/* Approx. wait time before attempting to restore evicted BOs */
498#define PROCESS_RESTORE_TIME_MS 100
499/* Approx. back off time if restore fails due to lack of memory */
500#define PROCESS_BACK_OFF_TIME_MS 100
501/* Approx. time before evicting the process again */
502#define PROCESS_ACTIVE_TIME_MS 10
503
504int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
505 struct dma_fence *fence);
487 506
488enum kfd_pdd_bound { 507enum kfd_pdd_bound {
489 PDD_UNBOUND = 0, 508 PDD_UNBOUND = 0,
@@ -516,8 +535,8 @@ struct kfd_process_device {
516 uint64_t scratch_base; 535 uint64_t scratch_base;
517 uint64_t scratch_limit; 536 uint64_t scratch_limit;
518 537
519 /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */ 538 /* VM context for GPUVM allocations */
520 enum kfd_pdd_bound bound; 539 void *vm;
521 540
522 /* Flag used to tell the pdd has dequeued from the dqm. 541 /* Flag used to tell the pdd has dequeued from the dqm.
523 * This is used to prevent dev->dqm->ops.process_termination() from 542 * This is used to prevent dev->dqm->ops.process_termination() from
@@ -525,6 +544,9 @@ struct kfd_process_device {
525 * function. 544 * function.
526 */ 545 */
527 bool already_dequeued; 546 bool already_dequeued;
547
548 /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
549 enum kfd_pdd_bound bound;
528}; 550};
529 551
530#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd) 552#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -587,8 +609,30 @@ struct kfd_process {
587 size_t signal_mapped_size; 609 size_t signal_mapped_size;
588 size_t signal_event_count; 610 size_t signal_event_count;
589 bool signal_event_limit_reached; 611 bool signal_event_limit_reached;
612
613 /* Information used for memory eviction */
614 void *kgd_process_info;
615 /* Eviction fence that is attached to all the BOs of this process. The
616 * fence will be triggered during eviction and new one will be created
617 * during restore
618 */
619 struct dma_fence *ef;
620
621 /* Work items for evicting and restoring BOs */
622 struct delayed_work eviction_work;
623 struct delayed_work restore_work;
624 /* seqno of the last scheduled eviction */
625 unsigned int last_eviction_seqno;
626 /* Approx. the last timestamp (in jiffies) when the process was
627 * restored after an eviction
628 */
629 unsigned long last_restore_timestamp;
590}; 630};
591 631
632#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
633extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
634extern struct srcu_struct kfd_processes_srcu;
635
592/** 636/**
593 * Ioctl function type. 637 * Ioctl function type.
594 * 638 *
@@ -612,13 +656,13 @@ void kfd_process_destroy_wq(void);
612struct kfd_process *kfd_create_process(struct file *filep); 656struct kfd_process *kfd_create_process(struct file *filep);
613struct kfd_process *kfd_get_process(const struct task_struct *); 657struct kfd_process *kfd_get_process(const struct task_struct *);
614struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid); 658struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
659struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
615void kfd_unref_process(struct kfd_process *p); 660void kfd_unref_process(struct kfd_process *p);
661void kfd_suspend_all_processes(void);
662int kfd_resume_all_processes(void);
616 663
617struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, 664struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
618 struct kfd_process *p); 665 struct kfd_process *p);
619int kfd_bind_processes_to_device(struct kfd_dev *dev);
620void kfd_unbind_processes_from_device(struct kfd_dev *dev);
621void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid);
622struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, 666struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
623 struct kfd_process *p); 667 struct kfd_process *p);
624struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, 668struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
@@ -705,8 +749,12 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
705 struct kfd_dev *dev); 749 struct kfd_dev *dev);
706struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, 750struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
707 struct kfd_dev *dev); 751 struct kfd_dev *dev);
752struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
753 struct kfd_dev *dev);
708struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, 754struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
709 struct kfd_dev *dev); 755 struct kfd_dev *dev);
756struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
757 struct kfd_dev *dev);
710struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev); 758struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
711void device_queue_manager_uninit(struct device_queue_manager *dqm); 759void device_queue_manager_uninit(struct device_queue_manager *dqm);
712struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, 760struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
@@ -795,6 +843,8 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
795 uint64_t *event_page_offset, uint32_t *event_slot_index); 843 uint64_t *event_page_offset, uint32_t *event_slot_index);
796int kfd_event_destroy(struct kfd_process *p, uint32_t event_id); 844int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
797 845
846void kfd_flush_tlb(struct kfd_process_device *pdd);
847
798int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p); 848int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
799 849
800/* Debugfs */ 850/* Debugfs */
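
The new qcm_process_device.evicted field is documented as an eviction counter with 0 meaning active, and the process code further down calls eviction reference-counted per process-device. The sketch below shows how such a nesting counter is evidently meant to behave: the first eviction stops the queues, nested evictions only bump the count, and the queues come back when it returns to zero. The real accounting lives in the device queue manager, which is not part of this hunk, so this is only an illustration of the idea.

#include <assert.h>
#include <stdio.h>

struct qpd_sketch {
	unsigned int evicted;	/* 0 = queues active, >0 = eviction nesting depth */
	int queues_running;
};

static void evict(struct qpd_sketch *q)
{
	if (q->evicted++ == 0)
		q->queues_running = 0;	/* first eviction actually stops the queues */
}

static void restore(struct qpd_sketch *q)
{
	assert(q->evicted > 0);
	if (--q->evicted == 0)
		q->queues_running = 1;	/* last restore lets them run again */
}

int main(void)
{
	struct qpd_sketch q = { 0, 1 };

	evict(&q);	/* e.g. memory pressure */
	evict(&q);	/* e.g. suspend, nested on top */
	restore(&q);
	printf("running after one restore: %d\n", q.queues_running);	/* 0 */
	restore(&q);
	printf("running after both restores: %d\n", q.queues_running);	/* 1 */
	return 0;
}
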
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 4ff5f0fe6db8..18b2b86ad503 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -34,17 +34,18 @@
34struct mm_struct; 34struct mm_struct;
35 35
36#include "kfd_priv.h" 36#include "kfd_priv.h"
37#include "kfd_device_queue_manager.h"
37#include "kfd_dbgmgr.h" 38#include "kfd_dbgmgr.h"
39#include "kfd_iommu.h"
38 40
39/* 41/*
40 * List of struct kfd_process (field kfd_process). 42 * List of struct kfd_process (field kfd_process).
41 * Unique/indexed by mm_struct* 43 * Unique/indexed by mm_struct*
42 */ 44 */
43#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */ 45DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
44static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
45static DEFINE_MUTEX(kfd_processes_mutex); 46static DEFINE_MUTEX(kfd_processes_mutex);
46 47
47DEFINE_STATIC_SRCU(kfd_processes_srcu); 48DEFINE_SRCU(kfd_processes_srcu);
48 49
49static struct workqueue_struct *kfd_process_wq; 50static struct workqueue_struct *kfd_process_wq;
50 51
@@ -54,6 +55,9 @@ static struct kfd_process *create_process(const struct task_struct *thread,
54 struct file *filep); 55 struct file *filep);
55static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep); 56static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep);
56 57
58static void evict_process_worker(struct work_struct *work);
59static void restore_process_worker(struct work_struct *work);
60
57 61
58void kfd_process_create_wq(void) 62void kfd_process_create_wq(void)
59{ 63{
@@ -154,6 +158,10 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
154 pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n", 158 pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
155 pdd->dev->id, p->pasid); 159 pdd->dev->id, p->pasid);
156 160
161 if (pdd->vm)
162 pdd->dev->kfd2kgd->destroy_process_vm(
163 pdd->dev->kgd, pdd->vm);
164
157 list_del(&pdd->per_device_list); 165 list_del(&pdd->per_device_list);
158 166
159 if (pdd->qpd.cwsr_kaddr) 167 if (pdd->qpd.cwsr_kaddr)
@@ -173,16 +181,11 @@ static void kfd_process_wq_release(struct work_struct *work)
173{ 181{
174 struct kfd_process *p = container_of(work, struct kfd_process, 182 struct kfd_process *p = container_of(work, struct kfd_process,
175 release_work); 183 release_work);
176 struct kfd_process_device *pdd;
177 184
178 pr_debug("Releasing process (pasid %d) in workqueue\n", p->pasid); 185 kfd_iommu_unbind_process(p);
179
180 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
181 if (pdd->bound == PDD_BOUND)
182 amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
183 }
184 186
185 kfd_process_destroy_pdds(p); 187 kfd_process_destroy_pdds(p);
188 dma_fence_put(p->ef);
186 189
187 kfd_event_free_process(p); 190 kfd_event_free_process(p);
188 191
@@ -230,6 +233,9 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
230 mutex_unlock(&kfd_processes_mutex); 233 mutex_unlock(&kfd_processes_mutex);
231 synchronize_srcu(&kfd_processes_srcu); 234 synchronize_srcu(&kfd_processes_srcu);
232 235
236 cancel_delayed_work_sync(&p->eviction_work);
237 cancel_delayed_work_sync(&p->restore_work);
238
233 mutex_lock(&p->mutex); 239 mutex_lock(&p->mutex);
234 240
235 /* Iterate over all process device data structures and if the 241 /* Iterate over all process device data structures and if the
@@ -351,6 +357,10 @@ static struct kfd_process *create_process(const struct task_struct *thread,
351 if (err != 0) 357 if (err != 0)
352 goto err_init_apertures; 358 goto err_init_apertures;
353 359
360 INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
361 INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
362 process->last_restore_timestamp = get_jiffies_64();
363
354 err = kfd_process_init_cwsr(process, filep); 364 err = kfd_process_init_cwsr(process, filep);
355 if (err) 365 if (err)
356 goto err_init_cwsr; 366 goto err_init_cwsr;
@@ -402,12 +412,24 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
402 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); 412 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
403 pdd->qpd.dqm = dev->dqm; 413 pdd->qpd.dqm = dev->dqm;
404 pdd->qpd.pqm = &p->pqm; 414 pdd->qpd.pqm = &p->pqm;
415 pdd->qpd.evicted = 0;
405 pdd->process = p; 416 pdd->process = p;
406 pdd->bound = PDD_UNBOUND; 417 pdd->bound = PDD_UNBOUND;
407 pdd->already_dequeued = false; 418 pdd->already_dequeued = false;
408 list_add(&pdd->per_device_list, &p->per_device_data); 419 list_add(&pdd->per_device_list, &p->per_device_data);
409 420
421 /* Create the GPUVM context for this specific device */
422 if (dev->kfd2kgd->create_process_vm(dev->kgd, &pdd->vm,
423 &p->kgd_process_info, &p->ef)) {
424 pr_err("Failed to create process VM object\n");
425 goto err_create_pdd;
426 }
410 return pdd; 427 return pdd;
428
429err_create_pdd:
430 list_del(&pdd->per_device_list);
431 kfree(pdd);
432 return NULL;
411} 433}
412 434
413/* 435/*
@@ -429,174 +451,256 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
429 return ERR_PTR(-ENOMEM); 451 return ERR_PTR(-ENOMEM);
430 } 452 }
431 453
432 if (pdd->bound == PDD_BOUND) { 454 err = kfd_iommu_bind_process_to_device(pdd);
433 return pdd; 455 if (err)
434 } else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
435 pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
436 return ERR_PTR(-EINVAL);
437 }
438
439 err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
440 if (err < 0)
441 return ERR_PTR(err); 456 return ERR_PTR(err);
442 457
443 pdd->bound = PDD_BOUND;
444
445 return pdd; 458 return pdd;
446} 459}
447 460
448/* 461struct kfd_process_device *kfd_get_first_process_device_data(
449 * Bind processes do the device that have been temporarily unbound 462 struct kfd_process *p)
450 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
451 */
452int kfd_bind_processes_to_device(struct kfd_dev *dev)
453{ 463{
454 struct kfd_process_device *pdd; 464 return list_first_entry(&p->per_device_data,
455 struct kfd_process *p; 465 struct kfd_process_device,
466 per_device_list);
467}
468
469struct kfd_process_device *kfd_get_next_process_device_data(
470 struct kfd_process *p,
471 struct kfd_process_device *pdd)
472{
473 if (list_is_last(&pdd->per_device_list, &p->per_device_data))
474 return NULL;
475 return list_next_entry(pdd, per_device_list);
476}
477
478bool kfd_has_process_device_data(struct kfd_process *p)
479{
480 return !(list_empty(&p->per_device_data));
481}
482
483/* This increments the process->ref counter. */
484struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
485{
486 struct kfd_process *p, *ret_p = NULL;
456 unsigned int temp; 487 unsigned int temp;
457 int err = 0;
458 488
459 int idx = srcu_read_lock(&kfd_processes_srcu); 489 int idx = srcu_read_lock(&kfd_processes_srcu);
460 490
461 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 491 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
462 mutex_lock(&p->mutex); 492 if (p->pasid == pasid) {
463 pdd = kfd_get_process_device_data(dev, p); 493 kref_get(&p->ref);
464 494 ret_p = p;
465 if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) {
466 mutex_unlock(&p->mutex);
467 continue;
468 }
469
470 err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
471 p->lead_thread);
472 if (err < 0) {
473 pr_err("Unexpected pasid %d binding failure\n",
474 p->pasid);
475 mutex_unlock(&p->mutex);
476 break; 495 break;
477 } 496 }
478
479 pdd->bound = PDD_BOUND;
480 mutex_unlock(&p->mutex);
481 } 497 }
482 498
483 srcu_read_unlock(&kfd_processes_srcu, idx); 499 srcu_read_unlock(&kfd_processes_srcu, idx);
484 500
485 return err; 501 return ret_p;
486} 502}
487 503
488/* 504/* This increments the process->ref counter. */
489 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These 505struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
490 * processes will be restored to PDD_BOUND state in
491 * kfd_bind_processes_to_device.
492 */
493void kfd_unbind_processes_from_device(struct kfd_dev *dev)
494{ 506{
495 struct kfd_process_device *pdd;
496 struct kfd_process *p; 507 struct kfd_process *p;
497 unsigned int temp;
498 508
499 int idx = srcu_read_lock(&kfd_processes_srcu); 509 int idx = srcu_read_lock(&kfd_processes_srcu);
500 510
501 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 511 p = find_process_by_mm(mm);
502 mutex_lock(&p->mutex); 512 if (p)
503 pdd = kfd_get_process_device_data(dev, p); 513 kref_get(&p->ref);
504
505 if (WARN_ON(!pdd)) {
506 mutex_unlock(&p->mutex);
507 continue;
508 }
509
510 if (pdd->bound == PDD_BOUND)
511 pdd->bound = PDD_BOUND_SUSPENDED;
512 mutex_unlock(&p->mutex);
513 }
514 514
515 srcu_read_unlock(&kfd_processes_srcu, idx); 515 srcu_read_unlock(&kfd_processes_srcu, idx);
516
517 return p;
516} 518}
517 519
518void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid) 520/* process_evict_queues - Evict all user queues of a process
521 *
522 * Eviction is reference-counted per process-device. This means multiple
523 * evictions from different sources can be nested safely.
524 */
525static int process_evict_queues(struct kfd_process *p)
519{ 526{
520 struct kfd_process *p;
521 struct kfd_process_device *pdd; 527 struct kfd_process_device *pdd;
528 int r = 0;
529 unsigned int n_evicted = 0;
522 530
523 /* 531 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
524 * Look for the process that matches the pasid. If there is no such 532 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
525 * process, we either released it in amdkfd's own notifier, or there 533 &pdd->qpd);
526 * is a bug. Unfortunately, there is no way to tell... 534 if (r) {
527 */ 535 pr_err("Failed to evict process queues\n");
528 p = kfd_lookup_process_by_pasid(pasid); 536 goto fail;
529 if (!p) 537 }
530 return; 538 n_evicted++;
539 }
531 540
532 pr_debug("Unbinding process %d from IOMMU\n", pasid); 541 return r;
533 542
534 mutex_lock(kfd_get_dbgmgr_mutex()); 543fail:
544 /* To keep state consistent, roll back partial eviction by
545 * restoring queues
546 */
547 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
548 if (n_evicted == 0)
549 break;
550 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
551 &pdd->qpd))
552 pr_err("Failed to restore queues\n");
535 553
536 if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) { 554 n_evicted--;
537 if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
538 kfd_dbgmgr_destroy(dev->dbgmgr);
539 dev->dbgmgr = NULL;
540 }
541 } 555 }
542 556
543 mutex_unlock(kfd_get_dbgmgr_mutex()); 557 return r;
544 558}
545 mutex_lock(&p->mutex);
546 559
547 pdd = kfd_get_process_device_data(dev, p); 560/* process_restore_queues - Restore all user queues of a process */
548 if (pdd) 561static int process_restore_queues(struct kfd_process *p)
549 /* For GPU relying on IOMMU, we need to dequeue here 562{
550 * when PASID is still bound. 563 struct kfd_process_device *pdd;
551 */ 564 int r, ret = 0;
552 kfd_process_dequeue_from_device(pdd);
553 565
554 mutex_unlock(&p->mutex); 566 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
567 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
568 &pdd->qpd);
569 if (r) {
570 pr_err("Failed to restore process queues\n");
571 if (!ret)
572 ret = r;
573 }
574 }
555 575
556 kfd_unref_process(p); 576 return ret;
557} 577}
558 578
559struct kfd_process_device *kfd_get_first_process_device_data( 579static void evict_process_worker(struct work_struct *work)
560 struct kfd_process *p)
561{ 580{
562 return list_first_entry(&p->per_device_data, 581 int ret;
563 struct kfd_process_device, 582 struct kfd_process *p;
564 per_device_list); 583 struct delayed_work *dwork;
584
585 dwork = to_delayed_work(work);
586
587 /* Process termination destroys this worker thread. So during the
588 * lifetime of this thread, kfd_process p will be valid
589 */
590 p = container_of(dwork, struct kfd_process, eviction_work);
591 WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
592 "Eviction fence mismatch\n");
593
 594	/* A narrow window of overlap between the restore and evict work
 595	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
 596	 * unreserves KFD BOs, it is possible to be evicted again. But
 597	 * restore has a few more steps to finish. So let's wait for any
598 * previous restore work to complete
599 */
600 flush_delayed_work(&p->restore_work);
601
602 pr_debug("Started evicting pasid %d\n", p->pasid);
603 ret = process_evict_queues(p);
604 if (!ret) {
605 dma_fence_signal(p->ef);
606 dma_fence_put(p->ef);
607 p->ef = NULL;
608 schedule_delayed_work(&p->restore_work,
609 msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
610
611 pr_debug("Finished evicting pasid %d\n", p->pasid);
612 } else
613 pr_err("Failed to evict queues of pasid %d\n", p->pasid);
565} 614}
566 615
567struct kfd_process_device *kfd_get_next_process_device_data( 616static void restore_process_worker(struct work_struct *work)
568 struct kfd_process *p,
569 struct kfd_process_device *pdd)
570{ 617{
571 if (list_is_last(&pdd->per_device_list, &p->per_device_data)) 618 struct delayed_work *dwork;
572 return NULL; 619 struct kfd_process *p;
573 return list_next_entry(pdd, per_device_list); 620 struct kfd_process_device *pdd;
621 int ret = 0;
622
623 dwork = to_delayed_work(work);
624
625 /* Process termination destroys this worker thread. So during the
626 * lifetime of this thread, kfd_process p will be valid
627 */
628 p = container_of(dwork, struct kfd_process, restore_work);
629
630 /* Call restore_process_bos on the first KGD device. This function
631 * takes care of restoring the whole process including other devices.
 632	 * Restore can fail if not enough memory is available. If so,
633 * reschedule again.
634 */
635 pdd = list_first_entry(&p->per_device_data,
636 struct kfd_process_device,
637 per_device_list);
638
639 pr_debug("Started restoring pasid %d\n", p->pasid);
640
641 /* Setting last_restore_timestamp before successful restoration.
642 * Otherwise this would have to be set by KGD (restore_process_bos)
643 * before KFD BOs are unreserved. If not, the process can be evicted
644 * again before the timestamp is set.
645 * If restore fails, the timestamp will be set again in the next
 646	 * attempt. This would mean that the minimum GPU quantum would be
647 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
648 * functions)
649 */
650
651 p->last_restore_timestamp = get_jiffies_64();
652 ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
653 &p->ef);
654 if (ret) {
655 pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
656 p->pasid, PROCESS_BACK_OFF_TIME_MS);
657 ret = schedule_delayed_work(&p->restore_work,
658 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
659 WARN(!ret, "reschedule restore work failed\n");
660 return;
661 }
662
663 ret = process_restore_queues(p);
664 if (!ret)
665 pr_debug("Finished restoring pasid %d\n", p->pasid);
666 else
667 pr_err("Failed to restore queues of pasid %d\n", p->pasid);
574} 668}
575 669
576bool kfd_has_process_device_data(struct kfd_process *p) 670void kfd_suspend_all_processes(void)
577{ 671{
578 return !(list_empty(&p->per_device_data)); 672 struct kfd_process *p;
673 unsigned int temp;
674 int idx = srcu_read_lock(&kfd_processes_srcu);
675
676 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
677 cancel_delayed_work_sync(&p->eviction_work);
678 cancel_delayed_work_sync(&p->restore_work);
679
680 if (process_evict_queues(p))
681 pr_err("Failed to suspend process %d\n", p->pasid);
682 dma_fence_signal(p->ef);
683 dma_fence_put(p->ef);
684 p->ef = NULL;
685 }
686 srcu_read_unlock(&kfd_processes_srcu, idx);
579} 687}
580 688
581/* This increments the process->ref counter. */ 689int kfd_resume_all_processes(void)
582struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
583{ 690{
584 struct kfd_process *p, *ret_p = NULL; 691 struct kfd_process *p;
585 unsigned int temp; 692 unsigned int temp;
586 693 int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
587 int idx = srcu_read_lock(&kfd_processes_srcu);
588 694
589 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 695 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
590 if (p->pasid == pasid) { 696 if (!schedule_delayed_work(&p->restore_work, 0)) {
591 kref_get(&p->ref); 697 pr_err("Restore process %d failed during resume\n",
592 ret_p = p; 698 p->pasid);
593 break; 699 ret = -EFAULT;
594 } 700 }
595 } 701 }
596
597 srcu_read_unlock(&kfd_processes_srcu, idx); 702 srcu_read_unlock(&kfd_processes_srcu, idx);
598 703 return ret;
599 return ret_p;
600} 704}
601 705
602int kfd_reserved_mem_mmap(struct kfd_process *process, 706int kfd_reserved_mem_mmap(struct kfd_process *process,
@@ -633,6 +737,22 @@ int kfd_reserved_mem_mmap(struct kfd_process *process,
633 KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot); 737 KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
634} 738}
635 739
740void kfd_flush_tlb(struct kfd_process_device *pdd)
741{
742 struct kfd_dev *dev = pdd->dev;
743 const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
744
745 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
746 /* Nothing to flush until a VMID is assigned, which
747 * only happens when the first queue is created.
748 */
749 if (pdd->qpd.vmid)
750 f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
751 } else {
752 f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
753 }
754}
755
636#if defined(CONFIG_DEBUG_FS) 756#if defined(CONFIG_DEBUG_FS)
637 757
638int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data) 758int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
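
The error path in process_evict_queues() above is easy to misread: eviction walks the per-device list counting successes, and on the first failure it restores exactly the devices it had already evicted, so the process is never left half-evicted. A standalone sketch of that rollback shape, with dummy per-device evict/restore calls standing in for the dqm ops:

#include <stdio.h>

#define NDEV 3

static int evict_one(int dev)
{
	return dev == 2 ? -1 : 0;	/* pretend the last device fails */
}

static void restore_one(int dev)
{
	printf("rolled back device %d\n", dev);
}

static int evict_all(void)
{
	unsigned int n_evicted = 0;
	int i, r = 0;

	for (i = 0; i < NDEV; i++) {
		r = evict_one(i);
		if (r)
			goto fail;
		n_evicted++;
	}
	return 0;

fail:
	/* keep state consistent: undo only what actually succeeded */
	while (n_evicted--)
		restore_one((int)n_evicted);
	return r;
}

int main(void)
{
	return evict_all() ? 1 : 0;
}
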
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 876380632668..7817e327ea6d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -208,7 +208,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
208 208
209 case KFD_QUEUE_TYPE_COMPUTE: 209 case KFD_QUEUE_TYPE_COMPUTE:
210 /* check if there is over subscription */ 210 /* check if there is over subscription */
211 if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && 211 if ((dev->dqm->sched_policy ==
212 KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
212 ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) || 213 ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
213 (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) { 214 (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
214 pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n"); 215 pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c6a76090a725..250615535563 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -35,6 +35,7 @@
35#include "kfd_crat.h" 35#include "kfd_crat.h"
36#include "kfd_topology.h" 36#include "kfd_topology.h"
37#include "kfd_device_queue_manager.h" 37#include "kfd_device_queue_manager.h"
38#include "kfd_iommu.h"
38 39
39/* topology_device_list - Master list of all topology devices */ 40/* topology_device_list - Master list of all topology devices */
40static struct list_head topology_device_list; 41static struct list_head topology_device_list;
@@ -677,7 +678,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
677 } 678 }
678 679
679 /* All hardware blocks have the same number of attributes. */ 680 /* All hardware blocks have the same number of attributes. */
680 num_attrs = sizeof(perf_attr_iommu)/sizeof(struct kfd_perf_attr); 681 num_attrs = ARRAY_SIZE(perf_attr_iommu);
681 list_for_each_entry(perf, &dev->perf_props, list) { 682 list_for_each_entry(perf, &dev->perf_props, list) {
682 perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr) 683 perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr)
683 * num_attrs + sizeof(struct attribute_group), 684 * num_attrs + sizeof(struct attribute_group),
@@ -875,19 +876,8 @@ static void find_system_memory(const struct dmi_header *dm,
875 */ 876 */
876static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev) 877static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev)
877{ 878{
878 struct kfd_perf_properties *props; 879 /* These are the only counters supported so far */
879 880 return kfd_iommu_add_perf_counters(kdev);
880 if (amd_iommu_pc_supported()) {
881 props = kfd_alloc_struct(props);
882 if (!props)
883 return -ENOMEM;
884 strcpy(props->block_name, "iommu");
885 props->max_concurrent = amd_iommu_pc_get_max_banks(0) *
886 amd_iommu_pc_get_max_counters(0); /* assume one iommu */
887 list_add_tail(&props->list, &kdev->perf_props);
888 }
889
890 return 0;
891} 881}
892 882
893/* kfd_add_non_crat_information - Add information that is not currently 883/* kfd_add_non_crat_information - Add information that is not currently
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 53fca1f45401..c0be2be6dca5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -25,7 +25,7 @@
25 25
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include "kfd_priv.h" 28#include "kfd_crat.h"
29 29
30#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128 30#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128
31 31
@@ -183,8 +183,4 @@ struct kfd_topology_device *kfd_create_topology_device(
183 struct list_head *device_list); 183 struct list_head *device_list);
184void kfd_release_topology_device_list(struct list_head *device_list); 184void kfd_release_topology_device_list(struct list_head *device_list);
185 185
186extern bool amd_iommu_pc_supported(void);
187extern u8 amd_iommu_pc_get_max_banks(u16 devid);
188extern u8 amd_iommu_pc_get_max_counters(u16 devid);
189
190#endif /* __KFD_TOPOLOGY_H__ */ 186#endif /* __KFD_TOPOLOGY_H__ */
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index ec3285f65517..5b124a67404c 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -11,7 +11,7 @@ config DRM_AMD_DC
11 11
12config DRM_AMD_DC_PRE_VEGA 12config DRM_AMD_DC_PRE_VEGA
13 bool "DC support for Polaris and older ASICs" 13 bool "DC support for Polaris and older ASICs"
14 default n 14 default y
15 help 15 help
16 Choose this option to enable the new DC support for older asics 16 Choose this option to enable the new DC support for older asics
17 by default. This includes Polaris, Carrizo, Tonga, Bonaire, 17 by default. This includes Polaris, Carrizo, Tonga, Bonaire,
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index c27c81cdeed3..a2c5be493555 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -26,17 +26,16 @@
26 26
27AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH) 27AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
28 28
29subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
30
31subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/ 29subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
32subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw 30subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
33subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc 31subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
34subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync 32subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
33subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
35 34
36#TODO: remove when Timing Sync feature is complete 35#TODO: remove when Timing Sync feature is complete
37subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 36subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
38 37
39DAL_LIBS = amdgpu_dm dc modules/freesync 38DAL_LIBS = amdgpu_dm dc modules/freesync modules/color
40 39
41AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) 40AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
42 41
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 2b72009844f8..af16973f2c41 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -25,12 +25,16 @@
25 25
26 26
27 27
28AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o 28AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
29 29
30ifneq ($(CONFIG_DRM_AMD_DC),) 30ifneq ($(CONFIG_DRM_AMD_DC),)
31AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o 31AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
32endif 32endif
33 33
34ifneq ($(CONFIG_DEBUG_FS),)
35AMDGPUDM += amdgpu_dm_crc.o
36endif
37
34subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc 38subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
35 39
36AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM)) 40AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1ce4c98385e3..ae512ecb65ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -61,7 +61,8 @@
61 61
62#include "dcn/dcn_1_0_offset.h" 62#include "dcn/dcn_1_0_offset.h"
63#include "dcn/dcn_1_0_sh_mask.h" 63#include "dcn/dcn_1_0_sh_mask.h"
64#include "soc15ip.h" 64#include "soc15_hw_ip.h"
65#include "vega10_ip_offset.h"
65 66
66#include "soc15_common.h" 67#include "soc15_common.h"
67#endif 68#endif
@@ -319,6 +320,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
319 crtc_index = acrtc->crtc_id; 320 crtc_index = acrtc->crtc_id;
320 321
321 drm_handle_vblank(adev->ddev, crtc_index); 322 drm_handle_vblank(adev->ddev, crtc_index);
323 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
322} 324}
323 325
324static int dm_set_clockgating_state(void *handle, 326static int dm_set_clockgating_state(void *handle,
@@ -345,23 +347,43 @@ static void hotplug_notify_work_func(struct work_struct *work)
345} 347}
346 348
347#if defined(CONFIG_DRM_AMD_DC_FBC) 349#if defined(CONFIG_DRM_AMD_DC_FBC)
348#include "dal_asic_id.h"
349/* Allocate memory for FBC compressed data */ 350/* Allocate memory for FBC compressed data */
350/* TODO: Dynamic allocation */ 351static void amdgpu_dm_fbc_init(struct drm_connector *connector)
351#define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
352
353static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
354{ 352{
355 int r; 353 struct drm_device *dev = connector->dev;
354 struct amdgpu_device *adev = dev->dev_private;
356 struct dm_comressor_info *compressor = &adev->dm.compressor; 355 struct dm_comressor_info *compressor = &adev->dm.compressor;
356 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
357 struct drm_display_mode *mode;
358 unsigned long max_size = 0;
359
360 if (adev->dm.dc->fbc_compressor == NULL)
361 return;
362
363 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
364 return;
365
366 if (compressor->bo_ptr)
367 return;
357 368
358 if (!compressor->bo_ptr) { 369
359 r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE, 370 list_for_each_entry(mode, &connector->modes, head) {
360 AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr, 371 if (max_size < mode->htotal * mode->vtotal)
361 &compressor->gpu_addr, &compressor->cpu_addr); 372 max_size = mode->htotal * mode->vtotal;
373 }
374
375 if (max_size) {
376 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
377 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
378 &compressor->gpu_addr, &compressor->cpu_addr);
362 379
363 if (r) 380 if (r)
364 DRM_ERROR("DM: Failed to initialize fbc\n"); 381 DRM_ERROR("DM: Failed to initialize FBC\n");
382 else {
383 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
384 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
385 }
386
365 } 387 }
366 388
367} 389}
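The reworked amdgpu_dm_fbc_init() above sizes the FBC buffer from the largest mode exposed by the eDP connector instead of a hard-coded 4K constant, and places it in GTT rather than VRAM. A minimal sketch of that sizing step follows; the helper name is hypothetical and the 4 bytes-per-pixel assumption mirrors the hunk above.

/* Illustrative sizing helper: worst-case framebuffer bytes across the
 * connector's mode list, assuming 4 bytes per pixel as the hunk above does. */
static unsigned long dm_fbc_worst_case_bytes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	unsigned long max_pixels = 0;

	list_for_each_entry(mode, &connector->modes, head) {
		unsigned long pixels = (unsigned long)mode->htotal * mode->vtotal;

		if (pixels > max_pixels)
			max_pixels = pixels;
	}

	/* e.g. a 1920x1080 mode with a 2200x1125 total timing gives
	 * 2200 * 1125 * 4 = 9,900,000 bytes. */
	return max_pixels * 4;
}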
@@ -381,12 +403,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
381 /* Zero all the fields */ 403 /* Zero all the fields */
382 memset(&init_data, 0, sizeof(init_data)); 404 memset(&init_data, 0, sizeof(init_data));
383 405
384 /* initialize DAL's lock (for SYNC context use) */
385 spin_lock_init(&adev->dm.dal_lock);
386
387 /* initialize DAL's mutex */
388 mutex_init(&adev->dm.dal_mutex);
389
390 if(amdgpu_dm_irq_init(adev)) { 406 if(amdgpu_dm_irq_init(adev)) {
391 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); 407 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
392 goto error; 408 goto error;
@@ -397,7 +413,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
397 init_data.asic_id.pci_revision_id = adev->rev_id; 413 init_data.asic_id.pci_revision_id = adev->rev_id;
398 init_data.asic_id.hw_internal_rev = adev->external_rev_id; 414 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
399 415
400 init_data.asic_id.vram_width = adev->mc.vram_width; 416 init_data.asic_id.vram_width = adev->gmc.vram_width;
401 /* TODO: initialize init_data.asic_id.vram_type here!!!! */ 417 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
402 init_data.asic_id.atombios_base_address = 418 init_data.asic_id.atombios_base_address =
403 adev->mode_info.atom_context->bios; 419 adev->mode_info.atom_context->bios;
@@ -422,11 +438,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
422 else 438 else
423 init_data.log_mask = DC_MIN_LOG_MASK; 439 init_data.log_mask = DC_MIN_LOG_MASK;
424 440
425#if defined(CONFIG_DRM_AMD_DC_FBC) 441 /*
426 if (adev->family == FAMILY_CZ) 442 * TODO debug why this doesn't work on Raven
427 amdgpu_dm_initialize_fbc(adev); 443 */
428 init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr; 444 if (adev->flags & AMD_IS_APU &&
429#endif 445 adev->asic_type >= CHIP_CARRIZO &&
446 adev->asic_type < CHIP_RAVEN)
447 init_data.flags.gpu_vm_support = true;
448
430 /* Display Core create. */ 449 /* Display Core create. */
431 adev->dm.dc = dc_create(&init_data); 450 adev->dm.dc = dc_create(&init_data);
432 451
@@ -447,6 +466,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
447 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", 466 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
448 adev->dm.freesync_module); 467 adev->dm.freesync_module);
449 468
469 amdgpu_dm_init_color_mod();
470
450 if (amdgpu_dm_initialize_drm_device(adev)) { 471 if (amdgpu_dm_initialize_drm_device(adev)) {
451 DRM_ERROR( 472 DRM_ERROR(
452 "amdgpu: failed to initialize sw for display support.\n"); 473 "amdgpu: failed to initialize sw for display support.\n");
@@ -540,9 +561,9 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
540 561
541static int dm_late_init(void *handle) 562static int dm_late_init(void *handle)
542{ 563{
543 struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev; 564 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
544 565
545 return detect_mst_link_for_all_connectors(dev); 566 return detect_mst_link_for_all_connectors(adev->ddev);
546} 567}
547 568
548static void s3_handle_mst(struct drm_device *dev, bool suspend) 569static void s3_handle_mst(struct drm_device *dev, bool suspend)
@@ -629,11 +650,13 @@ static int dm_resume(void *handle)
629{ 650{
630 struct amdgpu_device *adev = handle; 651 struct amdgpu_device *adev = handle;
631 struct amdgpu_display_manager *dm = &adev->dm; 652 struct amdgpu_display_manager *dm = &adev->dm;
653 int ret = 0;
632 654
633 /* power on hardware */ 655 /* power on hardware */
634 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 656 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
635 657
636 return 0; 658 ret = amdgpu_dm_display_resume(adev);
659 return ret;
637} 660}
638 661
639int amdgpu_dm_display_resume(struct amdgpu_device *adev) 662int amdgpu_dm_display_resume(struct amdgpu_device *adev)
@@ -791,7 +814,7 @@ dm_atomic_state_alloc_free(struct drm_atomic_state *state)
791} 814}
792 815
793static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 816static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
794 .fb_create = amdgpu_user_framebuffer_create, 817 .fb_create = amdgpu_display_user_framebuffer_create,
795 .output_poll_changed = drm_fb_helper_output_poll_changed, 818 .output_poll_changed = drm_fb_helper_output_poll_changed,
796 .atomic_check = amdgpu_dm_atomic_check, 819 .atomic_check = amdgpu_dm_atomic_check,
797 .atomic_commit = amdgpu_dm_atomic_commit, 820 .atomic_commit = amdgpu_dm_atomic_commit,
@@ -1035,6 +1058,10 @@ static void handle_hpd_rx_irq(void *param)
1035 !is_mst_root_connector) { 1058 !is_mst_root_connector) {
1036 /* Downstream Port status changed. */ 1059 /* Downstream Port status changed. */
1037 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { 1060 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1061
1062 if (aconnector->fake_enable)
1063 aconnector->fake_enable = false;
1064
1038 amdgpu_dm_update_connector_after_detect(aconnector); 1065 amdgpu_dm_update_connector_after_detect(aconnector);
1039 1066
1040 1067
@@ -1104,7 +1131,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1104 1131
1105 if (adev->asic_type == CHIP_VEGA10 || 1132 if (adev->asic_type == CHIP_VEGA10 ||
1106 adev->asic_type == CHIP_RAVEN) 1133 adev->asic_type == CHIP_RAVEN)
1107 client_id = AMDGPU_IH_CLIENTID_DCE; 1134 client_id = SOC15_IH_CLIENTID_DCE;
1108 1135
1109 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 1136 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1110 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 1137 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -1204,7 +1231,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1204 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 1231 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1205 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 1232 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1206 i++) { 1233 i++) {
1207 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq); 1234 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1208 1235
1209 if (r) { 1236 if (r) {
1210 DRM_ERROR("Failed to add crtc irq id!\n"); 1237 DRM_ERROR("Failed to add crtc irq id!\n");
@@ -1228,7 +1255,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1228 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 1255 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1229 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; 1256 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1230 i++) { 1257 i++) {
1231 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 1258 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1232 if (r) { 1259 if (r) {
1233 DRM_ERROR("Failed to add page flip irq id!\n"); 1260 DRM_ERROR("Failed to add page flip irq id!\n");
1234 return r; 1261 return r;
@@ -1249,7 +1276,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1249 } 1276 }
1250 1277
1251 /* HPD */ 1278 /* HPD */
1252 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 1279 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1253 &adev->hpd_irq); 1280 &adev->hpd_irq);
1254 if (r) { 1281 if (r) {
1255 DRM_ERROR("Failed to add hpd irq id!\n"); 1282 DRM_ERROR("Failed to add hpd irq id!\n");
@@ -1279,9 +1306,9 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1279 /* indicate support of immediate flip */ 1306 /* indicate support of immediate flip */
1280 adev->ddev->mode_config.async_page_flip = true; 1307 adev->ddev->mode_config.async_page_flip = true;
1281 1308
1282 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 1309 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
1283 1310
1284 r = amdgpu_modeset_create_props(adev); 1311 r = amdgpu_display_modeset_create_props(adev);
1285 if (r) 1312 if (r)
1286 return r; 1313 return r;
1287 1314
@@ -1338,6 +1365,43 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1338 1365
1339#endif 1366#endif
1340 1367
1368static int initialize_plane(struct amdgpu_display_manager *dm,
1369 struct amdgpu_mode_info *mode_info,
1370 int plane_id)
1371{
1372 struct amdgpu_plane *plane;
1373 unsigned long possible_crtcs;
1374 int ret = 0;
1375
1376 plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
1377 mode_info->planes[plane_id] = plane;
1378
1379 if (!plane) {
1380 DRM_ERROR("KMS: Failed to allocate plane\n");
1381 return -ENOMEM;
1382 }
1383 plane->base.type = mode_info->plane_type[plane_id];
1384
1385 /*
 1386 * HACK: IGT tests expect that each plane can only have
 1387 * one possible CRTC. For now, set one CRTC for each
1388 * plane that is not an underlay, but still allow multiple
1389 * CRTCs for underlay planes.
1390 */
1391 possible_crtcs = 1 << plane_id;
1392 if (plane_id >= dm->dc->caps.max_streams)
1393 possible_crtcs = 0xff;
1394
1395 ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
1396
1397 if (ret) {
1398 DRM_ERROR("KMS: Failed to initialize plane\n");
1399 return ret;
1400 }
1401
1402 return ret;
1403}
1404
1341/* In this architecture, the association 1405/* In this architecture, the association
1342 * connector -> encoder -> crtc 1406 * connector -> encoder -> crtc
1343 * is not really required. The crtc and connector will hold the 1407 * is not really required. The crtc and connector will hold the
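In initialize_plane() above, possible_crtcs is a bitmask of the CRTCs a plane may be attached to: planes below dc->caps.max_streams are pinned to the single CRTC with the same index, while the remaining (underlay) planes may attach to any of the first eight CRTCs. A small sketch of that choice; the standalone helper name is illustrative only.

/* Sketch of the possible_crtcs selection above. With max_streams == 4,
 * plane 2 may only bind to CRTC 2 (mask 0x4), while an underlay plane
 * (index >= 4) may bind to any of CRTCs 0-7 (mask 0xff). */
static unsigned long dm_plane_possible_crtcs(int plane_id, int max_streams)
{
	if (plane_id < max_streams)
		return 1UL << plane_id;
	return 0xff;
}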
@@ -1348,12 +1412,12 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1348static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 1412static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1349{ 1413{
1350 struct amdgpu_display_manager *dm = &adev->dm; 1414 struct amdgpu_display_manager *dm = &adev->dm;
1351 uint32_t i; 1415 int32_t i;
1352 struct amdgpu_dm_connector *aconnector = NULL; 1416 struct amdgpu_dm_connector *aconnector = NULL;
1353 struct amdgpu_encoder *aencoder = NULL; 1417 struct amdgpu_encoder *aencoder = NULL;
1354 struct amdgpu_mode_info *mode_info = &adev->mode_info; 1418 struct amdgpu_mode_info *mode_info = &adev->mode_info;
1355 uint32_t link_cnt; 1419 uint32_t link_cnt;
1356 unsigned long possible_crtcs; 1420 int32_t total_overlay_planes, total_primary_planes;
1357 1421
1358 link_cnt = dm->dc->caps.max_links; 1422 link_cnt = dm->dc->caps.max_links;
1359 if (amdgpu_dm_mode_config_init(dm->adev)) { 1423 if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1361,30 +1425,22 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1361 return -1; 1425 return -1;
1362 } 1426 }
1363 1427
1364 for (i = 0; i < dm->dc->caps.max_planes; i++) { 1428 /* Identify the number of planes to be initialized */
1365 struct amdgpu_plane *plane; 1429 total_overlay_planes = dm->dc->caps.max_slave_planes;
1430 total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
1366 1431
1367 plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL); 1432 /* First initialize overlay planes, index starting after primary planes */
1368 mode_info->planes[i] = plane; 1433 for (i = (total_overlay_planes - 1); i >= 0; i--) {
1369 1434 if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
1370 if (!plane) { 1435 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1371 DRM_ERROR("KMS: Failed to allocate plane\n");
1372 goto fail; 1436 goto fail;
1373 } 1437 }
1374 plane->base.type = mode_info->plane_type[i]; 1438 }
1375
1376 /*
1377 * HACK: IGT tests expect that each plane can only have one
1378 * one possible CRTC. For now, set one CRTC for each
1379 * plane that is not an underlay, but still allow multiple
1380 * CRTCs for underlay planes.
1381 */
1382 possible_crtcs = 1 << i;
1383 if (i >= dm->dc->caps.max_streams)
1384 possible_crtcs = 0xff;
1385 1439
1386 if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) { 1440 /* Initialize primary planes */
1387 DRM_ERROR("KMS: Failed to initialize plane\n"); 1441 for (i = (total_primary_planes - 1); i >= 0; i--) {
1442 if (initialize_plane(dm, mode_info, i)) {
1443 DRM_ERROR("KMS: Failed to initialize primary plane\n");
1388 goto fail; 1444 goto fail;
1389 } 1445 }
1390 } 1446 }
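The rewritten loops above split plane creation into two passes: overlay (slave) planes are registered first, at indices that start after the primary planes, and the primaries follow, both counting down. A worked example, assuming max_planes == 6 and max_slave_planes == 2:

/* Index layout produced by the loops above (illustrative values):
 * total_overlay_planes = 2, total_primary_planes = 6 - 2 = 4
 * first pass:  initialize_plane(dm, mode_info, 5), then index 4   (overlays)
 * second pass: initialize_plane(dm, mode_info, 3), 2, 1, 0        (primaries)
 * so primary plane i still lines up with CRTC i via possible_crtcs. */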
@@ -1538,7 +1594,6 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1538static const struct amdgpu_display_funcs dm_display_funcs = { 1594static const struct amdgpu_display_funcs dm_display_funcs = {
1539 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 1595 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
1540 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 1596 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
1541 .vblank_wait = NULL,
1542 .backlight_set_level = 1597 .backlight_set_level =
1543 dm_set_backlight_level,/* called unconditionally */ 1598 dm_set_backlight_level,/* called unconditionally */
1544 .backlight_get_level = 1599 .backlight_get_level =
@@ -1589,8 +1644,6 @@ static int dm_early_init(void *handle)
1589{ 1644{
1590 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1645 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1591 1646
1592 adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
1593
1594 switch (adev->asic_type) { 1647 switch (adev->asic_type) {
1595 case CHIP_BONAIRE: 1648 case CHIP_BONAIRE:
1596 case CHIP_HAWAII: 1649 case CHIP_HAWAII:
@@ -1924,32 +1977,6 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
1924 1977
1925} 1978}
1926 1979
1927static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
1928 struct dc_plane_state *plane_state)
1929{
1930 int i;
1931 struct dc_gamma *gamma;
1932 struct drm_color_lut *lut =
1933 (struct drm_color_lut *) crtc_state->gamma_lut->data;
1934
1935 gamma = dc_create_gamma();
1936
1937 if (gamma == NULL) {
1938 WARN_ON(1);
1939 return;
1940 }
1941
1942 gamma->type = GAMMA_RGB_256;
1943 gamma->num_entries = GAMMA_RGB_256_ENTRIES;
1944 for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
1945 gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
1946 gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
1947 gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
1948 }
1949
1950 plane_state->gamma_correction = gamma;
1951}
1952
1953static int fill_plane_attributes(struct amdgpu_device *adev, 1980static int fill_plane_attributes(struct amdgpu_device *adev,
1954 struct dc_plane_state *dc_plane_state, 1981 struct dc_plane_state *dc_plane_state,
1955 struct drm_plane_state *plane_state, 1982 struct drm_plane_state *plane_state,
@@ -1977,14 +2004,17 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
1977 if (input_tf == NULL) 2004 if (input_tf == NULL)
1978 return -ENOMEM; 2005 return -ENOMEM;
1979 2006
1980 input_tf->type = TF_TYPE_PREDEFINED;
1981 input_tf->tf = TRANSFER_FUNCTION_SRGB;
1982
1983 dc_plane_state->in_transfer_func = input_tf; 2007 dc_plane_state->in_transfer_func = input_tf;
1984 2008
1985 /* In case of gamma set, update gamma value */ 2009 /*
1986 if (crtc_state->gamma_lut) 2010 * Always set input transfer function, since plane state is refreshed
1987 fill_gamma_from_crtc_state(crtc_state, dc_plane_state); 2011 * every time.
2012 */
2013 ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2014 if (ret) {
2015 dc_transfer_func_release(dc_plane_state->in_transfer_func);
2016 dc_plane_state->in_transfer_func = NULL;
2017 }
1988 2018
1989 return ret; 2019 return ret;
1990} 2020}
@@ -2010,30 +2040,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2010 dst.width = stream->timing.h_addressable; 2040 dst.width = stream->timing.h_addressable;
2011 dst.height = stream->timing.v_addressable; 2041 dst.height = stream->timing.v_addressable;
2012 2042
2013 rmx_type = dm_state->scaling; 2043 if (dm_state) {
2014 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 2044 rmx_type = dm_state->scaling;
2015 if (src.width * dst.height < 2045 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2016 src.height * dst.width) { 2046 if (src.width * dst.height <
2017 /* height needs less upscaling/more downscaling */ 2047 src.height * dst.width) {
2018 dst.width = src.width * 2048 /* height needs less upscaling/more downscaling */
2019 dst.height / src.height; 2049 dst.width = src.width *
2020 } else { 2050 dst.height / src.height;
2021 /* width needs less upscaling/more downscaling */ 2051 } else {
2022 dst.height = src.height * 2052 /* width needs less upscaling/more downscaling */
2023 dst.width / src.width; 2053 dst.height = src.height *
2054 dst.width / src.width;
2055 }
2056 } else if (rmx_type == RMX_CENTER) {
2057 dst = src;
2024 } 2058 }
2025 } else if (rmx_type == RMX_CENTER) {
2026 dst = src;
2027 }
2028 2059
2029 dst.x = (stream->timing.h_addressable - dst.width) / 2; 2060 dst.x = (stream->timing.h_addressable - dst.width) / 2;
2030 dst.y = (stream->timing.v_addressable - dst.height) / 2; 2061 dst.y = (stream->timing.v_addressable - dst.height) / 2;
2031 2062
2032 if (dm_state->underscan_enable) { 2063 if (dm_state->underscan_enable) {
2033 dst.x += dm_state->underscan_hborder / 2; 2064 dst.x += dm_state->underscan_hborder / 2;
2034 dst.y += dm_state->underscan_vborder / 2; 2065 dst.y += dm_state->underscan_vborder / 2;
2035 dst.width -= dm_state->underscan_hborder; 2066 dst.width -= dm_state->underscan_hborder;
2036 dst.height -= dm_state->underscan_vborder; 2067 dst.height -= dm_state->underscan_vborder;
2068 }
2037 } 2069 }
2038 2070
2039 stream->src = src; 2071 stream->src = src;
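update_stream_scaling_settings() above preserves the aspect ratio by shrinking whichever destination dimension would otherwise stretch the source, then centers the result and applies any underscan borders. A worked example of the RMX_ASPECT branch, using assumed values of a 1280x720 source and a 1920x1200 addressable timing:

/* RMX_ASPECT, src 1280x720, dst 1920x1200 (illustrative numbers):
 * src.width * dst.height = 1280 * 1200 = 1,536,000
 * src.height * dst.width =  720 * 1920 = 1,382,400
 * The first product is not smaller, so the height branch is taken:
 * dst.height = 720 * 1920 / 1280 = 1080, and dst stays 1920 wide.
 * Centering then yields dst.x = 0 and dst.y = (1200 - 1080) / 2 = 60. */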
@@ -2322,7 +2354,7 @@ static void set_master_stream(struct dc_stream_state *stream_set[],
2322 } 2354 }
2323 } 2355 }
2324 for (j = 0; j < stream_count; j++) { 2356 for (j = 0; j < stream_count; j++) {
2325 if (stream_set[j] && j != master_stream) 2357 if (stream_set[j])
2326 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream]; 2358 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2327 } 2359 }
2328} 2360}
@@ -2358,12 +2390,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2358 2390
2359 if (aconnector == NULL) { 2391 if (aconnector == NULL) {
2360 DRM_ERROR("aconnector is NULL!\n"); 2392 DRM_ERROR("aconnector is NULL!\n");
2361 goto drm_connector_null; 2393 return stream;
2362 }
2363
2364 if (dm_state == NULL) {
2365 DRM_ERROR("dm_state is NULL!\n");
2366 goto dm_state_null;
2367 } 2394 }
2368 2395
2369 drm_connector = &aconnector->base; 2396 drm_connector = &aconnector->base;
@@ -2375,18 +2402,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2375 */ 2402 */
2376 if (aconnector->mst_port) { 2403 if (aconnector->mst_port) {
2377 dm_dp_mst_dc_sink_create(drm_connector); 2404 dm_dp_mst_dc_sink_create(drm_connector);
2378 goto mst_dc_sink_create_done; 2405 return stream;
2379 } 2406 }
2380 2407
2381 if (create_fake_sink(aconnector)) 2408 if (create_fake_sink(aconnector))
2382 goto stream_create_fail; 2409 return stream;
2383 } 2410 }
2384 2411
2385 stream = dc_create_stream_for_sink(aconnector->dc_sink); 2412 stream = dc_create_stream_for_sink(aconnector->dc_sink);
2386 2413
2387 if (stream == NULL) { 2414 if (stream == NULL) {
2388 DRM_ERROR("Failed to create stream for sink!\n"); 2415 DRM_ERROR("Failed to create stream for sink!\n");
2389 goto stream_create_fail; 2416 return stream;
2390 } 2417 }
2391 2418
2392 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { 2419 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
@@ -2412,9 +2439,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2412 } else { 2439 } else {
2413 decide_crtc_timing_for_drm_display_mode( 2440 decide_crtc_timing_for_drm_display_mode(
2414 &mode, preferred_mode, 2441 &mode, preferred_mode,
2415 dm_state->scaling != RMX_OFF); 2442 dm_state ? (dm_state->scaling != RMX_OFF) : false);
2416 } 2443 }
2417 2444
2445 if (!dm_state)
2446 drm_mode_set_crtcinfo(&mode, 0);
2447
2418 fill_stream_properties_from_drm_display_mode(stream, 2448 fill_stream_properties_from_drm_display_mode(stream,
2419 &mode, &aconnector->base); 2449 &mode, &aconnector->base);
2420 update_stream_scaling_settings(&mode, dm_state, stream); 2450 update_stream_scaling_settings(&mode, dm_state, stream);
@@ -2424,10 +2454,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2424 drm_connector, 2454 drm_connector,
2425 aconnector->dc_sink); 2455 aconnector->dc_sink);
2426 2456
2427stream_create_fail: 2457 update_stream_signal(stream);
2428dm_state_null: 2458
2429drm_connector_null:
2430mst_dc_sink_create_done:
2431 return stream; 2459 return stream;
2432} 2460}
2433 2461
@@ -2495,6 +2523,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
2495 return &state->base; 2523 return &state->base;
2496} 2524}
2497 2525
2526
2527static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
2528{
2529 enum dc_irq_source irq_source;
2530 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2531 struct amdgpu_device *adev = crtc->dev->dev_private;
2532
2533 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
2534 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2535}
2536
2537static int dm_enable_vblank(struct drm_crtc *crtc)
2538{
2539 return dm_set_vblank(crtc, true);
2540}
2541
2542static void dm_disable_vblank(struct drm_crtc *crtc)
2543{
2544 dm_set_vblank(crtc, false);
2545}
2546
2498/* Implemented only the options currently available for the driver */ 2547/* Implemented only the options currently available for the driver */
2499static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { 2548static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
2500 .reset = dm_crtc_reset_state, 2549 .reset = dm_crtc_reset_state,
@@ -2504,6 +2553,9 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
2504 .page_flip = drm_atomic_helper_page_flip, 2553 .page_flip = drm_atomic_helper_page_flip,
2505 .atomic_duplicate_state = dm_crtc_duplicate_state, 2554 .atomic_duplicate_state = dm_crtc_duplicate_state,
2506 .atomic_destroy_state = dm_crtc_destroy_state, 2555 .atomic_destroy_state = dm_crtc_destroy_state,
2556 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
2557 .enable_vblank = dm_enable_vblank,
2558 .disable_vblank = dm_disable_vblank,
2507}; 2559};
2508 2560
2509static enum drm_connector_status 2561static enum drm_connector_status
@@ -2779,6 +2831,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
2779 /* TODO: Unhardcode stream count */ 2831 /* TODO: Unhardcode stream count */
2780 struct dc_stream_state *stream; 2832 struct dc_stream_state *stream;
2781 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 2833 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2834 enum dc_status dc_result = DC_OK;
2782 2835
2783 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 2836 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
2784 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) 2837 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
@@ -2798,21 +2851,22 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
2798 goto fail; 2851 goto fail;
2799 } 2852 }
2800 2853
2801 stream = dc_create_stream_for_sink(dc_sink); 2854 stream = create_stream_for_sink(aconnector, mode, NULL);
2802 if (stream == NULL) { 2855 if (stream == NULL) {
2803 DRM_ERROR("Failed to create stream for sink!\n"); 2856 DRM_ERROR("Failed to create stream for sink!\n");
2804 goto fail; 2857 goto fail;
2805 } 2858 }
2806 2859
2807 drm_mode_set_crtcinfo(mode, 0); 2860 dc_result = dc_validate_stream(adev->dm.dc, stream);
2808 fill_stream_properties_from_drm_display_mode(stream, mode, connector);
2809
2810 stream->src.width = mode->hdisplay;
2811 stream->src.height = mode->vdisplay;
2812 stream->dst = stream->src;
2813 2861
2814 if (dc_validate_stream(adev->dm.dc, stream) == DC_OK) 2862 if (dc_result == DC_OK)
2815 result = MODE_OK; 2863 result = MODE_OK;
2864 else
2865 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
 2866 mode->hdisplay,
 2867 mode->vdisplay,
2868 mode->clock,
2869 dc_result);
2816 2870
2817 dc_stream_release(stream); 2871 dc_stream_release(stream);
2818 2872
@@ -2954,11 +3008,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
2954{ 3008{
2955 struct amdgpu_framebuffer *afb; 3009 struct amdgpu_framebuffer *afb;
2956 struct drm_gem_object *obj; 3010 struct drm_gem_object *obj;
3011 struct amdgpu_device *adev;
2957 struct amdgpu_bo *rbo; 3012 struct amdgpu_bo *rbo;
2958 uint64_t chroma_addr = 0; 3013 uint64_t chroma_addr = 0;
2959 int r;
2960 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; 3014 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
2961 unsigned int awidth; 3015 unsigned int awidth;
3016 uint32_t domain;
3017 int r;
2962 3018
2963 dm_plane_state_old = to_dm_plane_state(plane->state); 3019 dm_plane_state_old = to_dm_plane_state(plane->state);
2964 dm_plane_state_new = to_dm_plane_state(new_state); 3020 dm_plane_state_new = to_dm_plane_state(new_state);
@@ -2972,12 +3028,17 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
2972 3028
2973 obj = afb->obj; 3029 obj = afb->obj;
2974 rbo = gem_to_amdgpu_bo(obj); 3030 rbo = gem_to_amdgpu_bo(obj);
3031 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
2975 r = amdgpu_bo_reserve(rbo, false); 3032 r = amdgpu_bo_reserve(rbo, false);
2976 if (unlikely(r != 0)) 3033 if (unlikely(r != 0))
2977 return r; 3034 return r;
2978 3035
2979 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address); 3036 if (plane->type != DRM_PLANE_TYPE_CURSOR)
3037 domain = amdgpu_display_framebuffer_domains(adev);
3038 else
3039 domain = AMDGPU_GEM_DOMAIN_VRAM;
2980 3040
3041 r = amdgpu_bo_pin(rbo, domain, &afb->address);
2981 3042
2982 amdgpu_bo_unreserve(rbo); 3043 amdgpu_bo_unreserve(rbo);
2983 3044
@@ -3058,6 +3119,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
3058 if (!dm_plane_state->dc_state) 3119 if (!dm_plane_state->dc_state)
3059 return 0; 3120 return 0;
3060 3121
3122 if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
3123 return -EINVAL;
3124
3061 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 3125 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3062 return 0; 3126 return 0;
3063 3127
@@ -3190,7 +3254,9 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3190 acrtc->base.enabled = false; 3254 acrtc->base.enabled = false;
3191 3255
3192 dm->adev->mode_info.crtcs[crtc_index] = acrtc; 3256 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3193 drm_mode_crtc_set_gamma_size(&acrtc->base, 256); 3257 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
3258 true, MAX_COLOR_LUT_ENTRIES);
3259 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
3194 3260
3195 return 0; 3261 return 0;
3196 3262
@@ -3366,9 +3432,12 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3366 struct edid *edid = amdgpu_dm_connector->edid; 3432 struct edid *edid = amdgpu_dm_connector->edid;
3367 3433
3368 encoder = helper->best_encoder(connector); 3434 encoder = helper->best_encoder(connector);
3369
3370 amdgpu_dm_connector_ddc_get_modes(connector, edid); 3435 amdgpu_dm_connector_ddc_get_modes(connector, edid);
3371 amdgpu_dm_connector_add_common_modes(encoder, connector); 3436 amdgpu_dm_connector_add_common_modes(encoder, connector);
3437
3438#if defined(CONFIG_DRM_AMD_DC_FBC)
3439 amdgpu_dm_fbc_init(connector);
3440#endif
3372 return amdgpu_dm_connector->num_modes; 3441 return amdgpu_dm_connector->num_modes;
3373} 3442}
3374 3443
@@ -3641,7 +3710,7 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
3641 * constant is the same as PFLIP 3710 * constant is the same as PFLIP
3642 */ 3711 */
3643 int irq_type = 3712 int irq_type =
3644 amdgpu_crtc_idx_to_irq_type( 3713 amdgpu_display_crtc_idx_to_irq_type(
3645 adev, 3714 adev,
3646 acrtc->crtc_id); 3715 acrtc->crtc_id);
3647 3716
@@ -3836,7 +3905,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3836 3905
3837 3906
3838 /* Prepare wait for target vblank early - before the fence-waits */ 3907 /* Prepare wait for target vblank early - before the fence-waits */
3839 target_vblank = target - drm_crtc_vblank_count(crtc) + 3908 target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
3840 amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id); 3909 amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
3841 3910
3842 /* TODO This might fail and hence better not used, wait 3911 /* TODO This might fail and hence better not used, wait
@@ -3860,9 +3929,9 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3860 * targeted by the flip 3929 * targeted by the flip
3861 */ 3930 */
3862 while ((acrtc->enabled && 3931 while ((acrtc->enabled &&
3863 (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0, 3932 (amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
3864 &vpos, &hpos, NULL, NULL, 3933 0, &vpos, &hpos, NULL,
3865 &crtc->hwmode) 3934 NULL, &crtc->hwmode)
3866 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 3935 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
3867 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 3936 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
3868 (int)(target_vblank - 3937 (int)(target_vblank -
@@ -3982,7 +4051,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
3982 amdgpu_dm_do_flip( 4051 amdgpu_dm_do_flip(
3983 crtc, 4052 crtc,
3984 fb, 4053 fb,
3985 drm_crtc_vblank_count(crtc) + *wait_for_vblank, 4054 (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
3986 dm_state->context); 4055 dm_state->context);
3987 } 4056 }
3988 4057
@@ -4603,6 +4672,30 @@ next_crtc:
4603 /* Release extra reference */ 4672 /* Release extra reference */
4604 if (new_stream) 4673 if (new_stream)
4605 dc_stream_release(new_stream); 4674 dc_stream_release(new_stream);
4675
4676 /*
4677 * We want to do dc stream updates that do not require a
4678 * full modeset below.
4679 */
4680 if (!enable || !aconnector || modereset_required(new_crtc_state))
4681 continue;
4682 /*
4683 * Given above conditions, the dc state cannot be NULL because:
4684 * 1. We're attempting to enable a CRTC. Which has a...
4685 * 2. Valid connector attached, and
4686 * 3. User does not want to reset it (disable or mark inactive,
4687 * which can happen on a CRTC that's already disabled).
4688 * => It currently exists.
4689 */
4690 BUG_ON(dm_new_crtc_state->stream == NULL);
4691
 4692 /* Color management settings */
4693 if (dm_new_crtc_state->base.color_mgmt_changed) {
4694 ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
4695 if (ret)
4696 goto fail;
4697 amdgpu_dm_set_ctm(dm_new_crtc_state);
4698 }
4606 } 4699 }
4607 4700
4608 return ret; 4701 return ret;
@@ -4630,11 +4723,9 @@ static int dm_update_planes_state(struct dc *dc,
4630 bool pflip_needed = !state->allow_modeset; 4723 bool pflip_needed = !state->allow_modeset;
4631 int ret = 0; 4724 int ret = 0;
4632 4725
4633 if (pflip_needed)
4634 return ret;
4635 4726
4636 /* Add new planes */ 4727 /* Add new planes, in reverse order, as DC expects */
4637 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 4728 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
4638 new_plane_crtc = new_plane_state->crtc; 4729 new_plane_crtc = new_plane_state->crtc;
4639 old_plane_crtc = old_plane_state->crtc; 4730 old_plane_crtc = old_plane_state->crtc;
4640 dm_new_plane_state = to_dm_plane_state(new_plane_state); 4731 dm_new_plane_state = to_dm_plane_state(new_plane_state);
@@ -4646,6 +4737,8 @@ static int dm_update_planes_state(struct dc *dc,
4646 4737
4647 /* Remove any changed/removed planes */ 4738 /* Remove any changed/removed planes */
4648 if (!enable) { 4739 if (!enable) {
4740 if (pflip_needed)
4741 continue;
4649 4742
4650 if (!old_plane_crtc) 4743 if (!old_plane_crtc)
4651 continue; 4744 continue;
@@ -4677,6 +4770,7 @@ static int dm_update_planes_state(struct dc *dc,
4677 *lock_and_validation_needed = true; 4770 *lock_and_validation_needed = true;
4678 4771
4679 } else { /* Add new planes */ 4772 } else { /* Add new planes */
4773 struct dc_plane_state *dc_new_plane_state;
4680 4774
4681 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 4775 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4682 continue; 4776 continue;
@@ -4690,38 +4784,47 @@ static int dm_update_planes_state(struct dc *dc,
4690 if (!dm_new_crtc_state->stream) 4784 if (!dm_new_crtc_state->stream)
4691 continue; 4785 continue;
4692 4786
4787 if (pflip_needed)
4788 continue;
4693 4789
4694 WARN_ON(dm_new_plane_state->dc_state); 4790 WARN_ON(dm_new_plane_state->dc_state);
4695 4791
4696 dm_new_plane_state->dc_state = dc_create_plane_state(dc); 4792 dc_new_plane_state = dc_create_plane_state(dc);
4793 if (!dc_new_plane_state)
4794 return -ENOMEM;
4697 4795
4698 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", 4796 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4699 plane->base.id, new_plane_crtc->base.id); 4797 plane->base.id, new_plane_crtc->base.id);
4700 4798
4701 if (!dm_new_plane_state->dc_state) {
4702 ret = -EINVAL;
4703 return ret;
4704 }
4705
4706 ret = fill_plane_attributes( 4799 ret = fill_plane_attributes(
4707 new_plane_crtc->dev->dev_private, 4800 new_plane_crtc->dev->dev_private,
4708 dm_new_plane_state->dc_state, 4801 dc_new_plane_state,
4709 new_plane_state, 4802 new_plane_state,
4710 new_crtc_state); 4803 new_crtc_state);
4711 if (ret) 4804 if (ret) {
4805 dc_plane_state_release(dc_new_plane_state);
4712 return ret; 4806 return ret;
4807 }
4713 4808
4714 4809 /*
4810 * Any atomic check errors that occur after this will
4811 * not need a release. The plane state will be attached
4812 * to the stream, and therefore part of the atomic
4813 * state. It'll be released when the atomic state is
4814 * cleaned.
4815 */
4715 if (!dc_add_plane_to_context( 4816 if (!dc_add_plane_to_context(
4716 dc, 4817 dc,
4717 dm_new_crtc_state->stream, 4818 dm_new_crtc_state->stream,
4718 dm_new_plane_state->dc_state, 4819 dc_new_plane_state,
4719 dm_state->context)) { 4820 dm_state->context)) {
4720 4821
4721 ret = -EINVAL; 4822 dc_plane_state_release(dc_new_plane_state);
4722 return ret; 4823 return -EINVAL;
4723 } 4824 }
4724 4825
4826 dm_new_plane_state->dc_state = dc_new_plane_state;
4827
4725 /* Tell DC to do a full surface update every time there 4828 /* Tell DC to do a full surface update every time there
4726 * is a plane change. Inefficient, but works for now. 4829 * is a plane change. Inefficient, but works for now.
4727 */ 4830 */
@@ -4735,6 +4838,33 @@ static int dm_update_planes_state(struct dc *dc,
4735 return ret; 4838 return ret;
4736} 4839}
4737 4840
4841static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
4842 struct drm_crtc *crtc)
4843{
4844 struct drm_plane *plane;
4845 struct drm_crtc_state *crtc_state;
4846
4847 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
4848
4849 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
4850 struct drm_plane_state *plane_state =
4851 drm_atomic_get_plane_state(state, plane);
4852
4853 if (IS_ERR(plane_state))
4854 return -EDEADLK;
4855
4856 crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
4857 if (IS_ERR(crtc_state))
4858 return PTR_ERR(crtc_state);
4859
4860 if (crtc->primary == plane && crtc_state->active) {
4861 if (!plane_state->fb)
4862 return -EINVAL;
4863 }
4864 }
4865 return 0;
4866}
4867
4738static int amdgpu_dm_atomic_check(struct drm_device *dev, 4868static int amdgpu_dm_atomic_check(struct drm_device *dev,
4739 struct drm_atomic_state *state) 4869 struct drm_atomic_state *state)
4740{ 4870{
@@ -4758,6 +4888,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
4758 goto fail; 4888 goto fail;
4759 4889
4760 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 4890 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4891 ret = dm_atomic_check_plane_state_fb(state, crtc);
4892 if (ret)
4893 goto fail;
4894
4761 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 4895 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
4762 !new_crtc_state->color_mgmt_changed) 4896 !new_crtc_state->color_mgmt_changed)
4763 continue; 4897 continue;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 2faa77a7eeda..b68400c1154b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -85,8 +85,6 @@ struct amdgpu_display_manager {
85 struct dal *dal; 85 struct dal *dal;
86 struct dc *dc; 86 struct dc *dc;
87 struct cgs_device *cgs_device; 87 struct cgs_device *cgs_device;
88 /* lock to be used when DAL is called from SYNC IRQ context */
89 spinlock_t dal_lock;
90 88
91 struct amdgpu_device *adev; /*AMD base driver*/ 89 struct amdgpu_device *adev; /*AMD base driver*/
92 struct drm_device *ddev; /*DRM base driver*/ 90 struct drm_device *ddev; /*DRM base driver*/
@@ -119,17 +117,6 @@ struct amdgpu_display_manager {
119 /* this spin lock synchronizes access to 'irq_handler_list_table' */ 117 /* this spin lock synchronizes access to 'irq_handler_list_table' */
120 spinlock_t irq_handler_list_table_lock; 118 spinlock_t irq_handler_list_table_lock;
121 119
122 /* Timer-related data. */
123 struct list_head timer_handler_list;
124 struct workqueue_struct *timer_workqueue;
125
126 /* Use dal_mutex for any activity which is NOT syncronized by
127 * DRM mode setting locks.
128 * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
129 * DRM mode setting locks being acquired. This is where dal_mutex
130 * is acquired before calling into DAL. */
131 struct mutex dal_mutex;
132
133 struct backlight_device *backlight_dev; 120 struct backlight_device *backlight_dev;
134 121
135 const struct dc_link *backlight_link; 122 const struct dc_link *backlight_link;
@@ -210,6 +197,9 @@ struct dm_plane_state {
210struct dm_crtc_state { 197struct dm_crtc_state {
211 struct drm_crtc_state base; 198 struct drm_crtc_state base;
212 struct dc_stream_state *stream; 199 struct dc_stream_state *stream;
200
201 int crc_skip_count;
202 bool crc_enabled;
213}; 203};
214 204
215#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base) 205#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
@@ -268,6 +258,26 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
268void 258void
269amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector); 259amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector);
270 260
261/* amdgpu_dm_crc.c */
262#ifdef CONFIG_DEBUG_FS
263int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
264 size_t *values_cnt);
265void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
266#else
267#define amdgpu_dm_crtc_set_crc_source NULL
268#define amdgpu_dm_crtc_handle_crc_irq(x)
269#endif
270
271#define MAX_COLOR_LUT_ENTRIES 4096
272/* Legacy gamma LUT users such as X don't like large LUT sizes */
273#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
274
275void amdgpu_dm_init_color_mod(void);
276int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
277 struct dc_plane_state *dc_plane_state);
278void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc);
279int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc);
280
271extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; 281extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
272 282
273#endif /* __AMDGPU_DM_H__ */ 283#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
new file mode 100644
index 000000000000..f6cb502c303f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -0,0 +1,274 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "amdgpu_mode.h"
27#include "amdgpu_dm.h"
28#include "modules/color/color_gamma.h"
29
30#define MAX_DRM_LUT_VALUE 0xFFFF
31
32/*
33 * Initialize the color module.
34 *
35 * We're not using the full color module, only certain components.
36 * Only call setup functions for components that we need.
37 */
38void amdgpu_dm_init_color_mod(void)
39{
40 setup_x_points_distribution();
41}
42
43
44/*
45 * Return true if the given lut is a linear mapping of values, i.e. it acts
46 * like a bypass LUT.
47 *
48 * It is considered linear if the lut represents:
 49 * f(a) = a * 0xFFFF / (MAX_COLOR_LUT_ENTRIES - 1); for integer a in
50 * [0, MAX_COLOR_LUT_ENTRIES)
51 */
52static bool __is_lut_linear(struct drm_color_lut *lut, uint32_t size)
53{
54 int i;
55 uint32_t expected;
56 int delta;
57
58 for (i = 0; i < size; i++) {
59 /* All color values should equal */
60 if ((lut[i].red != lut[i].green) || (lut[i].green != lut[i].blue))
61 return false;
62
63 expected = i * MAX_DRM_LUT_VALUE / (size-1);
64
65 /* Allow a +/-1 error. */
66 delta = lut[i].red - expected;
67 if (delta < -1 || 1 < delta)
68 return false;
69 }
70 return true;
71}
72
73/**
74 * Convert the drm_color_lut to dc_gamma. The conversion depends on the size
75 * of the lut - whether or not it's legacy.
76 */
77static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
78 struct dc_gamma *gamma,
79 bool is_legacy)
80{
81 uint32_t r, g, b;
82 int i;
83
84 if (is_legacy) {
85 for (i = 0; i < MAX_COLOR_LEGACY_LUT_ENTRIES; i++) {
86 r = drm_color_lut_extract(lut[i].red, 16);
87 g = drm_color_lut_extract(lut[i].green, 16);
88 b = drm_color_lut_extract(lut[i].blue, 16);
89
90 gamma->entries.red[i] = dal_fixed31_32_from_int(r);
91 gamma->entries.green[i] = dal_fixed31_32_from_int(g);
92 gamma->entries.blue[i] = dal_fixed31_32_from_int(b);
93 }
94 return;
95 }
96
97 /* else */
98 for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
99 r = drm_color_lut_extract(lut[i].red, 16);
100 g = drm_color_lut_extract(lut[i].green, 16);
101 b = drm_color_lut_extract(lut[i].blue, 16);
102
103 gamma->entries.red[i] = dal_fixed31_32_from_fraction(r, MAX_DRM_LUT_VALUE);
104 gamma->entries.green[i] = dal_fixed31_32_from_fraction(g, MAX_DRM_LUT_VALUE);
105 gamma->entries.blue[i] = dal_fixed31_32_from_fraction(b, MAX_DRM_LUT_VALUE);
106 }
107}
108
109/**
110 * amdgpu_dm_set_regamma_lut: Set regamma lut for the given CRTC.
111 * @crtc: amdgpu_dm crtc state
112 *
113 * Update the underlying dc_stream_state's output transfer function (OTF) in
114 * preparation for hardware commit. If no lut is specified by user, we default
115 * to SRGB.
116 *
117 * RETURNS:
118 * 0 on success, -ENOMEM if memory cannot be allocated to calculate the OTF.
119 */
120int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
121{
122 struct drm_property_blob *blob = crtc->base.gamma_lut;
123 struct dc_stream_state *stream = crtc->stream;
124 struct drm_color_lut *lut;
125 uint32_t lut_size;
126 struct dc_gamma *gamma;
127 enum dc_transfer_func_type old_type = stream->out_transfer_func->type;
128
129 bool ret;
130
131 if (!blob) {
132 /* By default, use the SRGB predefined curve.*/
133 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
134 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
135 return 0;
136 }
137
138 lut = (struct drm_color_lut *)blob->data;
139 lut_size = blob->length / sizeof(struct drm_color_lut);
140
141 if (__is_lut_linear(lut, lut_size)) {
142 /* Set to bypass if lut is set to linear */
143 stream->out_transfer_func->type = TF_TYPE_BYPASS;
144 stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
145 return 0;
146 }
147
148 gamma = dc_create_gamma();
149 if (!gamma)
150 return -ENOMEM;
151
152 gamma->num_entries = lut_size;
153 if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
154 gamma->type = GAMMA_RGB_256;
155 else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
156 gamma->type = GAMMA_CS_TFM_1D;
157 else {
158 /* Invalid lut size */
159 dc_gamma_release(&gamma);
160 return -EINVAL;
161 }
162
163 /* Convert drm_lut into dc_gamma */
164 __drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);
165
166 /* Call color module to translate into something DC understands. Namely
167 * a transfer function.
168 */
169 stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
170 ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
171 gamma, true);
172 dc_gamma_release(&gamma);
173 if (!ret) {
174 stream->out_transfer_func->type = old_type;
175 DRM_ERROR("Out of memory when calculating regamma params\n");
176 return -ENOMEM;
177 }
178
179 return 0;
180}
181
182/**
183 * amdgpu_dm_set_ctm: Set the color transform matrix for the given CRTC.
184 * @crtc: amdgpu_dm crtc state
185 *
186 * Update the underlying dc_stream_state's gamut remap matrix in preparation
187 * for hardware commit. If no matrix is specified by user, gamut remap will be
188 * disabled.
189 */
190void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
191{
192
193 struct drm_property_blob *blob = crtc->base.ctm;
194 struct dc_stream_state *stream = crtc->stream;
195 struct drm_color_ctm *ctm;
196 int64_t val;
197 int i;
198
199 if (!blob) {
200 stream->gamut_remap_matrix.enable_remap = false;
201 return;
202 }
203
204 stream->gamut_remap_matrix.enable_remap = true;
205 ctm = (struct drm_color_ctm *)blob->data;
206 /*
207 * DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating
208 * with homogeneous coordinates, augment the matrix with 0's.
209 *
210 * The format provided is S31.32, using signed-magnitude representation.
211 * Our fixed31_32 is also S31.32, but is using 2's complement. We have
212 * to convert from signed-magnitude to 2's complement.
213 */
214 for (i = 0; i < 12; i++) {
215 /* Skip 4th element */
216 if (i % 4 == 3) {
217 stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero;
218 continue;
219 }
220
221 /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
222 val = ctm->matrix[i - (i/4)];
223 /* If negative, convert to 2's complement. */
224 if (val & (1ULL << 63))
225 val = -(val & ~(1ULL << 63));
226
227 stream->gamut_remap_matrix.matrix[i].value = val;
228 }
229}
230
231
232/**
233 * amdgpu_dm_set_degamma_lut: Set degamma lut for the given CRTC.
234 * @crtc: amdgpu_dm crtc state
235 *
236 * Update the underlying dc_stream_state's input transfer function (ITF) in
237 * preparation for hardware commit. If no lut is specified by user, we default
238 * to SRGB degamma.
239 *
240 * Currently, we only support degamma bypass, or preprogrammed SRGB degamma.
241 * Programmable degamma is not supported, and an attempt to do so will return
242 * -EINVAL.
243 *
244 * RETURNS:
245 * 0 on success, -EINVAL if custom degamma curve is given.
246 */
247int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
248 struct dc_plane_state *dc_plane_state)
249{
250 struct drm_property_blob *blob = crtc_state->degamma_lut;
251 struct drm_color_lut *lut;
252
253 if (!blob) {
254 /* Default to SRGB */
255 dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
256 dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
257 return 0;
258 }
259
260 lut = (struct drm_color_lut *)blob->data;
261 if (__is_lut_linear(lut, MAX_COLOR_LUT_ENTRIES)) {
262 dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
263 dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
264 return 0;
265 }
266
267 /* Otherwise, assume SRGB, since programmable degamma is not
268 * supported.
269 */
270 dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
271 dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
272 return -EINVAL;
273}
274
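amdgpu_dm_set_ctm() above bridges two fixed-point conventions: DRM hands over a 3x3 matrix of S31.32 values in signed-magnitude form (sign in bit 63), while DC's fixed31_32 uses two's complement and a 3x4 layout padded with zeros. A minimal standalone (userspace-style) sketch of the per-coefficient conversion, assuming those encodings:

#include <stdint.h>

/* Signed-magnitude S31.32 (DRM CTM entry) to two's complement S31.32
 * (DC fixed31_32 raw value), as done per element in amdgpu_dm_set_ctm(). */
static int64_t ctm_coeff_to_fixed31_32(uint64_t drm_val)
{
	if (drm_val & (1ULL << 63))
		return -(int64_t)(drm_val & ~(1ULL << 63));
	return (int64_t)drm_val;
}

For example, -0.5 arrives as (1ULL << 63) | (1ULL << 31) and converts to -(1LL << 31), which is -0.5 in two's complement S31.32; the fourth column of each row is simply left at dal_fixed31_32_zero.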
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
new file mode 100644
index 000000000000..52f2c01349e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <drm/drm_crtc.h>
27
28#include "amdgpu.h"
29#include "amdgpu_dm.h"
30#include "dc.h"
31
32enum amdgpu_dm_pipe_crc_source {
33 AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0,
34 AMDGPU_DM_PIPE_CRC_SOURCE_AUTO,
35 AMDGPU_DM_PIPE_CRC_SOURCE_MAX,
36 AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1,
37};
38
39static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
40{
41 if (!source || !strcmp(source, "none"))
42 return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
43 if (!strcmp(source, "auto"))
44 return AMDGPU_DM_PIPE_CRC_SOURCE_AUTO;
45
46 return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
47}
48
49int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
50 size_t *values_cnt)
51{
52 struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
53 struct dc_stream_state *stream_state = crtc_state->stream;
54
55 enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
56
57 if (source < 0) {
58 DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
59 src_name, crtc->index);
60 return -EINVAL;
61 }
62
63 /* When enabling CRC, we should also disable dithering. */
64 if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
65 if (dc_stream_configure_crc(stream_state->ctx->dc,
66 stream_state,
67 true, true)) {
68 crtc_state->crc_enabled = true;
69 dc_stream_set_dither_option(stream_state,
70 DITHER_OPTION_TRUN8);
71 }
72 else
73 return -EINVAL;
74 } else {
75 if (dc_stream_configure_crc(stream_state->ctx->dc,
76 stream_state,
77 false, false)) {
78 crtc_state->crc_enabled = false;
79 dc_stream_set_dither_option(stream_state,
80 DITHER_OPTION_DEFAULT);
81 }
82 else
83 return -EINVAL;
84 }
85
86 *values_cnt = 3;
 87 /* Reset crc_skip_count on the dm state */
88 crtc_state->crc_skip_count = 0;
89 return 0;
90}
91
92/**
93 * amdgpu_dm_crtc_handle_crc_irq: Report to DRM the CRC on given CRTC.
94 * @crtc: DRM CRTC object.
95 *
96 * This function should be called at the end of a vblank, when the fb has been
97 * fully processed through the pipe.
98 */
99void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
100{
101 struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
102 struct dc_stream_state *stream_state = crtc_state->stream;
103 uint32_t crcs[3];
104
105 /* Early return if CRC capture is not enabled. */
106 if (!crtc_state->crc_enabled)
107 return;
108
109 /*
110 * Since flipping and crc enablement happen asynchronously, we - more
111 * often than not - will be returning an 'uncooked' crc on first frame.
112 * Probably because hw isn't ready yet. For added security, skip the
113 * first two CRC values.
114 */
115 if (crtc_state->crc_skip_count < 2) {
116 crtc_state->crc_skip_count += 1;
117 return;
118 }
119
120 if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
121 &crcs[0], &crcs[1], &crcs[2]))
122 return;
123
124 drm_crtc_add_crc_entry(crtc, true,
125 drm_crtc_accurate_vblank_count(crtc), crcs);
126}
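Each report produced by amdgpu_dm_crtc_handle_crc_irq() above pairs an accurate vblank count with three 32-bit CRC values, which is why amdgpu_dm_crtc_set_crc_source() advertises values_cnt = 3. A hypothetical struct, illustrative only, to visualize the shape of one entry handed to drm_crtc_add_crc_entry():

#include <stdint.h>

/* Illustrative only: the data carried by one CRC entry from the code above. */
struct dm_crc_entry_example {
	uint32_t frame;		/* drm_crtc_accurate_vblank_count(crtc) */
	uint32_t crcs[3];	/* from dc_stream_get_crc() */
};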
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 9bd142f65f9b..9ab69b22b989 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -258,6 +258,15 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
258 return true; 258 return true;
259} 259}
260 260
261
262/*
263 * Clear the payload allocation table before enabling the MST DP link.
264 */
265void dm_helpers_dp_mst_clear_payload_allocation_table(
266 struct dc_context *ctx,
267 const struct dc_link *link)
268{}
269
261/* 270/*
262 * Polls for ACT (allocation change trigger) handled and sends 271 * Polls for ACT (allocation change trigger) handled and sends
263 * ALLOCATE_PAYLOAD message. 272 * ALLOCATE_PAYLOAD message.
@@ -496,3 +505,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
496 505
497 return edid_status; 506 return edid_status;
498} 507}
508
509void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
510{
511 /* TODO: something */
512}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 1874b6cee6af..490017df371d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -51,11 +51,6 @@ struct amdgpu_dm_irq_handler_data {
51 enum dc_irq_source irq_source; 51 enum dc_irq_source irq_source;
52}; 52};
53 53
54struct amdgpu_dm_timer_handler_data {
55 struct handler_common_data hcd;
56 struct delayed_work d_work;
57};
58
59#define DM_IRQ_TABLE_LOCK(adev, flags) \ 54#define DM_IRQ_TABLE_LOCK(adev, flags) \
60 spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags) 55 spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
61 56
@@ -169,62 +164,6 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
169 return hnd_list; 164 return hnd_list;
170} 165}
171 166
172/* If 'handler_in == NULL' then remove ALL handlers. */
173static void remove_timer_handler(struct amdgpu_device *adev,
174 struct amdgpu_dm_timer_handler_data *handler_in)
175{
176 struct amdgpu_dm_timer_handler_data *handler_temp;
177 struct list_head *handler_list;
178 struct list_head *entry, *tmp;
179 unsigned long irq_table_flags;
180 bool handler_removed = false;
181
182 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
183
184 handler_list = &adev->dm.timer_handler_list;
185
186 list_for_each_safe(entry, tmp, handler_list) {
187 /* Note that list_for_each_safe() guarantees that
188 * handler_temp is NOT null. */
189 handler_temp = list_entry(entry,
190 struct amdgpu_dm_timer_handler_data, hcd.list);
191
192 if (handler_in == NULL || handler_in == handler_temp) {
193 list_del(&handler_temp->hcd.list);
194 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
195
196 DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
197 handler_temp);
198
199 if (handler_in == NULL) {
200 /* Since it is still in the queue, it must
201 * be cancelled. */
202 cancel_delayed_work_sync(&handler_temp->d_work);
203 }
204
205 kfree(handler_temp);
206 handler_removed = true;
207
208 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
209 }
210
211 /* Remove ALL handlers. */
212 if (handler_in == NULL)
213 continue;
214
215 /* Remove a SPECIFIC handler.
216 * Found our handler - we can stop here. */
217 if (handler_in == handler_temp)
218 break;
219 }
220
221 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
222
223 if (handler_in != NULL && handler_removed == false)
224 DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
225 handler_in);
226}
227
228static bool 167static bool
229validate_irq_registration_params(struct dc_interrupt_params *int_params, 168validate_irq_registration_params(struct dc_interrupt_params *int_params,
230 void (*ih)(void *)) 169 void (*ih)(void *))
@@ -382,16 +321,6 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
382 INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]); 321 INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
383 } 322 }
384 323
385 INIT_LIST_HEAD(&adev->dm.timer_handler_list);
386
387 /* allocate and initialize the workqueue for DM timer */
388 adev->dm.timer_workqueue = create_singlethread_workqueue(
389 "dm_timer_queue");
390 if (adev->dm.timer_workqueue == NULL) {
391 DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
392 return -1;
393 }
394
395 return 0; 324 return 0;
396} 325}
397 326
@@ -410,11 +339,6 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
410 lh = &adev->dm.irq_handler_list_low_tab[src]; 339 lh = &adev->dm.irq_handler_list_low_tab[src];
411 flush_work(&lh->work); 340 flush_work(&lh->work);
412 } 341 }
413
414 /* Cancel ALL timers and release handlers (if any). */
415 remove_timer_handler(adev, NULL);
416 /* Release the queue itself. */
417 destroy_workqueue(adev->dm.timer_workqueue);
418} 342}
419 343
420int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) 344int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
@@ -683,10 +607,8 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
683 607
684void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) 608void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
685{ 609{
686 if (adev->mode_info.num_crtc > 0) 610
687 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc; 611 adev->crtc_irq.num_types = adev->mode_info.num_crtc;
688 else
689 adev->crtc_irq.num_types = 0;
690 adev->crtc_irq.funcs = &dm_crtc_irq_funcs; 612 adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
691 613
692 adev->pageflip_irq.num_types = adev->mode_info.num_crtc; 614 adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f3d87f418d2e..8291d74f26bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -83,17 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
83 enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? 83 enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
84 I2C_MOT_TRUE : I2C_MOT_FALSE; 84 I2C_MOT_TRUE : I2C_MOT_FALSE;
85 enum ddc_result res; 85 enum ddc_result res;
86 ssize_t read_bytes;
87
88 if (WARN_ON(msg->size > 16))
89 return -E2BIG;
86 90
87 switch (msg->request & ~DP_AUX_I2C_MOT) { 91 switch (msg->request & ~DP_AUX_I2C_MOT) {
88 case DP_AUX_NATIVE_READ: 92 case DP_AUX_NATIVE_READ:
89 res = dal_ddc_service_read_dpcd_data( 93 read_bytes = dal_ddc_service_read_dpcd_data(
90 TO_DM_AUX(aux)->ddc_service, 94 TO_DM_AUX(aux)->ddc_service,
91 false, 95 false,
92 I2C_MOT_UNDEF, 96 I2C_MOT_UNDEF,
93 msg->address, 97 msg->address,
94 msg->buffer, 98 msg->buffer,
95 msg->size); 99 msg->size);
96 break; 100 return read_bytes;
97 case DP_AUX_NATIVE_WRITE: 101 case DP_AUX_NATIVE_WRITE:
98 res = dal_ddc_service_write_dpcd_data( 102 res = dal_ddc_service_write_dpcd_data(
99 TO_DM_AUX(aux)->ddc_service, 103 TO_DM_AUX(aux)->ddc_service,
@@ -104,14 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
104 msg->size); 108 msg->size);
105 break; 109 break;
106 case DP_AUX_I2C_READ: 110 case DP_AUX_I2C_READ:
107 res = dal_ddc_service_read_dpcd_data( 111 read_bytes = dal_ddc_service_read_dpcd_data(
108 TO_DM_AUX(aux)->ddc_service, 112 TO_DM_AUX(aux)->ddc_service,
109 true, 113 true,
110 mot, 114 mot,
111 msg->address, 115 msg->address,
112 msg->buffer, 116 msg->buffer,
113 msg->size); 117 msg->size);
114 break; 118 return read_bytes;
115 case DP_AUX_I2C_WRITE: 119 case DP_AUX_I2C_WRITE:
116 res = dal_ddc_service_write_dpcd_data( 120 res = dal_ddc_service_write_dpcd_data(
117 TO_DM_AUX(aux)->ddc_service, 121 TO_DM_AUX(aux)->ddc_service,
@@ -174,12 +178,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
174 .atomic_get_property = amdgpu_dm_connector_atomic_get_property 178 .atomic_get_property = amdgpu_dm_connector_atomic_get_property
175}; 179};
176 180
177static int dm_connector_update_modes(struct drm_connector *connector,
178 struct edid *edid)
179{
180 return drm_add_edid_modes(connector, edid);
181}
182
183void dm_dp_mst_dc_sink_create(struct drm_connector *connector) 181void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
184{ 182{
185 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 183 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -189,6 +187,12 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
189 .link = aconnector->dc_link, 187 .link = aconnector->dc_link,
190 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; 188 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
191 189
190 /*
191 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
192 */
193 if (!aconnector->port || !aconnector->port->aux.ddc.algo)
194 return;
195
192 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 196 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
193 197
194 if (!edid) { 198 if (!edid) {
@@ -222,7 +226,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
222 int ret = 0; 226 int ret = 0;
223 227
224 if (!aconnector) 228 if (!aconnector)
225 return dm_connector_update_modes(connector, NULL); 229 return drm_add_edid_modes(connector, NULL);
226 230
227 if (!aconnector->edid) { 231 if (!aconnector->edid) {
228 struct edid *edid; 232 struct edid *edid;
@@ -258,7 +262,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
258 &aconnector->base, edid); 262 &aconnector->base, edid);
259 } 263 }
260 264
261 ret = dm_connector_update_modes(connector, aconnector->edid); 265 ret = drm_add_edid_modes(connector, aconnector->edid);
262 266
263 return ret; 267 return ret;
264} 268}
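
The dm_dp_aux_transfer() hunk above makes the native and I2C read cases return the byte count from dal_ddc_service_read_dpcd_data() directly and rejects oversized requests, which matches the drm_dp_aux transfer contract of returning the number of bytes transferred or a negative errno; 16 bytes is the DP AUX payload limit. A standalone sketch of that contract follows, with backend_read() as a hypothetical stand-in for the DAL DDC service.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define AUX_MAX_PAYLOAD 16 /* DP AUX transactions carry at most 16 bytes */

struct aux_msg {
	uint32_t address;
	uint8_t *buffer;
	size_t size;
};

/* Fake backend standing in for dal_ddc_service_read_dpcd_data(): it
 * reports how many bytes it produced. */
static ssize_t backend_read(uint32_t address, uint8_t *buf, size_t size)
{
	(void)address;
	memset(buf, 0x5a, size);
	return (ssize_t)size;
}

/* Transfer hook sketch: bytes read on success, negative errno on error. */
static ssize_t aux_transfer_read(struct aux_msg *msg)
{
	if (msg->size > AUX_MAX_PAYLOAD)
		return -E2BIG;

	return backend_read(msg->address, msg->buffer, msg->size);
}

int main(void)
{
	uint8_t buf[4];
	struct aux_msg msg = { .address = 0x0000, .buffer = buf, .size = sizeof(buf) };
	ssize_t ret = aux_transfer_read(&msg);

	printf("read %zd bytes\n", ret);
	return 0;
}
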
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 56e549249134..89342b48be6b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -71,15 +71,6 @@ bool dm_read_persistent_data(struct dc_context *ctx,
71 71
72/**** power component interfaces ****/ 72/**** power component interfaces ****/
73 73
74bool dm_pp_pre_dce_clock_change(
75 struct dc_context *ctx,
76 struct dm_pp_gpu_clock_range *requested_state,
77 struct dm_pp_gpu_clock_range *actual_state)
78{
79 /*TODO*/
80 return false;
81}
82
83bool dm_pp_apply_display_requirements( 74bool dm_pp_apply_display_requirements(
84 const struct dc_context *ctx, 75 const struct dc_context *ctx,
85 const struct dm_pp_display_configuration *pp_display_cfg) 76 const struct dm_pp_display_configuration *pp_display_cfg)
@@ -151,30 +142,6 @@ bool dm_pp_apply_display_requirements(
151 return true; 142 return true;
152} 143}
153 144
154bool dc_service_get_system_clocks_range(
155 const struct dc_context *ctx,
156 struct dm_pp_gpu_clock_range *sys_clks)
157{
158 struct amdgpu_device *adev = ctx->driver_context;
159
160 /* Default values, in case PPLib is not compiled-in. */
161 sys_clks->mclk.max_khz = 800000;
162 sys_clks->mclk.min_khz = 800000;
163
164 sys_clks->sclk.max_khz = 600000;
165 sys_clks->sclk.min_khz = 300000;
166
167 if (adev->pm.dpm_enabled) {
168 sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
169 sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);
170
171 sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
172 sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
173 }
174
175 return true;
176}
177
178static void get_default_clock_levels( 145static void get_default_clock_levels(
179 enum dm_pp_clock_type clk_type, 146 enum dm_pp_clock_type clk_type,
180 struct dm_pp_clock_levels *clks) 147 struct dm_pp_clock_levels *clks)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 011a97f82fb6..8a9bba879207 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -593,3 +593,12 @@ uint32_t dal_fixed31_32_clamp_u0d10(
593{ 593{
594 return clamp_ux_dy(arg.value, 0, 10, 1); 594 return clamp_ux_dy(arg.value, 0, 10, 1);
595} 595}
596
597int32_t dal_fixed31_32_s4d19(
598 struct fixed31_32 arg)
599{
600 if (arg.value < 0)
601 return -(int32_t)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
602 else
603 return ux_dy(arg.value, 4, 19);
604}
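
dal_fixed31_32_s4d19() above packs a Q31.32 value into a signed 4.19 format by converting the magnitude with ux_dy(value, 4, 19) and restoring the sign. The standalone sketch below models that conversion under the assumption, not visible in this hunk, that ux_dy() truncates the fraction to 19 bits and masks the result to 4 integer plus 19 fractional bits.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of a Q31.32 -> s4.19 conversion; mirrors the sign handling above,
 * but the truncate-and-mask body is an assumption about ux_dy(). */
static int32_t s4d19_from_q31_32(int64_t value)
{
	uint64_t mag = value < 0 ? (uint64_t)(-value) : (uint64_t)value;
	/* Drop 32 - 19 = 13 fractional bits, keep 4 + 19 = 23 bits total. */
	int32_t packed = (int32_t)((mag >> 13) & ((1u << 23) - 1));

	return value < 0 ? -packed : packed;
}

int main(void)
{
	int64_t one = 1LL << 32;   /* 1.0 in Q31.32 */
	int64_t half = 1LL << 31;  /* 0.5 in Q31.32 */

	printf("1.0  -> %" PRId32 " (expect %d)\n", s4d19_from_q31_32(one), 1 << 19);
	printf("0.5  -> %" PRId32 " (expect %d)\n", s4d19_from_q31_32(half), 1 << 18);
	printf("-0.5 -> %" PRId32 "\n", s4d19_from_q31_32(-half));
	return 0;
}
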
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
index 180a9d69d351..31bee054f43a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -60,7 +60,8 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
60 {LOG_EVENT_LINK_LOSS, "LinkLoss"}, 60 {LOG_EVENT_LINK_LOSS, "LinkLoss"},
61 {LOG_EVENT_UNDERFLOW, "Underflow"}, 61 {LOG_EVENT_UNDERFLOW, "Underflow"},
62 {LOG_IF_TRACE, "InterfaceTrace"}, 62 {LOG_IF_TRACE, "InterfaceTrace"},
63 {LOG_DTN, "DTN"} 63 {LOG_DTN, "DTN"},
64 {LOG_PROFILING, "Profiling"}
64}; 65};
65 66
66 67
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index c00e405b63e8..c7f0b27e457e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -49,6 +49,9 @@
49 49
50#define LAST_RECORD_TYPE 0xff 50#define LAST_RECORD_TYPE 0xff
51 51
52#define DC_LOGGER \
53 bp->base.ctx->logger
54
52/* GUID to validate external display connection info table (aka OPM module) */ 55/* GUID to validate external display connection info table (aka OPM module) */
53static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = { 56static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = {
54 0x91, 0x6E, 0x57, 0x09, 57 0x91, 0x6E, 0x57, 0x09,
@@ -3079,8 +3082,7 @@ static enum bp_result patch_bios_image_from_ext_display_connection_info(
3079 opm_object, 3082 opm_object,
3080 &ext_display_connection_info_tbl) != BP_RESULT_OK) { 3083 &ext_display_connection_info_tbl) != BP_RESULT_OK) {
3081 3084
3082 dm_logger_write(bp->base.ctx->logger, LOG_WARNING, 3085 DC_LOG_WARNING("%s: Failed to read Connection Info Table", __func__);
3083 "%s: Failed to read Connection Info Table", __func__);
3084 return BP_RESULT_UNSUPPORTED; 3086 return BP_RESULT_UNSUPPORTED;
3085 } 3087 }
3086 3088
@@ -3795,14 +3797,11 @@ static const struct dc_vbios_funcs vbios_funcs = {
3795 3797
3796 .get_gpio_pin_info = bios_parser_get_gpio_pin_info, 3798 .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
3797 3799
3798 .get_embedded_panel_info = bios_parser_get_embedded_panel_info,
3799
3800 .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
3801
3802 .get_encoder_cap_info = bios_parser_get_encoder_cap_info, 3800 .get_encoder_cap_info = bios_parser_get_encoder_cap_info,
3803 3801
3804 /* bios scratch register communication */ 3802 /* bios scratch register communication */
3805 .is_accelerated_mode = bios_is_accelerated_mode, 3803 .is_accelerated_mode = bios_is_accelerated_mode,
3804 .get_vga_enabled_displays = bios_get_vga_enabled_displays,
3806 3805
3807 .set_scratch_critical_state = bios_parser_set_scratch_critical_state, 3806 .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
3808 3807
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 1ee1717f2e6f..e7680c41f117 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -44,7 +44,7 @@
44 44
45#include "bios_parser_common.h" 45#include "bios_parser_common.h"
46#define LAST_RECORD_TYPE 0xff 46#define LAST_RECORD_TYPE 0xff
47 47#define SMU9_SYSPLL0_ID 0
48 48
49struct i2c_id_config_access { 49struct i2c_id_config_access {
50 uint8_t bfI2C_LineMux:4; 50 uint8_t bfI2C_LineMux:4;
@@ -1220,7 +1220,7 @@ static unsigned int bios_parser_get_smu_clock_info(
1220 if (!bp->cmd_tbl.get_smu_clock_info) 1220 if (!bp->cmd_tbl.get_smu_clock_info)
1221 return BP_RESULT_FAILURE; 1221 return BP_RESULT_FAILURE;
1222 1222
1223 return bp->cmd_tbl.get_smu_clock_info(bp); 1223 return bp->cmd_tbl.get_smu_clock_info(bp, 0);
1224} 1224}
1225 1225
1226static enum bp_result bios_parser_program_crtc_timing( 1226static enum bp_result bios_parser_program_crtc_timing(
@@ -1280,6 +1280,12 @@ static bool bios_parser_is_accelerated_mode(
1280 return bios_is_accelerated_mode(dcb); 1280 return bios_is_accelerated_mode(dcb);
1281} 1281}
1282 1282
1283static uint32_t bios_parser_get_vga_enabled_displays(
1284 struct dc_bios *bios)
1285{
1286 return bios_get_vga_enabled_displays(bios);
1287}
1288
1283 1289
1284/** 1290/**
1285 * bios_parser_set_scratch_critical_state 1291 * bios_parser_set_scratch_critical_state
@@ -1370,7 +1376,7 @@ static enum bp_result get_firmware_info_v3_1(
1370 if (bp->cmd_tbl.get_smu_clock_info != NULL) { 1376 if (bp->cmd_tbl.get_smu_clock_info != NULL) {
1371 /* VBIOS gives in 10KHz */ 1377 /* VBIOS gives in 10KHz */
1372 info->smu_gpu_pll_output_freq = 1378 info->smu_gpu_pll_output_freq =
1373 bp->cmd_tbl.get_smu_clock_info(bp) * 10; 1379 bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
1374 } 1380 }
1375 1381
1376 return BP_RESULT_OK; 1382 return BP_RESULT_OK;
@@ -1800,6 +1806,7 @@ static const struct dc_vbios_funcs vbios_funcs = {
1800 1806
1801 1807
1802 .is_accelerated_mode = bios_parser_is_accelerated_mode, 1808 .is_accelerated_mode = bios_parser_is_accelerated_mode,
1809 .get_vga_enabled_displays = bios_parser_get_vga_enabled_displays,
1803 1810
1804 .set_scratch_critical_state = bios_parser_set_scratch_critical_state, 1811 .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
1805 1812
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
index 5c9e5108c32c..d4589470985c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -78,5 +78,13 @@ void bios_set_scratch_critical_state(
78 REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critial_state); 78 REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critial_state);
79} 79}
80 80
81uint32_t bios_get_vga_enabled_displays(
82 struct dc_bios *bios)
83{
84 uint32_t active_disp = 1;
81 85
86	if (bios->regs->BIOS_SCRATCH_3) /* TODO: follow up for other ASICs */
87 active_disp = REG_READ(BIOS_SCRATCH_3) & 0XFFFF;
88 return active_disp;
89}
82 90
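
bios_get_vga_enabled_displays() above defaults to 1 and otherwise returns the low 16 bits of BIOS_SCRATCH_3. Below is a standalone sketch of that extraction; treating each of those bits as a per-display enable flag is an assumption, not something this diff states.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* scratch3 stands in for the BIOS_SCRATCH_3 register value; have_reg
 * stands in for the "register is described for this ASIC" check above. */
static uint32_t vga_enabled_displays(uint32_t scratch3, int have_reg)
{
	uint32_t active_disp = 1; /* default when the register is not described */

	if (have_reg)
		active_disp = scratch3 & 0xFFFF; /* keep the low 16 bits */
	return active_disp;
}

int main(void)
{
	printf("0x%04" PRIx32 "\n", vga_enabled_displays(0x00030005, 1)); /* -> 0x0005 */
	printf("0x%04" PRIx32 "\n", vga_enabled_displays(0, 0));          /* -> 0x0001 */
	return 0;
}
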
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
index c0047efeb006..75a29e68fb27 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -34,6 +34,7 @@ uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset,
34bool bios_is_accelerated_mode(struct dc_bios *bios); 34bool bios_is_accelerated_mode(struct dc_bios *bios);
35void bios_set_scratch_acc_mode_change(struct dc_bios *bios); 35void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
36void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); 36void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
37uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios);
37 38
38#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) 39#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
39 40
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index fea5e83736fd..3f63f712c8a4 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -34,6 +34,8 @@
34#include "command_table_helper2.h" 34#include "command_table_helper2.h"
35#include "bios_parser_helper.h" 35#include "bios_parser_helper.h"
36#include "bios_parser_types_internal2.h" 36#include "bios_parser_types_internal2.h"
37#define DC_LOGGER \
38 bp->base.ctx->logger
37 39
38#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\ 40#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
39 (((char *)(&((\ 41 (((char *)(&((\
@@ -239,8 +241,7 @@ static enum bp_result transmitter_control_v1_6(
239 if (cntl->action == TRANSMITTER_CONTROL_ENABLE || 241 if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
240 cntl->action == TRANSMITTER_CONTROL_ACTIAVATE || 242 cntl->action == TRANSMITTER_CONTROL_ACTIAVATE ||
241 cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) { 243 cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) {
242 dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\ 244 DC_LOG_BIOS("%s:ps.param.symclk_10khz = %d\n",\
243 "%s:ps.param.symclk_10khz = %d\n",\
244 __func__, ps.param.symclk_10khz); 245 __func__, ps.param.symclk_10khz);
245 } 246 }
246 247
@@ -331,8 +332,7 @@ static enum bp_result set_pixel_clock_v7(
331 (uint8_t) bp->cmd_helper-> 332 (uint8_t) bp->cmd_helper->
332 transmitter_color_depth_to_atom( 333 transmitter_color_depth_to_atom(
333 bp_params->color_depth); 334 bp_params->color_depth);
334 dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\ 335 DC_LOG_BIOS("%s:program display clock = %d"\
335 "%s:program display clock = %d"\
336 "colorDepth = %d\n", __func__,\ 336 "colorDepth = %d\n", __func__,\
337 bp_params->target_pixel_clock, bp_params->color_depth); 337 bp_params->target_pixel_clock, bp_params->color_depth);
338 338
@@ -772,8 +772,7 @@ static enum bp_result set_dce_clock_v2_1(
772 */ 772 */
773 params.param.dceclk_10khz = cpu_to_le32( 773 params.param.dceclk_10khz = cpu_to_le32(
774 bp_params->target_clock_frequency / 10); 774 bp_params->target_clock_frequency / 10);
775 dm_logger_write(bp->base.ctx->logger, LOG_BIOS, 775 DC_LOG_BIOS("%s:target_clock_frequency = %d"\
776 "%s:target_clock_frequency = %d"\
777 "clock_type = %d \n", __func__,\ 776 "clock_type = %d \n", __func__,\
778 bp_params->target_clock_frequency,\ 777 bp_params->target_clock_frequency,\
779 bp_params->clock_type); 778 bp_params->clock_type);
@@ -797,7 +796,7 @@ static enum bp_result set_dce_clock_v2_1(
797 ****************************************************************************** 796 ******************************************************************************
798 *****************************************************************************/ 797 *****************************************************************************/
799 798
800static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp); 799static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id);
801 800
802static void init_get_smu_clock_info(struct bios_parser *bp) 801static void init_get_smu_clock_info(struct bios_parser *bp)
803{ 802{
@@ -806,12 +805,13 @@ static void init_get_smu_clock_info(struct bios_parser *bp)
806 805
807} 806}
808 807
809static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp) 808static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
810{ 809{
811 struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0}; 810 struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0};
812 struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output; 811 struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output;
813 812
814 smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ; 813 smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ;
814 smu_input.syspll_id = id;
815 815
816 /* Get Specific Clock */ 816 /* Get Specific Clock */
817 if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) { 817 if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) {
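
The command-table hunks above add a SYSPLL selector: get_smu_clock_info() now takes an id that is written into atom_get_smu_clock_info_parameters_v3_1.syspll_id, and get_firmware_info_v3_1() passes SMU9_SYSPLL0_ID. The following is a simplified, self-contained sketch of threading that id through the function-pointer table; the command-table execution itself is faked and the struct bodies are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define SMU9_SYSPLL0_ID 0 /* value taken from the hunk above */

struct smu_clock_params {
	uint8_t command;
	uint8_t syspll_id;
};

struct bios_parser;

struct cmd_tbl {
	unsigned int (*get_smu_clock_info)(struct bios_parser *bp, uint8_t id);
};

struct bios_parser {
	struct cmd_tbl cmd_tbl;
};

static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
{
	struct smu_clock_params input = { .command = 1, .syspll_id = id };

	(void)bp;
	/* A real implementation would execute the BIOS command table here. */
	return 100000u + input.syspll_id; /* fake value in 10KHz units */
}

int main(void)
{
	struct bios_parser bp = {
		.cmd_tbl = { .get_smu_clock_info = get_smu_clock_info_v3_1 },
	};

	/* Mirrors get_firmware_info_v3_1(): VBIOS reports in 10KHz units. */
	unsigned int pll_khz = bp.cmd_tbl.get_smu_clock_info(&bp, SMU9_SYSPLL0_ID) * 10;

	printf("syspll0 output: %u kHz\n", pll_khz);
	return 0;
}
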
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
index 59061b806df5..ec1c0c9f3f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -96,7 +96,7 @@ struct cmd_tbl {
96 struct bios_parser *bp, 96 struct bios_parser *bp,
97 struct bp_set_dce_clock_parameters *bp_params); 97 struct bp_set_dce_clock_parameters *bp_params);
98 unsigned int (*get_smu_clock_info)( 98 unsigned int (*get_smu_clock_info)(
99 struct bios_parser *bp); 99 struct bios_parser *bp, uint8_t id);
100 100
101}; 101};
102 102
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
index 1fab634b66be..4c3789df253d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
@@ -29,38 +29,7 @@
29#include "dce80/command_table_helper_dce80.h" 29#include "dce80/command_table_helper_dce80.h"
30#include "dce110/command_table_helper_dce110.h" 30#include "dce110/command_table_helper_dce110.h"
31#include "dce112/command_table_helper_dce112.h" 31#include "dce112/command_table_helper_dce112.h"
32 32#include "command_table_helper_struct.h"
33struct command_table_helper {
34 bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
35 uint8_t (*encoder_action_to_atom)(
36 enum bp_encoder_control_action action);
37 uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
38 bool enable_dp_audio);
39 bool (*engine_bp_to_atom)(enum engine_id engine_id,
40 uint32_t *atom_engine_id);
41 void (*assign_control_parameter)(
42 const struct command_table_helper *h,
43 struct bp_encoder_control *control,
44 DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
45 bool (*clock_source_id_to_atom)(enum clock_source_id id,
46 uint32_t *atom_pll_id);
47 bool (*clock_source_id_to_ref_clk_src)(
48 enum clock_source_id id,
49 uint32_t *ref_clk_src_id);
50 uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
51 uint8_t (*encoder_id_to_atom)(enum encoder_id id);
52 uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
53 enum clock_source_id id);
54 uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
55 uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
56 uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
57 uint8_t (*phy_id_to_atom)(enum transmitter t);
58 uint8_t (*disp_power_gating_action_to_atom)(
59 enum bp_pipe_control_action action);
60 bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
61 uint32_t *atom_clock_type);
62 uint8_t (*transmitter_color_depth_to_atom)(enum transmitter_color_depth id);
63};
64 33
65bool dal_bios_parser_init_cmd_tbl_helper(const struct command_table_helper **h, 34bool dal_bios_parser_init_cmd_tbl_helper(const struct command_table_helper **h,
66 enum dce_version dce); 35 enum dce_version dce);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
index 9f587c91d843..785fcb20a1b9 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
@@ -29,35 +29,7 @@
29#include "dce80/command_table_helper_dce80.h" 29#include "dce80/command_table_helper_dce80.h"
30#include "dce110/command_table_helper_dce110.h" 30#include "dce110/command_table_helper_dce110.h"
31#include "dce112/command_table_helper2_dce112.h" 31#include "dce112/command_table_helper2_dce112.h"
32 32#include "command_table_helper_struct.h"
33struct command_table_helper {
34 bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
35 uint8_t (*encoder_action_to_atom)(
36 enum bp_encoder_control_action action);
37 uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
38 bool enable_dp_audio);
39 bool (*engine_bp_to_atom)(enum engine_id engine_id,
40 uint32_t *atom_engine_id);
41 bool (*clock_source_id_to_atom)(enum clock_source_id id,
42 uint32_t *atom_pll_id);
43 bool (*clock_source_id_to_ref_clk_src)(
44 enum clock_source_id id,
45 uint32_t *ref_clk_src_id);
46 uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
47 uint8_t (*encoder_id_to_atom)(enum encoder_id id);
48 uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
49 enum clock_source_id id);
50 uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
51 uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
52 uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
53 uint8_t (*phy_id_to_atom)(enum transmitter t);
54 uint8_t (*disp_power_gating_action_to_atom)(
55 enum bp_pipe_control_action action);
56 bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
57 uint32_t *atom_clock_type);
58 uint8_t (*transmitter_color_depth_to_atom)(
59 enum transmitter_color_depth id);
60};
61 33
62bool dal_bios_parser_init_cmd_tbl_helper2(const struct command_table_helper **h, 34bool dal_bios_parser_init_cmd_tbl_helper2(const struct command_table_helper **h,
63 enum dce_version dce); 35 enum dce_version dce);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h
new file mode 100644
index 000000000000..1f2c0a3f06f9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_COMMAND_TABLE_HELPER_STRUCT_H__
27#define __DAL_COMMAND_TABLE_HELPER_STRUCT_H__
28
29#include "dce80/command_table_helper_dce80.h"
30#include "dce110/command_table_helper_dce110.h"
31#include "dce112/command_table_helper_dce112.h"
32
33struct _DIG_ENCODER_CONTROL_PARAMETERS_V2;
34struct command_table_helper {
35 bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
36 uint8_t (*encoder_action_to_atom)(
37 enum bp_encoder_control_action action);
38 uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
39 bool enable_dp_audio);
40 bool (*engine_bp_to_atom)(enum engine_id engine_id,
41 uint32_t *atom_engine_id);
42 void (*assign_control_parameter)(
43 const struct command_table_helper *h,
44 struct bp_encoder_control *control,
45 struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
46 bool (*clock_source_id_to_atom)(enum clock_source_id id,
47 uint32_t *atom_pll_id);
48 bool (*clock_source_id_to_ref_clk_src)(
49 enum clock_source_id id,
50 uint32_t *ref_clk_src_id);
51 uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
52 uint8_t (*encoder_id_to_atom)(enum encoder_id id);
53 uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
54 enum clock_source_id id);
55 uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
56 uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
57 uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
58 uint8_t (*phy_id_to_atom)(enum transmitter t);
59 uint8_t (*disp_power_gating_action_to_atom)(
60 enum bp_pipe_control_action action);
61 bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
62 uint32_t *atom_clock_type);
63 uint8_t (*transmitter_color_depth_to_atom)(enum transmitter_color_depth id);
64};
65
66#endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 7959e382ed28..95f332ee3e7e 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -24,9 +24,17 @@
24# It calculates Bandwidth and Watermarks values for HW programming 24# It calculates Bandwidth and Watermarks values for HW programming
25# 25#
26 26
27CFLAGS_dcn_calcs.o := -mhard-float -msse -mpreferred-stack-boundary=4 27ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
28CFLAGS_dcn_calc_auto.o := -mhard-float -msse -mpreferred-stack-boundary=4 28 cc_stack_align := -mpreferred-stack-boundary=4
29CFLAGS_dcn_calc_math.o := -mhard-float -msse -mpreferred-stack-boundary=4 -Wno-tautological-compare 29else ifneq ($(call cc-option, -mstack-alignment=16),)
30 cc_stack_align := -mstack-alignment=16
31endif
32
33calcs_ccflags := -mhard-float -msse $(cc_stack_align)
34
35CFLAGS_dcn_calcs.o := $(calcs_ccflags)
36CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
37CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
30 38
31BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o 39BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
32 40
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 2e11fac2a63d..0cbab81ab304 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -85,7 +85,6 @@ static void calculate_bandwidth(
85 const uint32_t s_mid5 = 5; 85 const uint32_t s_mid5 = 5;
86 const uint32_t s_mid6 = 6; 86 const uint32_t s_mid6 = 6;
87 const uint32_t s_high = 7; 87 const uint32_t s_high = 7;
88 const uint32_t bus_efficiency = 1;
89 const uint32_t dmif_chunk_buff_margin = 1; 88 const uint32_t dmif_chunk_buff_margin = 1;
90 89
91 uint32_t max_chunks_fbc_mode; 90 uint32_t max_chunks_fbc_mode;
@@ -592,7 +591,12 @@ static void calculate_bandwidth(
592 /* 1 = use channel 0 and 1*/ 591 /* 1 = use channel 0 and 1*/
593 /* 2 = use channel 0,1,2,3*/ 592 /* 2 = use channel 0,1,2,3*/
594 if ((fbc_enabled == 1 && lpt_enabled == 1)) { 593 if ((fbc_enabled == 1 && lpt_enabled == 1)) {
595 data->dram_efficiency = bw_int_to_fixed(1); 594 if (vbios->memory_type == bw_def_hbm)
595 data->dram_efficiency = bw_frc_to_fixed(5, 10);
596 else
597 data->dram_efficiency = bw_int_to_fixed(1);
598
599
596 if (dceip->low_power_tiling_mode == 0) { 600 if (dceip->low_power_tiling_mode == 0) {
597 data->number_of_dram_channels = 1; 601 data->number_of_dram_channels = 1;
598 } 602 }
@@ -607,7 +611,10 @@ static void calculate_bandwidth(
607 } 611 }
608 } 612 }
609 else { 613 else {
610 data->dram_efficiency = bw_frc_to_fixed(8, 10); 614 if (vbios->memory_type == bw_def_hbm)
615 data->dram_efficiency = bw_frc_to_fixed(5, 10);
616 else
617 data->dram_efficiency = bw_frc_to_fixed(8, 10);
611 } 618 }
612 /*memory request size and latency hiding:*/ 619 /*memory request size and latency hiding:*/
613 /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/ 620 /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
@@ -623,7 +630,7 @@ static void calculate_bandwidth(
623 } 630 }
624 else { 631 else {
625 /*graphics portrait tiling mode*/ 632 /*graphics portrait tiling mode*/
626 if ((data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling)) { 633 if (data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling) {
627 data->orthogonal_rotation[i] = 0; 634 data->orthogonal_rotation[i] = 0;
628 } 635 }
629 else { 636 else {
@@ -634,7 +641,7 @@ static void calculate_bandwidth(
634 else { 641 else {
635 if ((i < 4)) { 642 if ((i < 4)) {
636 /*underlay landscape tiling mode is only supported*/ 643 /*underlay landscape tiling mode is only supported*/
637 if ((data->underlay_micro_tile_mode == bw_def_display_micro_tiling)) { 644 if (data->underlay_micro_tile_mode == bw_def_display_micro_tiling) {
638 data->orthogonal_rotation[i] = 0; 645 data->orthogonal_rotation[i] = 0;
639 } 646 }
640 else { 647 else {
@@ -643,7 +650,7 @@ static void calculate_bandwidth(
643 } 650 }
644 else { 651 else {
645 /*graphics landscape tiling mode*/ 652 /*graphics landscape tiling mode*/
646 if ((data->graphics_micro_tile_mode == bw_def_display_micro_tiling)) { 653 if (data->graphics_micro_tile_mode == bw_def_display_micro_tiling) {
647 data->orthogonal_rotation[i] = 0; 654 data->orthogonal_rotation[i] = 0;
648 } 655 }
649 else { 656 else {
@@ -947,14 +954,14 @@ static void calculate_bandwidth(
947 } 954 }
948 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { 955 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
949 if (data->enable[i]) { 956 if (data->enable[i]) {
950 if ((data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0)) { 957 if (data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0) {
951 /*set maximum chunk limit if only one graphic pipe is enabled*/ 958 /*set maximum chunk limit if only one graphic pipe is enabled*/
952 data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127); 959 data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127);
953 } 960 }
954 else { 961 else {
955 data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1)); 962 data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1));
956 /*clamp maximum chunk limit in the graphic display pipe*/ 963 /*clamp maximum chunk limit in the graphic display pipe*/
957 if ((i >= 4)) { 964 if (i >= 4) {
958 data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]); 965 data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]);
959 } 966 }
960 } 967 }
@@ -1171,9 +1178,9 @@ static void calculate_bandwidth(
1171 } 1178 }
1172 for (i = 0; i <= 2; i++) { 1179 for (i = 0; i <= 2; i++) {
1173 for (j = 0; j <= 7; j++) { 1180 for (j = 0; j <= 7; j++) {
1174 data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency))))); 1181 data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))));
1175 if (data->d1_display_write_back_dwb_enable == 1) { 1182 if (data->d1_display_write_back_dwb_enable == 1) {
1176 data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency))))); 1183 data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(sclk[j], vbios->data_return_bus_width))));
1177 } 1184 }
1178 } 1185 }
1179 } 1186 }
@@ -1258,6 +1265,16 @@ static void calculate_bandwidth(
1258 /* / (dispclk - display bw)*/ 1265 /* / (dispclk - display bw)*/
1259 /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/ 1266 /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
1260 /*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/ 1267 /*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
1268
1269 /*initialize variables*/
1270 number_of_displays_enabled = 0;
1271 number_of_displays_enabled_with_margin = 0;
1272 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1273 if (data->enable[k]) {
1274 number_of_displays_enabled = number_of_displays_enabled + 1;
1275 }
1276 data->display_pstate_change_enable[k] = 0;
1277 }
1261 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { 1278 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1262 if (data->enable[i]) { 1279 if (data->enable[i]) {
1263 if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) { 1280 if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
@@ -1276,7 +1293,10 @@ static void calculate_bandwidth(
1276 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { 1293 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1277 if (data->enable[i]) { 1294 if (data->enable[i]) {
1278 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) { 1295 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
1279 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); 1296 if (number_of_displays_enabled > 2)
1297 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(2)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
1298 else
1299 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
1280 } 1300 }
1281 else { 1301 else {
1282 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); 1302 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
@@ -1337,25 +1357,16 @@ static void calculate_bandwidth(
1337 /*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/ 1357 /*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/
1338 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { 1358 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1339 if (data->enable[i]) { 1359 if (data->enable[i]) {
1340 if ((dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) { 1360 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1) {
1341 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency)); 1361 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
1342 } 1362 }
1343 else { 1363 else {
1344 /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/ 1364 /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
1345 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency)); 1365 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
1346 } 1366 }
1347 data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]); 1367 data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
1348 } 1368 }
1349 } 1369 }
1350 /*initialize variables*/
1351 number_of_displays_enabled = 0;
1352 number_of_displays_enabled_with_margin = 0;
1353 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1354 if (data->enable[k]) {
1355 number_of_displays_enabled = number_of_displays_enabled + 1;
1356 }
1357 data->display_pstate_change_enable[k] = 0;
1358 }
1359 for (i = 0; i <= 2; i++) { 1370 for (i = 0; i <= 2; i++) {
1360 for (j = 0; j <= 7; j++) { 1371 for (j = 0; j <= 7; j++) {
1361 data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999); 1372 data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
@@ -1370,10 +1381,11 @@ static void calculate_bandwidth(
1370 /*determine the minimum dram clock change margin for each set of clock frequencies*/ 1381 /*determine the minimum dram clock change margin for each set of clock frequencies*/
1371 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); 1382 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
1372 /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/ 1383 /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
1373 data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k])))); 1384 data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
1374 if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) { 1385 if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
1375 data->display_pstate_change_enable[k] = 1; 1386 data->display_pstate_change_enable[k] = 1;
1376 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; 1387 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
1388 data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
1377 } 1389 }
1378 } 1390 }
1379 } 1391 }
@@ -1383,10 +1395,11 @@ static void calculate_bandwidth(
1383 /*determine the minimum dram clock change margin for each display pipe*/ 1395 /*determine the minimum dram clock change margin for each display pipe*/
1384 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); 1396 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
1385 /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/ 1397 /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
1386 data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); 1398 data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
1387 if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) { 1399 if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
1388 data->display_pstate_change_enable[k] = 1; 1400 data->display_pstate_change_enable[k] = 1;
1389 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; 1401 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
1402 data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
1390 } 1403 }
1391 } 1404 }
1392 } 1405 }
@@ -1396,7 +1409,7 @@ static void calculate_bandwidth(
1396 } 1409 }
1397 /*determine the number of displays with margin to switch in the v_active region*/ 1410 /*determine the number of displays with margin to switch in the v_active region*/
1398 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { 1411 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1399 if ((data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1)) { 1412 if (data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1) {
1400 number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 1; 1413 number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 1;
1401 } 1414 }
1402 } 1415 }
@@ -1420,7 +1433,7 @@ static void calculate_bandwidth(
1420 data->displays_with_same_mode[i] = bw_int_to_fixed(0); 1433 data->displays_with_same_mode[i] = bw_int_to_fixed(0);
1421 if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) { 1434 if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
1422 for (j = 0; j <= maximum_number_of_surfaces - 1; j++) { 1435 for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
1423 if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) { 1436 if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
1424 data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1)); 1437 data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
1425 } 1438 }
1426 } 1439 }
@@ -1435,19 +1448,38 @@ static void calculate_bandwidth(
1435 /*aligned displays with the same timing.*/ 1448 /*aligned displays with the same timing.*/
1436 /*the display(s) with the negative margin can be switched in the v_blank region while the other*/ 1449 /*the display(s) with the negative margin can be switched in the v_blank region while the other*/
1437 /*displays are in v_blank or v_active.*/ 1450 /*displays are in v_blank or v_active.*/
1438 if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) { 1451 if (number_of_displays_enabled_with_margin > 0 && (number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin) == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk)) {
1439 data->nbp_state_change_enable = bw_def_yes; 1452 data->nbp_state_change_enable = bw_def_yes;
1440 } 1453 }
1441 else { 1454 else {
1442 data->nbp_state_change_enable = bw_def_no; 1455 data->nbp_state_change_enable = bw_def_no;
1443 } 1456 }
1444 /*dram clock change is possible only in vblank if all displays are aligned and have no margin*/ 1457 /*dram clock change is possible only in vblank if all displays are aligned and have no margin*/
1445 if ((number_of_aligned_displays_with_no_margin == number_of_displays_enabled)) { 1458 if (number_of_aligned_displays_with_no_margin == number_of_displays_enabled) {
1446 nbp_state_change_enable_blank = bw_def_yes; 1459 nbp_state_change_enable_blank = bw_def_yes;
1447 } 1460 }
1448 else { 1461 else {
1449 nbp_state_change_enable_blank = bw_def_no; 1462 nbp_state_change_enable_blank = bw_def_no;
1450 } 1463 }
1464
1465 /*average bandwidth*/
1466 /*the average bandwidth with no compression during the vertical active time is the source width times the bytes per pixel, divided by the line time, multiplied by the vertical scale ratio and by the ratio of bytes per request to useful bytes per request.*/
1467 /*the average bandwidth with compression is the same, divided by the compression ratio*/
1468 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1469 if (data->enable[i]) {
1470 data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
1471 data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
1472 }
1473 }
1474 data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
1475 data->total_average_bandwidth = bw_int_to_fixed(0);
1476 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1477 if (data->enable[i]) {
1478 data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
1479 data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
1480 }
1481 }
1482
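Illustrative sketch of the per-surface average bandwidth that the added block accumulates, again with plain doubles standing in for bw_fixed; the names below are invented for the sketch.

static void accumulate_average_bandwidth_example(int n, const int enable[],
		const double src_width[], const double bytes_per_pixel[],
		const double h_total[], const double pixel_rate[],
		const double vsr[], const double bytes_per_request[],
		const double useful_bytes_per_request[],
		const double compression_rate[],
		double *total_no_compression, double *total)
{
	int i;

	*total_no_compression = 0.0;
	*total = 0.0;
	for (i = 0; i < n; i++) {
		double line_time, bw;

		if (!enable[i])
			continue;
		line_time = h_total[i] / pixel_rate[i];	/* time per source line */
		bw = src_width[i] * bytes_per_pixel[i] / line_time * vsr[i]
			* bytes_per_request[i] / useful_bytes_per_request[i];
		*total_no_compression += bw;
		*total += bw / compression_rate[i];	/* compressed case */
	}
}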
1451 /*required yclk(pclk)*/ 1483 /*required yclk(pclk)*/
1452 /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/ 1484 /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/
1453 /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/ 1485 /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
@@ -1470,7 +1502,7 @@ static void calculate_bandwidth(
1470 } 1502 }
1471 } 1503 }
1472 /*compute minimum time to read one chunk from the dmif buffer*/ 1504 /*compute minimum time to read one chunk from the dmif buffer*/
1473 if ((number_of_displays_enabled > 2)) { 1505 if (number_of_displays_enabled > 2) {
1474 data->chunk_request_delay = 0; 1506 data->chunk_request_delay = 0;
1475 } 1507 }
1476 else { 1508 else {
@@ -1497,17 +1529,20 @@ static void calculate_bandwidth(
1497 } 1529 }
1498 else { 1530 else {
1499 data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000)); 1531 data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
1500 if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) { 1532 if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[low]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
1533 && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
1501 yclk_message = bw_fixed_to_int(vbios->low_yclk); 1534 yclk_message = bw_fixed_to_int(vbios->low_yclk);
1502 data->y_clk_level = low; 1535 data->y_clk_level = low;
1503 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); 1536 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1504 } 1537 }
1505 else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) { 1538 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[mid]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
1539 && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
1506 yclk_message = bw_fixed_to_int(vbios->mid_yclk); 1540 yclk_message = bw_fixed_to_int(vbios->mid_yclk);
1507 data->y_clk_level = mid; 1541 data->y_clk_level = mid;
1508 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); 1542 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1509 } 1543 }
1510 else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) { 1544 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[high]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
1545 && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
1511 yclk_message = bw_fixed_to_int(vbios->high_yclk); 1546 yclk_message = bw_fixed_to_int(vbios->high_yclk);
1512 data->y_clk_level = high; 1547 data->y_clk_level = high;
1513 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); 1548 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
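The extra term added to each yclk branch caps the display's uncompressed average bandwidth at a percentage of the ideal DRAM bandwidth for that yclk. A hedged sketch of the acceptance test for one level (plain doubles, hypothetical names; the margin/blackout clauses are omitted):

#include <stdbool.h>

static bool yclk_level_acceptable_example(double avg_bw_no_compression,
		double required_dram_bw, double yclk,
		double dram_channel_width_bits, double num_dram_channels,
		double max_avg_pct_of_ideal_drambw, double dram_efficiency)
{
	/* ideal bandwidth this yclk level can deliver across all channels */
	double ideal_bw = yclk * dram_channel_width_bits / 8.0 * num_dram_channels;

	return avg_bw_no_compression < max_avg_pct_of_ideal_drambw / 100.0 * ideal_bw &&
	       required_dram_bw < dram_efficiency * ideal_bw;
}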
@@ -1523,8 +1558,8 @@ static void calculate_bandwidth(
1523 /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/ 1558 /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/
1524 /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipes' data buffer size through the sclk bus in the time for data transfer*/ 1559 /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipes' data buffer size through the sclk bus in the time for data transfer*/
1525 /*for dmif, pte and cursor requests have to be included.*/ 1560 /*for dmif, pte and cursor requests have to be included.*/
1526 data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency)))); 1561 data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
1527 data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency)))); 1562 data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), vbios->data_return_bus_width);
1528 if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) { 1563 if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
1529 data->required_sclk = bw_int_to_fixed(9999); 1564 data->required_sclk = bw_int_to_fixed(9999);
1530 sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size; 1565 sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
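The dmif required sclk now derates the return bus by percent_of_ideal_port_bw_received_after_urgent_latency instead of the old bus_efficiency constant, while the mcifwr path uses the raw bus width. Sketch of the dmif arithmetic (plain doubles, hypothetical names):

static double dmif_required_sclk_example(double total_reads_required_bytes,
		double time_for_data_transfer,
		double data_return_bus_width_bytes,
		double pct_of_ideal_port_bw_after_urgent)
{
	/* effective bytes moved per sclk cycle after the urgent-latency derating */
	double effective_bus = data_return_bus_width_bytes *
			pct_of_ideal_port_bw_after_urgent / 100.0;

	return total_reads_required_bytes / time_for_data_transfer / effective_bus;
}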
@@ -1537,42 +1572,56 @@ static void calculate_bandwidth(
1537 } 1572 }
1538 else { 1573 else {
1539 data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk); 1574 data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
1540 if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) { 1575 if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[low]),vbios->data_return_bus_width))
1576 && bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
1541 sclk_message = bw_def_low; 1577 sclk_message = bw_def_low;
1542 data->sclk_level = s_low; 1578 data->sclk_level = s_low;
1543 data->required_sclk = vbios->low_sclk; 1579 data->required_sclk = vbios->low_sclk;
1544 } 1580 }
1545 else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) { 1581 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[mid]),vbios->data_return_bus_width))
1582 && bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
1546 sclk_message = bw_def_mid; 1583 sclk_message = bw_def_mid;
1547 data->sclk_level = s_mid1; 1584 data->sclk_level = s_mid1;
1548 data->required_sclk = vbios->mid1_sclk; 1585 data->required_sclk = vbios->mid1_sclk;
1549 } 1586 }
1550 else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) { 1587 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid2]),vbios->data_return_bus_width))
1588 && bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
1551 sclk_message = bw_def_mid; 1589 sclk_message = bw_def_mid;
1552 data->sclk_level = s_mid2; 1590 data->sclk_level = s_mid2;
1553 data->required_sclk = vbios->mid2_sclk; 1591 data->required_sclk = vbios->mid2_sclk;
1554 } 1592 }
1555 else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) { 1593 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid3]),vbios->data_return_bus_width))
1594 && bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
1556 sclk_message = bw_def_mid; 1595 sclk_message = bw_def_mid;
1557 data->sclk_level = s_mid3; 1596 data->sclk_level = s_mid3;
1558 data->required_sclk = vbios->mid3_sclk; 1597 data->required_sclk = vbios->mid3_sclk;
1559 } 1598 }
1560 else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) { 1599 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid4]),vbios->data_return_bus_width))
1600 && bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
1561 sclk_message = bw_def_mid; 1601 sclk_message = bw_def_mid;
1562 data->sclk_level = s_mid4; 1602 data->sclk_level = s_mid4;
1563 data->required_sclk = vbios->mid4_sclk; 1603 data->required_sclk = vbios->mid4_sclk;
1564 } 1604 }
1565 else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) { 1605 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid5]),vbios->data_return_bus_width))
1606 && bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
1566 sclk_message = bw_def_mid; 1607 sclk_message = bw_def_mid;
1567 data->sclk_level = s_mid5; 1608 data->sclk_level = s_mid5;
1568 data->required_sclk = vbios->mid5_sclk; 1609 data->required_sclk = vbios->mid5_sclk;
1569 } 1610 }
1570 else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) { 1611 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid6]),vbios->data_return_bus_width))
1612 && bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
1571 sclk_message = bw_def_mid; 1613 sclk_message = bw_def_mid;
1572 data->sclk_level = s_mid6; 1614 data->sclk_level = s_mid6;
1573 data->required_sclk = vbios->mid6_sclk; 1615 data->required_sclk = vbios->mid6_sclk;
1574 } 1616 }
1575 else if (bw_ltn(data->required_sclk, sclk[s_high])) { 1617 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
1618 && bw_ltn(data->required_sclk, sclk[s_high])) {
1619 sclk_message = bw_def_high;
1620 data->sclk_level = s_high;
1621 data->required_sclk = vbios->high_sclk;
1622 }
1623 else if (bw_meq(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
1624 && bw_ltn(data->required_sclk, sclk[s_high])) {
1576 sclk_message = bw_def_high; 1625 sclk_message = bw_def_high;
1577 data->sclk_level = s_high; 1626 data->sclk_level = s_high;
1578 data->required_sclk = vbios->high_sclk; 1627 data->required_sclk = vbios->high_sclk;
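Each branch of the sclk ladder now also requires the uncompressed average bandwidth to fit under a percentage of the ideal port bandwidth at that sclk. A simplified sketch of the level walk (plain doubles, hypothetical names; the blackout, margin and voltage clauses are left out, and the final branch in the patch falls back to the high level regardless of the average-bandwidth cap):

static int pick_sclk_level_example(int num_levels, const double sclk[],
		double required_sclk, double avg_bw_no_compression,
		double data_return_bus_width_bytes,
		double max_avg_pct_of_ideal_port_bw)
{
	int level;

	for (level = 0; level < num_levels; level++) {
		double ideal_port_bw = sclk[level] * data_return_bus_width_bytes;

		if (avg_bw_no_compression <
				max_avg_pct_of_ideal_port_bw / 100.0 * ideal_port_bw &&
		    required_sclk < sclk[level])
			return level;
	}
	return num_levels - 1;	/* fall back to the highest level */
}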
@@ -1681,7 +1730,7 @@ static void calculate_bandwidth(
1681 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); 1730 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
1682 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); 1731 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
1683 } 1732 }
1684 if (data->nbp_state_change_enable == bw_def_yes) { 1733 if (data->nbp_state_change_enable == bw_def_yes && data->increase_voltage_to_support_mclk_switch) {
1685 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); 1734 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
1686 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); 1735 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
1687 } 1736 }
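With the new flag, the dram-speed-change dispclk requirement only raises the total when the calculation is allowed to increase voltage for the mclk switch. Minimal sketch (hypothetical names):

#include <stdbool.h>

static double total_dispclk_example(double total_dispclk,
		double dispclk_for_dram_speed_change,
		bool nbp_state_change_enabled,
		bool increase_voltage_to_support_mclk_switch)
{
	if (nbp_state_change_enabled && increase_voltage_to_support_mclk_switch &&
	    dispclk_for_dram_speed_change > total_dispclk)
		return dispclk_for_dram_speed_change;
	return total_dispclk;
}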
@@ -1804,7 +1853,7 @@ static void calculate_bandwidth(
1804 data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]); 1853 data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
1805 data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]); 1854 data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
1806 /*unconditionally remove black out time from the nb p_state watermark*/ 1855 /*unconditionally remove black out time from the nb p_state watermark*/
1807 if ((data->display_pstate_change_enable[i] == 1)) { 1856 if (data->display_pstate_change_enable[i] == 1) {
1808 data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level])); 1857 data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
1809 } 1858 }
1810 else { 1859 else {
@@ -1816,7 +1865,7 @@ static void calculate_bandwidth(
1816 data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time); 1865 data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
1817 data->stutter_exit_watermark[i] = bw_int_to_fixed(0); 1866 data->stutter_exit_watermark[i] = bw_int_to_fixed(0);
1818 data->stutter_entry_watermark[i] = bw_int_to_fixed(0); 1867 data->stutter_entry_watermark[i] = bw_int_to_fixed(0);
1819 if ((data->display_pstate_change_enable[i] == 1)) { 1868 if (data->display_pstate_change_enable[i] == 1) {
1820 data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level])); 1869 data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
1821 } 1870 }
1822 else { 1871 else {
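Both the dmif and mcifwr paths build the nb p-state change watermark the same way: the p-state change latency, plus the burst time at the chosen yclk/sclk levels, plus the longer of the two line-source transfer times. Sketch (plain doubles, hypothetical names):

static double nbp_state_change_watermark_example(double nbp_state_change_latency,
		double burst_time, double line_source_pixels_transfer_time,
		double dram_speed_change_line_source_transfer_time)
{
	double line_source = line_source_pixels_transfer_time >
			dram_speed_change_line_source_transfer_time ?
			line_source_pixels_transfer_time :
			dram_speed_change_line_source_transfer_time;

	return nbp_state_change_latency + burst_time + line_source;
}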
@@ -1861,23 +1910,6 @@ static void calculate_bandwidth(
1861 else { 1910 else {
1862 data->mcifwrdram_access_efficiency = bw_int_to_fixed(0); 1911 data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
1863 } 1912 }
1864 /*average bandwidth*/
1865 /*the average bandwidth with no compression during the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and by the ratio of bytes per request to useful bytes per request.*/
1866 /*the average bandwidth with compression is the same, divided by the compression ratio*/
1867 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1868 if (data->enable[i]) {
1869 data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
1870 data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
1871 }
1872 }
1873 data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
1874 data->total_average_bandwidth = bw_int_to_fixed(0);
1875 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1876 if (data->enable[i]) {
1877 data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
1878 data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
1879 }
1880 }
1881 /*stutter efficiency*/ 1913 /*stutter efficiency*/
1882 /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/ 1914 /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/
1883 /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/ 1915 /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
@@ -1905,7 +1937,7 @@ static void calculate_bandwidth(
1905 data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size))); 1937 data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
1906 } 1938 }
1907 } 1939 }
1908 data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32)))); 1940 data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_mul(sclk[data->sclk_level], vbios->data_return_bus_width));
1909 data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size; 1941 data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
1910 data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time); 1942 data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
1911 data->time_in_self_refresh = data->min_stutter_refresh_duration; 1943 data->time_in_self_refresh = data->min_stutter_refresh_duration;
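The stutter burst time is reworked to be limited only by the return bus at the selected sclk, dropping the old min() against the derated DRAM bandwidth. Sketch (plain doubles, hypothetical names):

static double stutter_burst_time_example(double total_stutter_buffer_bytes,
		double sclk, double data_return_bus_width_bytes)
{
	/* time to drain the accumulated stutter buffer over the return bus */
	return total_stutter_buffer_bytes / (sclk * data_return_bus_width_bytes);
}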
@@ -1957,7 +1989,7 @@ static void calculate_bandwidth(
1957 for (i = 1; i <= 5; i++) { 1989 for (i = 1; i <= 5; i++) {
1958 data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i))); 1990 data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
1959 if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) { 1991 if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
1960 data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency)))); 1992 data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
1961 } 1993 }
1962 else { 1994 else {
1963 data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na); 1995 data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
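The urgent-latency loop asks, for one through five urgent trips, what sclk would still move all read data in the buffer time left after those trips, using the same port-bandwidth derating as above. Sketch for a single iteration (plain doubles, hypothetical names; the page close-open guard is omitted):

static double sclk_for_urgent_latency_example(double total_reads_required_bytes,
		double min_read_buffer_time, double urgent_trip_time, int num_trips,
		double data_return_bus_width_bytes,
		double pct_of_ideal_port_bw_after_urgent)
{
	double time_left = min_read_buffer_time - urgent_trip_time * num_trips;
	double effective_bus = data_return_bus_width_bytes *
			pct_of_ideal_port_bw_after_urgent / 100.0;

	return total_reads_required_bytes / time_left / effective_bus;
}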
@@ -2033,9 +2065,12 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2033 vbios.cursor_width = 32; 2065 vbios.cursor_width = 32;
2034 vbios.average_compression_rate = 4; 2066 vbios.average_compression_rate = 4;
2035 vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; 2067 vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
2036 vbios.blackout_duration = bw_int_to_fixed(18); /* us */ 2068 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2037 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20); 2069 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2038 2070
2071 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2072 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2073 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2039 dceip.large_cursor = false; 2074 dceip.large_cursor = false;
2040 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2075 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2041 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2076 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
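Each bw_calcs_init() variant seeds the three new dceip fields with 100, so the corresponding bw_frc_to_fixed(x, 100) factors reduce to 1.0 and apply no derating by default. A trivial illustration in plain C:

/* with the default of 100 the derating factors are exactly 1.0 */
static const double port_bw_cap_factor    = 100.0 / 100.0;
static const double dram_bw_cap_factor    = 100.0 / 100.0;
static const double post_urgent_bw_factor = 100.0 / 100.0;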
@@ -2146,6 +2181,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2146 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2181 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2147 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2182 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2148 2183
2184 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2185 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2186 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2149 dceip.large_cursor = false; 2187 dceip.large_cursor = false;
2150 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2188 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2151 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2189 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2259,6 +2297,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2259 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2297 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2260 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2298 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2261 2299
2300 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2301 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2302 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2262 dceip.large_cursor = false; 2303 dceip.large_cursor = false;
2263 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2304 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2264 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2305 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2366,9 +2407,12 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2366 vbios.cursor_width = 32; 2407 vbios.cursor_width = 32;
2367 vbios.average_compression_rate = 4; 2408 vbios.average_compression_rate = 4;
2368 vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; 2409 vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
2369 vbios.blackout_duration = bw_int_to_fixed(18); /* us */ 2410 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2370 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20); 2411 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2371 2412
2413 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2414 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2415 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2372 dceip.large_cursor = false; 2416 dceip.large_cursor = false;
2373 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2417 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2374 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2418 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2479,6 +2523,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2479 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2523 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2480 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2524 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2481 2525
2526 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2527 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2528 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2482 dceip.large_cursor = false; 2529 dceip.large_cursor = false;
2483 dceip.dmif_request_buffer_size = bw_int_to_fixed(2304); 2530 dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
2484 dceip.dmif_pipe_en_fbc_chunk_tracker = true; 2531 dceip.dmif_pipe_en_fbc_chunk_tracker = true;
@@ -2597,6 +2644,7 @@ static void populate_initial_data(
2597 data->graphics_tiling_mode = bw_def_tiled; 2644 data->graphics_tiling_mode = bw_def_tiled;
2598 data->underlay_micro_tile_mode = bw_def_display_micro_tiling; 2645 data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
2599 data->graphics_micro_tile_mode = bw_def_display_micro_tiling; 2646 data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
2647 data->increase_voltage_to_support_mclk_switch = true;
2600 2648
2601 /* Pipes with underlay first */ 2649 /* Pipes with underlay first */
2602 for (i = 0; i < pipe_count; i++) { 2650 for (i = 0; i < pipe_count; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 331891c2c71a..4bb43a371292 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -33,6 +33,8 @@
33#include "dcn10/dcn10_resource.h" 33#include "dcn10/dcn10_resource.h"
34#include "dcn_calc_math.h" 34#include "dcn_calc_math.h"
35 35
36#define DC_LOGGER \
37 dc->ctx->logger
36/* 38/*
37 * NOTE: 39 * NOTE:
38 * This file is gcc-parseable HW gospel, coming straight from HW engineers. 40 * This file is gcc-parseable HW gospel, coming straight from HW engineers.
@@ -486,6 +488,7 @@ static void split_stream_across_pipes(
486 secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx]; 488 secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
487 secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; 489 secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
488 secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; 490 secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
491 secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
489 if (primary_pipe->bottom_pipe) { 492 if (primary_pipe->bottom_pipe) {
490 ASSERT(primary_pipe->bottom_pipe != secondary_pipe); 493 ASSERT(primary_pipe->bottom_pipe != secondary_pipe);
491 secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; 494 secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
@@ -625,7 +628,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
625 return updated; 628 return updated;
626} 629}
627 630
628void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v) 631static void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
629{ 632{
630 /* 633 /*
631 * disable optional pipe split by lower dispclk bounding box 634 * disable optional pipe split by lower dispclk bounding box
@@ -634,7 +637,7 @@ void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
634 v->max_dispclk[0] = v->max_dppclk_vmin0p65; 637 v->max_dispclk[0] = v->max_dppclk_vmin0p65;
635} 638}
636 639
637void hack_force_pipe_split(struct dcn_bw_internal_vars *v, 640static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
638 unsigned int pixel_rate_khz) 641 unsigned int pixel_rate_khz)
639{ 642{
640 float pixel_rate_mhz = pixel_rate_khz / 1000; 643 float pixel_rate_mhz = pixel_rate_khz / 1000;
@@ -647,25 +650,20 @@ void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
647 v->max_dppclk[0] = pixel_rate_mhz; 650 v->max_dppclk[0] = pixel_rate_mhz;
648} 651}
649 652
650void hack_bounding_box(struct dcn_bw_internal_vars *v, 653static void hack_bounding_box(struct dcn_bw_internal_vars *v,
651 struct dc_debug *dbg, 654 struct dc_debug *dbg,
652 struct dc_state *context) 655 struct dc_state *context)
653{ 656{
654 if (dbg->pipe_split_policy == MPC_SPLIT_AVOID) { 657 if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
655 hack_disable_optional_pipe_split(v); 658 hack_disable_optional_pipe_split(v);
656 }
657 659
658 if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP && 660 if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP &&
659 context->stream_count >= 2) { 661 context->stream_count >= 2)
660 hack_disable_optional_pipe_split(v); 662 hack_disable_optional_pipe_split(v);
661 }
662 663
663 if (context->stream_count == 1 && 664 if (context->stream_count == 1 &&
664 dbg->force_single_disp_pipe_split) { 665 dbg->force_single_disp_pipe_split)
665 struct dc_stream_state *stream0 = context->streams[0]; 666 hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_khz);
666
667 hack_force_pipe_split(v, stream0->timing.pix_clk_khz);
668 }
669} 667}
670 668
671bool dcn_validate_bandwidth( 669bool dcn_validate_bandwidth(
@@ -799,23 +797,10 @@ bool dcn_validate_bandwidth(
799 v->phyclk_per_state[2] = v->phyclkv_nom0p8; 797 v->phyclk_per_state[2] = v->phyclkv_nom0p8;
800 v->phyclk_per_state[1] = v->phyclkv_mid0p72; 798 v->phyclk_per_state[1] = v->phyclkv_mid0p72;
801 v->phyclk_per_state[0] = v->phyclkv_min0p65; 799 v->phyclk_per_state[0] = v->phyclkv_min0p65;
802
803 hack_bounding_box(v, &dc->debug, context);
804
805 if (v->voltage_override == dcn_bw_v_max0p9) {
806 v->voltage_override_level = number_of_states - 1;
807 } else if (v->voltage_override == dcn_bw_v_nom0p8) {
808 v->voltage_override_level = number_of_states - 2;
809 } else if (v->voltage_override == dcn_bw_v_mid0p72) {
810 v->voltage_override_level = number_of_states - 3;
811 } else {
812 v->voltage_override_level = 0;
813 }
814 v->synchronized_vblank = dcn_bw_no; 800 v->synchronized_vblank = dcn_bw_no;
815 v->ta_pscalculation = dcn_bw_override; 801 v->ta_pscalculation = dcn_bw_override;
816 v->allow_different_hratio_vratio = dcn_bw_yes; 802 v->allow_different_hratio_vratio = dcn_bw_yes;
817 803
818
819 for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { 804 for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
820 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 805 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
821 806
@@ -948,8 +933,19 @@ bool dcn_validate_bandwidth(
948 v->number_of_active_planes = input_idx; 933 v->number_of_active_planes = input_idx;
949 934
950 scaler_settings_calculation(v); 935 scaler_settings_calculation(v);
936
937 hack_bounding_box(v, &dc->debug, context);
938
951 mode_support_and_system_configuration(v); 939 mode_support_and_system_configuration(v);
952 940
 941 /* Unhack dppclk: don't bother with trying to pipe split if we cannot maintain dpm0 */
942 if (v->voltage_level != 0
943 && context->stream_count == 1
944 && dc->debug.force_single_disp_pipe_split) {
945 v->max_dppclk[0] = v->max_dppclk_vmin0p65;
946 mode_support_and_system_configuration(v);
947 }
948
953 if (v->voltage_level == 0 && 949 if (v->voltage_level == 0 &&
954 (dc->debug.sr_exit_time_dpm0_ns 950 (dc->debug.sr_exit_time_dpm0_ns
955 || dc->debug.sr_enter_plus_exit_time_dpm0_ns)) { 951 || dc->debug.sr_enter_plus_exit_time_dpm0_ns)) {
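Sketch of the added retry in dcn_validate_bandwidth(): if the forced single-display pipe split could not be held at dpm0, the dpp clock bound is pulled back to the vmin value and the mode-support pass is run again. The struct and the stub pass below are assumptions for the sketch, not the driver's types:

#include <stdbool.h>

struct bw_vars_example {
	int voltage_level;
	double max_dppclk[8];
	double max_dppclk_vmin0p65;
};

static void mode_support_pass_example(struct bw_vars_example *v)
{
	/* stand-in for the real mode support and system configuration pass */
}

static void retry_without_split_headroom_example(struct bw_vars_example *v,
		int stream_count, bool force_single_disp_pipe_split)
{
	if (v->voltage_level != 0 && stream_count == 1 &&
	    force_single_disp_pipe_split) {
		v->max_dppclk[0] = v->max_dppclk_vmin0p65;
		mode_support_pass_example(v);	/* second pass without split headroom */
	}
}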
@@ -987,8 +983,6 @@ bool dcn_validate_bandwidth(
987 context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32); 983 context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
988 } 984 }
989 985
990 context->bw.dcn.calc_clk.dram_ccm_us = (int)(v->dram_clock_change_margin);
991 context->bw.dcn.calc_clk.min_active_dram_ccm_us = (int)(v->min_active_dram_clock_change_margin);
992 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000); 986 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
993 context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000); 987 context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
994 988
@@ -1002,7 +996,26 @@ bool dcn_validate_bandwidth(
1002 dc->debug.min_disp_clk_khz; 996 dc->debug.min_disp_clk_khz;
1003 } 997 }
1004 998
1005 context->bw.dcn.calc_clk.dppclk_div = (int)(v->dispclk_dppclk_ratio) == 2; 999 context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
1000
1001 switch (v->voltage_level) {
1002 case 0:
1003 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1004 (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
1005 break;
1006 case 1:
1007 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1008 (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
1009 break;
1010 case 2:
1011 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1012 (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
1013 break;
1014 default:
1015 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1016 (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
1017 break;
1018 }
1006 1019
1007 for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { 1020 for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
1008 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1021 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
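
The switch added above publishes the per-voltage-state dppclk ceiling next to the computed dppclk (dispclk divided by the dispclk/dppclk ratio). A rough standalone illustration of the same mapping as a table lookup; the clock values here are made up, not the real dcn_soc limits:

#include <stdio.h>

/* Hypothetical per-voltage-state dppclk limits in MHz (not real SoC data). */
static const double max_dppclk_mhz[] = {
        400.0,  /* 0: vmin 0.65V */
        500.0,  /* 1: vmid 0.72V */
        600.0,  /* 2: vnom 0.80V */
        700.0,  /* 3: vmax 0.90V */
};

static int max_supported_dppclk_khz(int voltage_level)
{
        /* Any level above nominal falls back to the vmax limit, mirroring
         * the default case of the switch in the hunk above. */
        if (voltage_level < 0 || voltage_level > 2)
                voltage_level = 3;
        return (int)(max_dppclk_mhz[voltage_level] * 1000);
}

int main(void)
{
        int dispclk_khz = 600000;
        double dispclk_dppclk_ratio = 2.0;
        int dppclk_khz = (int)(dispclk_khz / dispclk_dppclk_ratio);

        printf("dppclk=%d kHz, ceiling at level 1=%d kHz\n",
               dppclk_khz, max_supported_dppclk_khz(1));
        return 0;
}
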
@@ -1248,8 +1261,7 @@ unsigned int dcn_find_dcfclk_suits_all(
1248 else 1261 else
1249 dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000; 1262 dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000;
1250 1263
1251 dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS, 1264 DC_LOG_BANDWIDTH_CALCS("\tdcf_clk for voltage = %d\n", dcf_clk);
1252 "\tdcf_clk for voltage = %d\n", dcf_clk);
1253 return dcf_clk; 1265 return dcf_clk;
1254} 1266}
1255 1267
@@ -1447,8 +1459,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
1447void dcn_bw_sync_calcs_and_dml(struct dc *dc) 1459void dcn_bw_sync_calcs_and_dml(struct dc *dc)
1448{ 1460{
1449 kernel_fpu_begin(); 1461 kernel_fpu_begin();
1450 dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS, 1462 DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %d ns\n"
1451 "sr_exit_time: %d ns\n"
1452 "sr_enter_plus_exit_time: %d ns\n" 1463 "sr_enter_plus_exit_time: %d ns\n"
1453 "urgent_latency: %d ns\n" 1464 "urgent_latency: %d ns\n"
1454 "write_back_latency: %d ns\n" 1465 "write_back_latency: %d ns\n"
@@ -1516,8 +1527,7 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
1516 dc->dcn_soc->vmm_page_size, 1527 dc->dcn_soc->vmm_page_size,
1517 dc->dcn_soc->dram_clock_change_latency * 1000, 1528 dc->dcn_soc->dram_clock_change_latency * 1000,
1518 dc->dcn_soc->return_bus_width); 1529 dc->dcn_soc->return_bus_width);
1519 dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS, 1530 DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %d\n"
1520 "rob_buffer_size_in_kbyte: %d\n"
1521 "det_buffer_size_in_kbyte: %d\n" 1531 "det_buffer_size_in_kbyte: %d\n"
1522 "dpp_output_buffer_pixels: %d\n" 1532 "dpp_output_buffer_pixels: %d\n"
1523 "opp_output_buffer_lines: %d\n" 1533 "opp_output_buffer_lines: %d\n"
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 35e84ed031de..63a3d468939a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -29,6 +29,7 @@
29#include "core_status.h" 29#include "core_status.h"
30#include "core_types.h" 30#include "core_types.h"
31#include "hw_sequencer.h" 31#include "hw_sequencer.h"
32#include "dce/dce_hwseq.h"
32 33
33#include "resource.h" 34#include "resource.h"
34 35
@@ -38,8 +39,10 @@
38#include "bios_parser_interface.h" 39#include "bios_parser_interface.h"
39#include "include/irq_service_interface.h" 40#include "include/irq_service_interface.h"
40#include "transform.h" 41#include "transform.h"
42#include "dmcu.h"
41#include "dpp.h" 43#include "dpp.h"
42#include "timing_generator.h" 44#include "timing_generator.h"
45#include "abm.h"
43#include "virtual/virtual_link_encoder.h" 46#include "virtual/virtual_link_encoder.h"
44 47
45#include "link_hwss.h" 48#include "link_hwss.h"
@@ -49,6 +52,8 @@
49#include "dm_helpers.h" 52#include "dm_helpers.h"
50#include "mem_input.h" 53#include "mem_input.h"
51#include "hubp.h" 54#include "hubp.h"
55#define DC_LOGGER \
56 dc->ctx->logger
52 57
53 58
54/******************************************************************************* 59/*******************************************************************************
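
Throughout this series, open-coded dm_logger_write(ctx->logger, LOG_X, ...) calls become DC_LOG_X() macros that pick up a per-file DC_LOGGER definition such as the one added above. The real macro bodies live in the DC logger headers and are not part of this diff; the following is only a plausible standalone sketch of the pattern, with a printf-based stand-in for dm_logger_write():

#include <stdarg.h>
#include <stdio.h>

/* Stand-in logger handle and backend, for illustration only. */
struct dal_logger { const char *tag; };

static void dm_logger_write_stub(struct dal_logger *logger,
                                 const char *category, const char *fmt, ...)
{
        va_list args;

        printf("[%s:%s] ", logger->tag, category);
        va_start(args, fmt);
        vprintf(fmt, args);
        va_end(args);
}

static struct dal_logger file_logger = { .tag = "dc" };

/* Each .c file defines DC_LOGGER to whatever logger handle it has in scope;
 * the DC_LOG_* wrappers then only need the format arguments. */
#define DC_LOGGER (&file_logger)
#define DC_LOG_DC(...)      dm_logger_write_stub(DC_LOGGER, "DC", __VA_ARGS__)
#define DC_LOG_WARNING(...) dm_logger_write_stub(DC_LOGGER, "WARNING", __VA_ARGS__)

int main(void)
{
        DC_LOG_DC("Display Core initialized\n");
        DC_LOG_WARNING("CRC capture not supported.\n");
        return 0;
}
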
@@ -214,6 +219,130 @@ bool dc_stream_get_crtc_position(struct dc *dc,
214 return ret; 219 return ret;
215} 220}
216 221
222/**
223 * dc_stream_configure_crc: Configure CRC capture for the given stream.
224 * @dc: DC Object
225 * @stream: The stream to configure CRC on.
226 * @enable: Enable CRC if true, disable otherwise.
227 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
228 * once.
229 *
230 * By default, only CRC0 is configured, and the entire frame is used to
231 * calculate the crc.
232 */
233bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
234 bool enable, bool continuous)
235{
236 int i;
237 struct pipe_ctx *pipe;
238 struct crc_params param;
239 struct timing_generator *tg;
240
241 for (i = 0; i < MAX_PIPES; i++) {
242 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
243 if (pipe->stream == stream)
244 break;
245 }
246 /* Stream not found */
247 if (i == MAX_PIPES)
248 return false;
249
250 /* Always capture the full frame */
251 param.windowa_x_start = 0;
252 param.windowa_y_start = 0;
253 param.windowa_x_end = pipe->stream->timing.h_addressable;
254 param.windowa_y_end = pipe->stream->timing.v_addressable;
255 param.windowb_x_start = 0;
256 param.windowb_y_start = 0;
257 param.windowb_x_end = pipe->stream->timing.h_addressable;
258 param.windowb_y_end = pipe->stream->timing.v_addressable;
259
260 /* Default to the union of both windows */
261 param.selection = UNION_WINDOW_A_B;
262 param.continuous_mode = continuous;
263 param.enable = enable;
264
265 tg = pipe->stream_res.tg;
266
267 /* Only call if supported */
268 if (tg->funcs->configure_crc)
269 return tg->funcs->configure_crc(tg, &param);
270 DC_LOG_WARNING("CRC capture not supported.");
271 return false;
272}
273
274/**
275 * dc_stream_get_crc: Get CRC values for the given stream.
276 * @dc: DC object
277 * @stream: The DC stream state of the stream to get CRCs from.
278 * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
279 *
280 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
281 * Return false if stream is not found, or if CRCs are not enabled.
282 */
283bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
284 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
285{
286 int i;
287 struct pipe_ctx *pipe;
288 struct timing_generator *tg;
289
290 for (i = 0; i < MAX_PIPES; i++) {
291 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
292 if (pipe->stream == stream)
293 break;
294 }
295 /* Stream not found */
296 if (i == MAX_PIPES)
297 return false;
298
299 tg = pipe->stream_res.tg;
300
301 if (tg->funcs->get_crc)
302 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
303 DC_LOG_WARNING("CRC capture not supported.");
304 return false;
305}
306
307void dc_stream_set_dither_option(struct dc_stream_state *stream,
308 enum dc_dither_option option)
309{
310 struct bit_depth_reduction_params params;
311 struct dc_link *link = stream->status.link;
312 struct pipe_ctx *pipes = NULL;
313 int i;
314
315 for (i = 0; i < MAX_PIPES; i++) {
316 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
317 stream) {
318 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
319 break;
320 }
321 }
322
323 if (!pipes)
324 return;
325 if (option > DITHER_OPTION_MAX)
326 return;
327
328 stream->dither_option = option;
329
330 memset(&params, 0, sizeof(params));
331 resource_build_bit_depth_reduction_params(stream, &params);
332 stream->bit_depth_params = params;
333
334 if (pipes->plane_res.xfm &&
335 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
336 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
337 pipes->plane_res.xfm,
338 pipes->plane_res.scl_data.lb_params.depth,
339 &stream->bit_depth_params);
340 }
341
342 pipes->stream_res.opp->funcs->
343 opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
344}
345
217void dc_stream_set_static_screen_events(struct dc *dc, 346void dc_stream_set_static_screen_events(struct dc *dc,
218 struct dc_stream_state **streams, 347 struct dc_stream_state **streams,
219 int num_streams, 348 int num_streams,
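
dc_stream_configure_crc() and dc_stream_get_crc() added above give the display manager a way to arm CRC capture on a stream and read the per-channel values back (the DRM CRC debugfs hook is the intended consumer). A hedged, uncompiled usage fragment; the dc and stream pointers are assumed to come from the usual DM state, and the DRM_DEBUG_KMS print is just illustrative:

/* Sketch only: error handling trimmed, 'dc' and 'stream' assumed valid. */
static void example_read_stream_crc(struct dc *dc, struct dc_stream_state *stream)
{
        uint32_t r_cr = 0, g_y = 0, b_cb = 0;

        /* Arm continuous, full-frame CRC capture. */
        if (!dc_stream_configure_crc(dc, stream, true, true))
                return;

        /* Later (e.g. from a vblank worker) read back the latest CRC. */
        if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
                DRM_DEBUG_KMS("CRC: %08x %08x %08x\n", r_cr, g_y, b_cb);

        /* Disarm once the consumer goes away. */
        dc_stream_configure_crc(dc, stream, false, false);
}
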
@@ -359,9 +488,6 @@ static bool construct(struct dc *dc,
359 dc_version = resource_parse_asic_id(init_params->asic_id); 488 dc_version = resource_parse_asic_id(init_params->asic_id);
360 dc_ctx->dce_version = dc_version; 489 dc_ctx->dce_version = dc_version;
361 490
362#if defined(CONFIG_DRM_AMD_DC_FBC)
363 dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
364#endif
365 /* Resource should construct all asic specific resources. 491 /* Resource should construct all asic specific resources.
366 * This should be the only place where we need to parse the asic id 492 * This should be the only place where we need to parse the asic id
367 */ 493 */
@@ -487,10 +613,15 @@ struct dc *dc_create(const struct dc_init_data *init_params)
487 dc->caps.max_audios = dc->res_pool->audio_count; 613 dc->caps.max_audios = dc->res_pool->audio_count;
488 dc->caps.linear_pitch_alignment = 64; 614 dc->caps.linear_pitch_alignment = 64;
489 615
616 /* Populate versioning information */
617 dc->versions.dc_ver = DC_VER;
618
619 if (dc->res_pool->dmcu != NULL)
620 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
621
490 dc->config = init_params->flags; 622 dc->config = init_params->flags;
491 623
492 dm_logger_write(dc->ctx->logger, LOG_DC, 624 DC_LOG_DC("Display Core initialized\n");
493 "Display Core initialized\n");
494 625
495 626
496 /* TODO: missing feature to be enabled */ 627 /* TODO: missing feature to be enabled */
@@ -524,11 +655,13 @@ static void enable_timing_multisync(
524 if (!ctx->res_ctx.pipe_ctx[i].stream || 655 if (!ctx->res_ctx.pipe_ctx[i].stream ||
525 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) 656 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
526 continue; 657 continue;
658 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
659 continue;
527 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i]; 660 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
528 multisync_count++; 661 multisync_count++;
529 } 662 }
530 663
531 if (multisync_count > 1) { 664 if (multisync_count > 0) {
532 dc->hwss.enable_per_frame_crtc_position_reset( 665 dc->hwss.enable_per_frame_crtc_position_reset(
533 dc, multisync_count, multisync_pipes); 666 dc, multisync_count, multisync_pipes);
534 } 667 }
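
The two changes above exclude the pipe whose stream is its own trigger event source (it is the reference, not a follower) and fire the per-frame CRTC position reset as soon as at least one follower pipe remains. A reduced standalone model of that filtering, with invented toy types:

#include <stdio.h>
#include <stdbool.h>

#define MAX_PIPES 6

struct toy_stream {
        bool reset_enabled;
        const struct toy_stream *event_source;  /* stream driving the reset */
};

struct toy_pipe { const struct toy_stream *stream; };

int main(void)
{
        struct toy_stream master = { true, NULL };
        struct toy_stream slave = { true, &master };
        struct toy_pipe pipes[MAX_PIPES] = { { &master }, { &slave } };
        int multisync_count = 0;

        master.event_source = &master;  /* the master is its own event source */

        for (int i = 0; i < MAX_PIPES; i++) {
                const struct toy_stream *s = pipes[i].stream;

                if (!s || !s->reset_enabled)
                        continue;
                /* Skip the pipe whose stream is its own event source. */
                if (s == s->event_source)
                        continue;
                multisync_count++;
        }

        /* One follower is now enough to program the position reset. */
        if (multisync_count > 0)
                printf("resetting %d follower pipe(s)\n", multisync_count);
        return 0;
}
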
@@ -650,7 +783,6 @@ bool dc_enable_stereo(
650 return ret; 783 return ret;
651} 784}
652 785
653
654/* 786/*
655 * Applies given context to HW and copy it into current context. 787 * Applies given context to HW and copy it into current context.
656 * It's up to the user to release the src context afterwards. 788 * It's up to the user to release the src context afterwards.
@@ -669,7 +801,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
669 dc_streams[i] = context->streams[i]; 801 dc_streams[i] = context->streams[i];
670 802
671 if (!dcb->funcs->is_accelerated_mode(dcb)) 803 if (!dcb->funcs->is_accelerated_mode(dcb))
672 dc->hwss.enable_accelerated_mode(dc); 804 dc->hwss.enable_accelerated_mode(dc, context);
805
806 dc->hwss.set_bandwidth(dc, context, false);
673 807
674 /* re-program planes for existing stream, in case we need to 808 /* re-program planes for existing stream, in case we need to
675 * free up plane resource for later use 809 * free up plane resource for later use
@@ -739,6 +873,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
739 873
740 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 874 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
741 875
876 /* pplib is notified if disp_num changed */
877 dc->hwss.set_bandwidth(dc, context, true);
878
742 dc_release_state(dc->current_state); 879 dc_release_state(dc->current_state);
743 880
744 dc->current_state = context; 881 dc->current_state = context;
@@ -758,7 +895,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
758 if (false == context_changed(dc, context)) 895 if (false == context_changed(dc, context))
759 return DC_OK; 896 return DC_OK;
760 897
761 dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n", 898 DC_LOG_DC("%s: %d streams\n",
762 __func__, context->stream_count); 899 __func__, context->stream_count);
763 900
764 for (i = 0; i < context->stream_count; i++) { 901 for (i = 0; i < context->stream_count; i++) {
@@ -979,6 +1116,9 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
979 if (u->plane_info->rotation != u->surface->rotation) 1116 if (u->plane_info->rotation != u->surface->rotation)
980 update_flags->bits.rotation_change = 1; 1117 update_flags->bits.rotation_change = 1;
981 1118
1119 if (u->plane_info->format != u->surface->format)
1120 update_flags->bits.pixel_format_change = 1;
1121
982 if (u->plane_info->stereo_format != u->surface->stereo_format) 1122 if (u->plane_info->stereo_format != u->surface->stereo_format)
983 update_flags->bits.stereo_format_change = 1; 1123 update_flags->bits.stereo_format_change = 1;
984 1124
@@ -997,6 +1137,9 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
997 */ 1137 */
998 update_flags->bits.bpp_change = 1; 1138 update_flags->bits.bpp_change = 1;
999 1139
1140 if (u->gamma && dce_use_lut(u->plane_info->format))
1141 update_flags->bits.gamma_change = 1;
1142
1000 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, 1143 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
1001 sizeof(union dc_tiling_info)) != 0) { 1144 sizeof(union dc_tiling_info)) != 0) {
1002 update_flags->bits.swizzle_change = 1; 1145 update_flags->bits.swizzle_change = 1;
@@ -1012,8 +1155,11 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
1012 1155
1013 if (update_flags->bits.rotation_change 1156 if (update_flags->bits.rotation_change
1014 || update_flags->bits.stereo_format_change 1157 || update_flags->bits.stereo_format_change
1158 || update_flags->bits.pixel_format_change
1159 || update_flags->bits.gamma_change
1015 || update_flags->bits.bpp_change 1160 || update_flags->bits.bpp_change
1016 || update_flags->bits.bandwidth_change) 1161 || update_flags->bits.bandwidth_change
1162 || update_flags->bits.output_tf_change)
1017 return UPDATE_TYPE_FULL; 1163 return UPDATE_TYPE_FULL;
1018 1164
1019 return UPDATE_TYPE_MED; 1165 return UPDATE_TYPE_MED;
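
With pixel_format_change and gamma_change added above, any of these surface-update flag bits now escalates the update to UPDATE_TYPE_FULL, while everything milder stays at UPDATE_TYPE_MED. A reduced standalone model of that classification (the real union surface_update_flags carries many more bits):

#include <stdio.h>

/* Reduced stand-in for union surface_update_flags used above. */
union update_flags {
        struct {
                unsigned rotation_change      : 1;
                unsigned stereo_format_change : 1;
                unsigned pixel_format_change  : 1;
                unsigned gamma_change         : 1;
                unsigned bpp_change           : 1;
                unsigned bandwidth_change     : 1;
                unsigned output_tf_change     : 1;
        } bits;
        unsigned raw;
};

enum update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_MED, UPDATE_TYPE_FULL };

static enum update_type classify(const union update_flags *f)
{
        /* Any flag that invalidates the current pipe programming forces a
         * full update; everything else in this reduced model is medium. */
        if (f->bits.rotation_change || f->bits.stereo_format_change ||
            f->bits.pixel_format_change || f->bits.gamma_change ||
            f->bits.bpp_change || f->bits.bandwidth_change ||
            f->bits.output_tf_change)
                return UPDATE_TYPE_FULL;
        return UPDATE_TYPE_MED;
}

int main(void)
{
        union update_flags f = { .raw = 0 };

        f.bits.pixel_format_change = 1;
        printf("update type = %d\n", classify(&f));
        return 0;
}
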
@@ -1092,12 +1238,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
1092 elevate_update_type(&overall_type, type); 1238 elevate_update_type(&overall_type, type);
1093 1239
1094 if (u->in_transfer_func) 1240 if (u->in_transfer_func)
1095 update_flags->bits.in_transfer_func = 1; 1241 update_flags->bits.in_transfer_func_change = 1;
1096 1242
1097 if (u->input_csc_color_matrix) 1243 if (u->input_csc_color_matrix)
1098 update_flags->bits.input_csc_change = 1; 1244 update_flags->bits.input_csc_change = 1;
1099 1245
1100 if (update_flags->bits.in_transfer_func 1246 if (update_flags->bits.in_transfer_func_change
1101 || update_flags->bits.input_csc_change) { 1247 || update_flags->bits.input_csc_change) {
1102 type = UPDATE_TYPE_MED; 1248 type = UPDATE_TYPE_MED;
1103 elevate_update_type(&overall_type, type); 1249 elevate_update_type(&overall_type, type);
@@ -1183,6 +1329,7 @@ static void commit_planes_for_stream(struct dc *dc,
1183 struct dc_state *context) 1329 struct dc_state *context)
1184{ 1330{
1185 int i, j; 1331 int i, j;
1332 struct pipe_ctx *top_pipe_to_program = NULL;
1186 1333
1187 if (update_type == UPDATE_TYPE_FULL) { 1334 if (update_type == UPDATE_TYPE_FULL) {
1188 dc->hwss.set_bandwidth(dc, context, false); 1335 dc->hwss.set_bandwidth(dc, context, false);
@@ -1202,39 +1349,64 @@ static void commit_planes_for_stream(struct dc *dc,
1202 for (j = 0; j < dc->res_pool->pipe_count; j++) { 1349 for (j = 0; j < dc->res_pool->pipe_count; j++) {
1203 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 1350 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1204 1351
1205 if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
1206 continue;
1207
1208 if (!pipe_ctx->top_pipe && 1352 if (!pipe_ctx->top_pipe &&
1209 pipe_ctx->stream && 1353 pipe_ctx->stream &&
1210 pipe_ctx->stream == stream) { 1354 pipe_ctx->stream == stream) {
1211 struct dc_stream_status *stream_status = 1355 struct dc_stream_status *stream_status = NULL;
1356
1357 top_pipe_to_program = pipe_ctx;
1358
1359 if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
1360 continue;
1361
1362 stream_status =
1212 stream_get_status(context, pipe_ctx->stream); 1363 stream_get_status(context, pipe_ctx->stream);
1213 1364
1214 dc->hwss.apply_ctx_for_surface( 1365 dc->hwss.apply_ctx_for_surface(
1215 dc, pipe_ctx->stream, stream_status->plane_count, context); 1366 dc, pipe_ctx->stream, stream_status->plane_count, context);
1367
1368 if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
1369 if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
 1370 // if otg funcs are defined, check if blanked before programming
1371 if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
1372 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1373 pipe_ctx->stream_res.abm, stream->abm_level);
1374 } else
1375 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1376 pipe_ctx->stream_res.abm, stream->abm_level);
1377 }
1216 } 1378 }
1217 } 1379 }
1218 1380
1219 if (update_type == UPDATE_TYPE_FULL) 1381 if (update_type == UPDATE_TYPE_FULL)
1220 context_timing_trace(dc, &context->res_ctx); 1382 context_timing_trace(dc, &context->res_ctx);
1221 1383
1222 /* Perform requested Updates */ 1384 /* Lock the top pipe while updating plane addrs, since freesync requires
1223 for (i = 0; i < surface_count; i++) { 1385 * plane addr update event triggers to be synchronized.
1224 struct dc_plane_state *plane_state = srf_updates[i].surface; 1386 * top_pipe_to_program is expected to never be NULL
1387 */
1388 if (update_type == UPDATE_TYPE_FAST) {
1389 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
1225 1390
1226 for (j = 0; j < dc->res_pool->pipe_count; j++) { 1391 /* Perform requested Updates */
1227 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 1392 for (i = 0; i < surface_count; i++) {
1393 struct dc_plane_state *plane_state = srf_updates[i].surface;
1228 1394
1229 if (pipe_ctx->stream != stream) 1395 for (j = 0; j < dc->res_pool->pipe_count; j++) {
1230 continue; 1396 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1231 1397
1232 if (pipe_ctx->plane_state != plane_state) 1398 if (pipe_ctx->stream != stream)
1233 continue; 1399 continue;
1400
1401 if (pipe_ctx->plane_state != plane_state)
1402 continue;
1234 1403
1235 if (update_type == UPDATE_TYPE_FAST && srf_updates[i].flip_addr) 1404 if (srf_updates[i].flip_addr)
1236 dc->hwss.update_plane_addr(dc, pipe_ctx); 1405 dc->hwss.update_plane_addr(dc, pipe_ctx);
1406 }
1237 } 1407 }
1408
1409 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
1238 } 1410 }
1239 1411
1240 if (stream && stream_update && update_type > UPDATE_TYPE_FAST) 1412 if (stream && stream_update && update_type > UPDATE_TYPE_FAST)
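
The restructured loop above remembers the top pipe of the stream and, for fast updates, holds dc->hwss.pipe_control_lock() across all flip-address programming so the flips on a split pipe pair latch in the same frame. A toy standalone model of that lock/program/unlock shape, where "locking" simply defers the staged addresses until release:

#include <stdio.h>
#include <stdbool.h>

/* Toy model: while "locked", programmed addresses are only staged; they all
 * take effect together when the lock is released (double-buffered update). */
struct toy_pipe {
        unsigned long long staged_addr;
        unsigned long long active_addr;
};

struct toy_lock {
        bool held;
        struct toy_pipe *pipes;
        int count;
};

static void pipe_control_lock(struct toy_lock *l, bool lock)
{
        l->held = lock;
        if (!lock) {
                /* Unlock: latch every staged address in one shot. */
                for (int i = 0; i < l->count; i++)
                        l->pipes[i].active_addr = l->pipes[i].staged_addr;
        }
}

static void update_plane_addr(struct toy_pipe *p, unsigned long long addr)
{
        p->staged_addr = addr;
}

int main(void)
{
        struct toy_pipe pipes[2] = { { 0, 0 }, { 0, 0 } };
        struct toy_lock lock = { false, pipes, 2 };

        /* Fast path: lock the top pipe, program both halves of the split,
         * then release so the flips latch together. */
        pipe_control_lock(&lock, true);
        update_plane_addr(&pipes[0], 0x1000);
        update_plane_addr(&pipes[1], 0x1000);
        pipe_control_lock(&lock, false);

        printf("active: %#llx %#llx\n", pipes[0].active_addr, pipes[1].active_addr);
        return 0;
}
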
@@ -1358,13 +1530,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(
1358 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 1530 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
1359} 1531}
1360 1532
1361void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 1533bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
1362{ 1534{
1363 1535
1364 if (dc == NULL) 1536 if (dc == NULL)
1365 return; 1537 return false;
1366 1538
1367 dal_irq_service_set(dc->res_pool->irqs, src, enable); 1539 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
1368} 1540}
1369 1541
1370void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 1542void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
@@ -1487,12 +1659,17 @@ struct dc_sink *dc_link_add_remote_sink(
1487 &dc_sink->dc_edid, 1659 &dc_sink->dc_edid,
1488 &dc_sink->edid_caps); 1660 &dc_sink->edid_caps);
1489 1661
1490 if (edid_status != EDID_OK) 1662 /*
1491 goto fail; 1663 * Treat device as no EDID device if EDID
1664 * parsing fails
1665 */
1666 if (edid_status != EDID_OK) {
1667 dc_sink->dc_edid.length = 0;
 1668 dm_error("Bad EDID, status %d!\n", edid_status);
1669 }
1492 1670
1493 return dc_sink; 1671 return dc_sink;
1494fail: 1672
1495 dc_link_remove_remote_sink(link, dc_sink);
1496fail_add_sink: 1673fail_add_sink:
1497 dc_sink_release(dc_sink); 1674 dc_sink_release(dc_sink);
1498 return NULL; 1675 return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 1babac07bcc9..5a552cb3f8a7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -36,26 +36,22 @@
36#include "hw_sequencer.h" 36#include "hw_sequencer.h"
37 37
38#include "resource.h" 38#include "resource.h"
39#define DC_LOGGER \
40 logger
39 41
40#define SURFACE_TRACE(...) do {\ 42#define SURFACE_TRACE(...) do {\
41 if (dc->debug.surface_trace) \ 43 if (dc->debug.surface_trace) \
42 dm_logger_write(logger, \ 44 DC_LOG_IF_TRACE(__VA_ARGS__); \
43 LOG_IF_TRACE, \
44 ##__VA_ARGS__); \
45} while (0) 45} while (0)
46 46
47#define TIMING_TRACE(...) do {\ 47#define TIMING_TRACE(...) do {\
48 if (dc->debug.timing_trace) \ 48 if (dc->debug.timing_trace) \
49 dm_logger_write(logger, \ 49 DC_LOG_SYNC(__VA_ARGS__); \
50 LOG_SYNC, \
51 ##__VA_ARGS__); \
52} while (0) 50} while (0)
53 51
54#define CLOCK_TRACE(...) do {\ 52#define CLOCK_TRACE(...) do {\
55 if (dc->debug.clock_trace) \ 53 if (dc->debug.clock_trace) \
56 dm_logger_write(logger, \ 54 DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \
57 LOG_BANDWIDTH_CALCS, \
58 ##__VA_ARGS__); \
59} while (0) 55} while (0)
60 56
61void pre_surface_trace( 57void pre_surface_trace(
@@ -361,25 +357,20 @@ void context_clock_trace(
361 struct dc *core_dc = dc; 357 struct dc *core_dc = dc;
362 struct dal_logger *logger = core_dc->ctx->logger; 358 struct dal_logger *logger = core_dc->ctx->logger;
363 359
364 CLOCK_TRACE("Current: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n" 360 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
365 "dcfclk_deep_sleep_khz:%d fclk_khz:%d\n" 361 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
366 "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
367 context->bw.dcn.calc_clk.dispclk_khz, 362 context->bw.dcn.calc_clk.dispclk_khz,
368 context->bw.dcn.calc_clk.dppclk_div, 363 context->bw.dcn.calc_clk.dppclk_khz,
369 context->bw.dcn.calc_clk.dcfclk_khz, 364 context->bw.dcn.calc_clk.dcfclk_khz,
370 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, 365 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
371 context->bw.dcn.calc_clk.fclk_khz, 366 context->bw.dcn.calc_clk.fclk_khz,
372 context->bw.dcn.calc_clk.dram_ccm_us, 367 context->bw.dcn.calc_clk.socclk_khz);
373 context->bw.dcn.calc_clk.min_active_dram_ccm_us); 368 CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
374 CLOCK_TRACE("Calculated: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n" 369 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
375 "dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
376 "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
377 context->bw.dcn.calc_clk.dispclk_khz, 370 context->bw.dcn.calc_clk.dispclk_khz,
378 context->bw.dcn.calc_clk.dppclk_div, 371 context->bw.dcn.calc_clk.dppclk_khz,
379 context->bw.dcn.calc_clk.dcfclk_khz, 372 context->bw.dcn.calc_clk.dcfclk_khz,
380 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, 373 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
381 context->bw.dcn.calc_clk.fclk_khz, 374 context->bw.dcn.calc_clk.fclk_khz);
382 context->bw.dcn.calc_clk.dram_ccm_us,
383 context->bw.dcn.calc_clk.min_active_dram_ccm_us);
384#endif 375#endif
385} 376}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a37428271573..eeb04471b2f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,9 +45,11 @@
45#include "dce/dce_11_0_d.h" 45#include "dce/dce_11_0_d.h"
46#include "dce/dce_11_0_enum.h" 46#include "dce/dce_11_0_enum.h"
47#include "dce/dce_11_0_sh_mask.h" 47#include "dce/dce_11_0_sh_mask.h"
48#define DC_LOGGER \
49 dc_ctx->logger
48 50
49#define LINK_INFO(...) \ 51#define LINK_INFO(...) \
50 dm_logger_write(dc_ctx->logger, LOG_HW_HOTPLUG, \ 52 DC_LOG_HW_HOTPLUG( \
51 __VA_ARGS__) 53 __VA_ARGS__)
52 54
53/******************************************************************************* 55/*******************************************************************************
@@ -126,6 +128,8 @@ static bool program_hpd_filter(
126 int delay_on_connect_in_ms = 0; 128 int delay_on_connect_in_ms = 0;
127 int delay_on_disconnect_in_ms = 0; 129 int delay_on_disconnect_in_ms = 0;
128 130
131 if (link->is_hpd_filter_disabled)
132 return false;
129 /* Verify feature is supported */ 133 /* Verify feature is supported */
130 switch (link->connector_signal) { 134 switch (link->connector_signal) {
131 case SIGNAL_TYPE_DVI_SINGLE_LINK: 135 case SIGNAL_TYPE_DVI_SINGLE_LINK:
@@ -464,7 +468,7 @@ static void link_disconnect_sink(struct dc_link *link)
464 link->dpcd_sink_count = 0; 468 link->dpcd_sink_count = 0;
465} 469}
466 470
467static void detect_dp( 471static bool detect_dp(
468 struct dc_link *link, 472 struct dc_link *link,
469 struct display_sink_capability *sink_caps, 473 struct display_sink_capability *sink_caps,
470 bool *converter_disable_audio, 474 bool *converter_disable_audio,
@@ -478,7 +482,8 @@ static void detect_dp(
478 482
479 if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { 483 if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
480 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; 484 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
481 detect_dp_sink_caps(link); 485 if (!detect_dp_sink_caps(link))
486 return false;
482 487
483 if (is_mst_supported(link)) { 488 if (is_mst_supported(link)) {
484 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST; 489 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
@@ -529,7 +534,7 @@ static void detect_dp(
529 * active dongle unplug processing for short irq 534 * active dongle unplug processing for short irq
530 */ 535 */
531 link_disconnect_sink(link); 536 link_disconnect_sink(link);
532 return; 537 return true;
533 } 538 }
534 539
535 if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER) 540 if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
@@ -541,6 +546,8 @@ static void detect_dp(
541 sink_caps, 546 sink_caps,
542 audio_support); 547 audio_support);
543 } 548 }
549
550 return true;
544} 551}
545 552
546bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) 553bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
@@ -604,11 +611,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
604 } 611 }
605 612
606 case SIGNAL_TYPE_DISPLAY_PORT: { 613 case SIGNAL_TYPE_DISPLAY_PORT: {
607 detect_dp( 614 if (!detect_dp(
608 link, 615 link,
609 &sink_caps, 616 &sink_caps,
610 &converter_disable_audio, 617 &converter_disable_audio,
611 aud_support, reason); 618 aud_support, reason))
619 return false;
612 620
613 /* Active dongle downstream unplug */ 621 /* Active dongle downstream unplug */
614 if (link->type == dc_connection_active_dongle 622 if (link->type == dc_connection_active_dongle
@@ -671,14 +679,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
671 679
672 switch (edid_status) { 680 switch (edid_status) {
673 case EDID_BAD_CHECKSUM: 681 case EDID_BAD_CHECKSUM:
674 dm_logger_write(link->ctx->logger, LOG_ERROR, 682 DC_LOG_ERROR("EDID checksum invalid.\n");
675 "EDID checksum invalid.\n");
676 break; 683 break;
677 case EDID_NO_RESPONSE: 684 case EDID_NO_RESPONSE:
678 dm_logger_write(link->ctx->logger, LOG_ERROR, 685 DC_LOG_ERROR("No EDID read.\n");
679 "No EDID read.\n");
680 return false;
681
682 default: 686 default:
683 break; 687 break;
684 } 688 }
@@ -708,8 +712,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
708 "%s: [Block %d] ", sink->edid_caps.display_name, i); 712 "%s: [Block %d] ", sink->edid_caps.display_name, i);
709 } 713 }
710 714
711 dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER, 715 DC_LOG_DETECTION_EDID_PARSER("%s: "
712 "%s: "
713 "manufacturer_id = %X, " 716 "manufacturer_id = %X, "
714 "product_id = %X, " 717 "product_id = %X, "
715 "serial_number = %X, " 718 "serial_number = %X, "
@@ -729,8 +732,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
729 sink->edid_caps.audio_mode_count); 732 sink->edid_caps.audio_mode_count);
730 733
731 for (i = 0; i < sink->edid_caps.audio_mode_count; i++) { 734 for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
732 dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER, 735 DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, "
733 "%s: mode number = %d, "
734 "format_code = %d, " 736 "format_code = %d, "
735 "channel_count = %d, " 737 "channel_count = %d, "
736 "sample_rate = %d, " 738 "sample_rate = %d, "
@@ -980,8 +982,7 @@ static bool construct(
980 } 982 }
981 break; 983 break;
982 default: 984 default:
983 dm_logger_write(dc_ctx->logger, LOG_WARNING, 985 DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id);
984 "Unsupported Connector type:%d!\n", link->link_id.id);
985 goto create_fail; 986 goto create_fail;
986 } 987 }
987 988
@@ -1134,7 +1135,7 @@ static void dpcd_configure_panel_mode(
1134{ 1135{
1135 union dpcd_edp_config edp_config_set; 1136 union dpcd_edp_config edp_config_set;
1136 bool panel_mode_edp = false; 1137 bool panel_mode_edp = false;
1137 1138 struct dc_context *dc_ctx = link->ctx;
1138 memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); 1139 memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
1139 1140
1140 if (DP_PANEL_MODE_DEFAULT != panel_mode) { 1141 if (DP_PANEL_MODE_DEFAULT != panel_mode) {
@@ -1171,8 +1172,7 @@ static void dpcd_configure_panel_mode(
1171 ASSERT(result == DDC_RESULT_SUCESSFULL); 1172 ASSERT(result == DDC_RESULT_SUCESSFULL);
1172 } 1173 }
1173 } 1174 }
1174 dm_logger_write(link->ctx->logger, LOG_DETECTION_DP_CAPS, 1175 DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
1175 "Link: %d eDP panel mode supported: %d "
1176 "eDP panel mode enabled: %d \n", 1176 "eDP panel mode enabled: %d \n",
1177 link->link_index, 1177 link->link_index,
1178 link->dpcd_caps.panel_mode_edp, 1178 link->dpcd_caps.panel_mode_edp,
@@ -1248,6 +1248,12 @@ static enum dc_status enable_link_dp(
1248 pipe_ctx->clock_source->id, 1248 pipe_ctx->clock_source->id,
1249 &link_settings); 1249 &link_settings);
1250 1250
1251 if (stream->sink->edid_caps.panel_patch.dppowerup_delay > 0) {
1252 int delay_dp_power_up_in_ms = stream->sink->edid_caps.panel_patch.dppowerup_delay;
1253
1254 msleep(delay_dp_power_up_in_ms);
1255 }
1256
1251 panel_mode = dp_get_panel_mode(link); 1257 panel_mode = dp_get_panel_mode(link);
1252 dpcd_configure_panel_mode(link, panel_mode); 1258 dpcd_configure_panel_mode(link, panel_mode);
1253 1259
@@ -1279,13 +1285,12 @@ static enum dc_status enable_link_edp(
1279 enum dc_status status; 1285 enum dc_status status;
1280 struct dc_stream_state *stream = pipe_ctx->stream; 1286 struct dc_stream_state *stream = pipe_ctx->stream;
1281 struct dc_link *link = stream->sink->link; 1287 struct dc_link *link = stream->sink->link;
1282 1288 /*in case it is not on*/
1283 link->dc->hwss.edp_power_control(link, true); 1289 link->dc->hwss.edp_power_control(link, true);
1284 link->dc->hwss.edp_wait_for_hpd_ready(link, true); 1290 link->dc->hwss.edp_wait_for_hpd_ready(link, true);
1285 1291
1286 status = enable_link_dp(state, pipe_ctx); 1292 status = enable_link_dp(state, pipe_ctx);
1287 1293
1288 link->dc->hwss.edp_backlight_control(link, true);
1289 1294
1290 return status; 1295 return status;
1291} 1296}
@@ -1302,6 +1307,9 @@ static enum dc_status enable_link_dp_mst(
1302 if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) 1307 if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
1303 return DC_OK; 1308 return DC_OK;
1304 1309
1310 /* clear payload table */
1311 dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
1312
1305 /* set the sink to MST mode before enabling the link */ 1313 /* set the sink to MST mode before enabling the link */
1306 dp_enable_mst_on_sink(link, true); 1314 dp_enable_mst_on_sink(link, true);
1307 1315
@@ -1749,8 +1757,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
1749 link->link_enc, 1757 link->link_enc,
1750 pipe_ctx->clock_source->id, 1758 pipe_ctx->clock_source->id,
1751 display_color_depth, 1759 display_color_depth,
1752 pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A, 1760 pipe_ctx->stream->signal,
1753 pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
1754 stream->phy_pix_clk); 1761 stream->phy_pix_clk);
1755 1762
1756 if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 1763 if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
@@ -1788,9 +1795,21 @@ static enum dc_status enable_link(
1788 } 1795 }
1789 1796
1790 if (pipe_ctx->stream_res.audio && status == DC_OK) { 1797 if (pipe_ctx->stream_res.audio && status == DC_OK) {
1798 struct dc *core_dc = pipe_ctx->stream->ctx->dc;
1791 /* notify audio driver for audio modes of monitor */ 1799 /* notify audio driver for audio modes of monitor */
1800 struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
1801 unsigned int i, num_audio = 1;
1802 for (i = 0; i < MAX_PIPES; i++) {
1803 /*current_state not updated yet*/
1804 if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
1805 num_audio++;
1806 }
1807
1792 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); 1808 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
1793 1809
1810 if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
 1811 /* this is the first audio; apply the PME w/a in order to wake AZ from D3 */
1812 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
1794 /* un-mute audio */ 1813 /* un-mute audio */
1795 /* TODO: audio should be per stream rather than per link */ 1814 /* TODO: audio should be per stream rather than per link */
1796 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( 1815 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
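
The audio hunk above counts the endpoints already active in current_state (which does not yet include the stream being enabled, hence starting at one) and applies the SMU PME wake workaround only when this is the first audio endpoint. A simplified standalone sketch of that decision, with an invented toy pipe array:

#include <stdio.h>
#include <stddef.h>

#define MAX_PIPES 6

struct toy_pipe { void *audio; };

/* num_audio starts at 1 for the endpoint being enabled right now; every audio
 * already active in current_state adds one more. */
static int is_first_audio(const struct toy_pipe *current_state)
{
        int num_audio = 1;

        for (int i = 0; i < MAX_PIPES; i++)
                if (current_state[i].audio != NULL)
                        num_audio++;

        return num_audio == 1;
}

int main(void)
{
        struct toy_pipe current_state[MAX_PIPES] = { { NULL } };
        int placeholder;

        printf("first audio? %d\n", is_first_audio(current_state));    /* 1 */
        current_state[0].audio = &placeholder;
        printf("first audio? %d\n", is_first_audio(current_state));    /* 0 */
        return 0;
}
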
@@ -1931,6 +1950,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
1931 struct dc *core_dc = link->ctx->dc; 1950 struct dc *core_dc = link->ctx->dc;
1932 struct abm *abm = core_dc->res_pool->abm; 1951 struct abm *abm = core_dc->res_pool->abm;
1933 struct dmcu *dmcu = core_dc->res_pool->dmcu; 1952 struct dmcu *dmcu = core_dc->res_pool->dmcu;
1953 struct dc_context *dc_ctx = link->ctx;
1934 unsigned int controller_id = 0; 1954 unsigned int controller_id = 0;
1935 bool use_smooth_brightness = true; 1955 bool use_smooth_brightness = true;
1936 int i; 1956 int i;
@@ -1940,10 +1960,16 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
1940 (abm->funcs->set_backlight_level == NULL)) 1960 (abm->funcs->set_backlight_level == NULL))
1941 return false; 1961 return false;
1942 1962
1963 if (stream) {
1964 if (stream->bl_pwm_level == 0)
1965 frame_ramp = 0;
1966
1967 ((struct dc_stream_state *)stream)->bl_pwm_level = level;
1968 }
1969
1943 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); 1970 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
1944 1971
1945 dm_logger_write(link->ctx->logger, LOG_BACKLIGHT, 1972 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
1946 "New Backlight level: %d (0x%X)\n", level, level);
1947 1973
1948 if (dc_is_embedded_signal(link->connector_signal)) { 1974 if (dc_is_embedded_signal(link->connector_signal)) {
1949 if (stream != NULL) { 1975 if (stream != NULL) {
@@ -2110,6 +2136,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
2110 struct fixed31_32 avg_time_slots_per_mtp; 2136 struct fixed31_32 avg_time_slots_per_mtp;
2111 struct fixed31_32 pbn; 2137 struct fixed31_32 pbn;
2112 struct fixed31_32 pbn_per_slot; 2138 struct fixed31_32 pbn_per_slot;
2139 struct dc_context *dc_ctx = link->ctx;
2113 uint8_t i; 2140 uint8_t i;
2114 2141
2115 /* enable_link_dp_mst already check link->enabled_stream_count 2142 /* enable_link_dp_mst already check link->enabled_stream_count
@@ -2127,21 +2154,18 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
2127 link, pipe_ctx->stream_res.stream_enc, &proposed_table); 2154 link, pipe_ctx->stream_res.stream_enc, &proposed_table);
2128 } 2155 }
2129 else 2156 else
2130 dm_logger_write(link->ctx->logger, LOG_WARNING, 2157 DC_LOG_WARNING("Failed to update"
2131 "Failed to update"
2132 "MST allocation table for" 2158 "MST allocation table for"
2133 "pipe idx:%d\n", 2159 "pipe idx:%d\n",
2134 pipe_ctx->pipe_idx); 2160 pipe_ctx->pipe_idx);
2135 2161
2136 dm_logger_write(link->ctx->logger, LOG_MST, 2162 DC_LOG_MST("%s "
2137 "%s "
2138 "stream_count: %d: \n ", 2163 "stream_count: %d: \n ",
2139 __func__, 2164 __func__,
2140 link->mst_stream_alloc_table.stream_count); 2165 link->mst_stream_alloc_table.stream_count);
2141 2166
2142 for (i = 0; i < MAX_CONTROLLER_NUM; i++) { 2167 for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
2143 dm_logger_write(link->ctx->logger, LOG_MST, 2168 DC_LOG_MST("stream_enc[%d]: 0x%x "
2144 "stream_enc[%d]: 0x%x "
2145 "stream[%d].vcp_id: %d " 2169 "stream[%d].vcp_id: %d "
2146 "stream[%d].slot_count: %d\n", 2170 "stream[%d].slot_count: %d\n",
2147 i, 2171 i,
@@ -2192,6 +2216,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
2192 struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0); 2216 struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
2193 uint8_t i; 2217 uint8_t i;
2194 bool mst_mode = (link->type == dc_connection_mst_branch); 2218 bool mst_mode = (link->type == dc_connection_mst_branch);
2219 struct dc_context *dc_ctx = link->ctx;
2195 2220
2196 /* deallocate_mst_payload is called before disable link. When mode or 2221 /* deallocate_mst_payload is called before disable link. When mode or
2197 * disable/enable monitor, new stream is created which is not in link 2222 * disable/enable monitor, new stream is created which is not in link
@@ -2217,23 +2242,20 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
2217 link, pipe_ctx->stream_res.stream_enc, &proposed_table); 2242 link, pipe_ctx->stream_res.stream_enc, &proposed_table);
2218 } 2243 }
2219 else { 2244 else {
2220 dm_logger_write(link->ctx->logger, LOG_WARNING, 2245 DC_LOG_WARNING("Failed to update"
2221 "Failed to update"
2222 "MST allocation table for" 2246 "MST allocation table for"
2223 "pipe idx:%d\n", 2247 "pipe idx:%d\n",
2224 pipe_ctx->pipe_idx); 2248 pipe_ctx->pipe_idx);
2225 } 2249 }
2226 } 2250 }
2227 2251
2228 dm_logger_write(link->ctx->logger, LOG_MST, 2252 DC_LOG_MST("%s"
2229 "%s"
2230 "stream_count: %d: ", 2253 "stream_count: %d: ",
2231 __func__, 2254 __func__,
2232 link->mst_stream_alloc_table.stream_count); 2255 link->mst_stream_alloc_table.stream_count);
2233 2256
2234 for (i = 0; i < MAX_CONTROLLER_NUM; i++) { 2257 for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
2235 dm_logger_write(link->ctx->logger, LOG_MST, 2258 DC_LOG_MST("stream_enc[%d]: 0x%x "
2236 "stream_enc[%d]: 0x%x "
2237 "stream[%d].vcp_id: %d " 2259 "stream[%d].vcp_id: %d "
2238 "stream[%d].slot_count: %d\n", 2260 "stream[%d].slot_count: %d\n",
2239 i, 2261 i,
@@ -2267,12 +2289,24 @@ void core_link_enable_stream(
2267 struct pipe_ctx *pipe_ctx) 2289 struct pipe_ctx *pipe_ctx)
2268{ 2290{
2269 struct dc *core_dc = pipe_ctx->stream->ctx->dc; 2291 struct dc *core_dc = pipe_ctx->stream->ctx->dc;
2292 struct dc_context *dc_ctx = pipe_ctx->stream->ctx;
2293 enum dc_status status;
2294
2295 /* eDP lit up by bios already, no need to enable again. */
2296 if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
2297 core_dc->apply_edp_fast_boot_optimization) {
2298 core_dc->apply_edp_fast_boot_optimization = false;
2299 pipe_ctx->stream->dpms_off = false;
2300 return;
2301 }
2302
2303 if (pipe_ctx->stream->dpms_off)
2304 return;
2270 2305
2271 enum dc_status status = enable_link(state, pipe_ctx); 2306 status = enable_link(state, pipe_ctx);
2272 2307
2273 if (status != DC_OK) { 2308 if (status != DC_OK) {
2274 dm_logger_write(pipe_ctx->stream->ctx->logger, 2309 DC_LOG_WARNING("enabling link %u failed: %d\n",
2275 LOG_WARNING, "enabling link %u failed: %d\n",
2276 pipe_ctx->stream->sink->link->link_index, 2310 pipe_ctx->stream->sink->link->link_index,
2277 status); 2311 status);
2278 2312
@@ -2298,9 +2332,8 @@ void core_link_enable_stream(
2298 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 2332 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
2299 allocate_mst_payload(pipe_ctx); 2333 allocate_mst_payload(pipe_ctx);
2300 2334
2301 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 2335 core_dc->hwss.unblank_stream(pipe_ctx,
2302 core_dc->hwss.unblank_stream(pipe_ctx, 2336 &pipe_ctx->stream->sink->link->cur_link_settings);
2303 &pipe_ctx->stream->sink->link->cur_link_settings);
2304} 2337}
2305 2338
2306void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) 2339void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
@@ -2310,8 +2343,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
2310 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 2343 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
2311 deallocate_mst_payload(pipe_ctx); 2344 deallocate_mst_payload(pipe_ctx);
2312 2345
2313 if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) 2346 core_dc->hwss.blank_stream(pipe_ctx);
2314 core_dc->hwss.edp_backlight_control(pipe_ctx->stream->sink->link, false);
2315 2347
2316 core_dc->hwss.disable_stream(pipe_ctx, option); 2348 core_dc->hwss.disable_stream(pipe_ctx, option);
2317 2349
@@ -2328,3 +2360,36 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
2328 core_dc->hwss.set_avmute(pipe_ctx, enable); 2360 core_dc->hwss.set_avmute(pipe_ctx, enable);
2329} 2361}
2330 2362
2363void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
2364{
2365 struct gpio *hpd;
2366
2367 if (enable) {
2368 link->is_hpd_filter_disabled = false;
2369 program_hpd_filter(link);
2370 } else {
2371 link->is_hpd_filter_disabled = true;
2372 /* Obtain HPD handle */
2373 hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
2374
2375 if (!hpd)
2376 return;
2377
2378 /* Setup HPD filtering */
2379 if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
2380 struct gpio_hpd_config config;
2381
2382 config.delay_on_connect = 0;
2383 config.delay_on_disconnect = 0;
2384
2385 dal_irq_setup_hpd_filter(hpd, &config);
2386
2387 dal_gpio_close(hpd);
2388 } else {
2389 ASSERT_CRITICAL(false);
2390 }
2391 /* Release HPD handle */
2392 dal_gpio_destroy_irq(&hpd);
2393 }
2394}
2395
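
dc_link_enable_hpd_filter() added above lets a caller drop the connect/disconnect debounce while it needs to see HPD transitions immediately, then restore the filter that was programmed at detection time. A hedged, uncompiled usage fragment; do_sideband_work() is a hypothetical placeholder for whatever the caller is doing:

/* Sketch only: 'link' assumed valid; do_sideband_work() is a placeholder. */
static void example_with_unfiltered_hpd(struct dc_link *link)
{
        /* Disable filtering: HPD interrupts fire without debounce delays. */
        dc_link_enable_hpd_filter(link, false);

        do_sideband_work(link); /* hypothetical caller-specific work */

        /* Restore the connector-specific debounce programmed at detection. */
        dc_link_enable_hpd_filter(link, true);
}
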
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index d5294798b0a5..49c2face1e7a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -629,7 +629,7 @@ bool dal_ddc_service_query_ddc_data(
629 return ret; 629 return ret;
630} 630}
631 631
632enum ddc_result dal_ddc_service_read_dpcd_data( 632ssize_t dal_ddc_service_read_dpcd_data(
633 struct ddc_service *ddc, 633 struct ddc_service *ddc,
634 bool i2c, 634 bool i2c,
635 enum i2c_mot_mode mot, 635 enum i2c_mot_mode mot,
@@ -660,8 +660,9 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
660 if (dal_i2caux_submit_aux_command( 660 if (dal_i2caux_submit_aux_command(
661 ddc->ctx->i2caux, 661 ddc->ctx->i2caux,
662 ddc->ddc_pin, 662 ddc->ddc_pin,
663 &command)) 663 &command)) {
664 return DDC_RESULT_SUCESSFULL; 664 return (ssize_t)command.payloads->length;
665 }
665 666
666 return DDC_RESULT_FAILED_OPERATION; 667 return DDC_RESULT_FAILED_OPERATION;
667} 668}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 61e8c3e02d16..3b5053570229 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -11,6 +11,8 @@
11#include "dpcd_defs.h" 11#include "dpcd_defs.h"
12 12
13#include "resource.h" 13#include "resource.h"
14#define DC_LOGGER \
15 link->ctx->logger
14 16
15/* maximum pre emphasis level allowed for each voltage swing level*/ 17/* maximum pre emphasis level allowed for each voltage swing level*/
16static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { 18static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
@@ -63,8 +65,7 @@ static void wait_for_training_aux_rd_interval(
63 65
64 udelay(default_wait_in_micro_secs); 66 udelay(default_wait_in_micro_secs);
65 67
66 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING, 68 DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
67 "%s:\n wait = %d\n",
68 __func__, 69 __func__,
69 default_wait_in_micro_secs); 70 default_wait_in_micro_secs);
70} 71}
@@ -79,8 +80,7 @@ static void dpcd_set_training_pattern(
79 &dpcd_pattern.raw, 80 &dpcd_pattern.raw,
80 1); 81 1);
81 82
82 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING, 83 DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
83 "%s\n %x pattern = %x\n",
84 __func__, 84 __func__,
85 DP_TRAINING_PATTERN_SET, 85 DP_TRAINING_PATTERN_SET,
86 dpcd_pattern.v1_4.TRAINING_PATTERN_SET); 86 dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
@@ -116,8 +116,7 @@ static void dpcd_set_link_settings(
116 core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, 116 core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
117 &downspread.raw, sizeof(downspread)); 117 &downspread.raw, sizeof(downspread));
118 118
119 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING, 119 DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
120 "%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
121 __func__, 120 __func__,
122 DP_LINK_BW_SET, 121 DP_LINK_BW_SET,
123 lt_settings->link_settings.link_rate, 122 lt_settings->link_settings.link_rate,
@@ -151,8 +150,7 @@ static enum dpcd_training_patterns
151 break; 150 break;
152 default: 151 default:
153 ASSERT(0); 152 ASSERT(0);
154 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING, 153 DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
155 "%s: Invalid HW Training pattern: %d\n",
156 __func__, pattern); 154 __func__, pattern);
157 break; 155 break;
158 } 156 }
@@ -184,8 +182,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
184 dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset] 182 dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
185 = dpcd_pattern.raw; 183 = dpcd_pattern.raw;
186 184
187 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING, 185 DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
188 "%s\n %x pattern = %x\n",
189 __func__, 186 __func__,
190 DP_TRAINING_PATTERN_SET, 187 DP_TRAINING_PATTERN_SET,
191 dpcd_pattern.v1_4.TRAINING_PATTERN_SET); 188 dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
@@ -219,8 +216,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
219 dpcd_lane, 216 dpcd_lane,
220 size_in_bytes); 217 size_in_bytes);
221 218
222 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING, 219 DC_LOG_HW_LINK_TRAINING("%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
223 "%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
224 __func__, 220 __func__,
225 DP_TRAINING_LANE0_SET, 221 DP_TRAINING_LANE0_SET,
226 dpcd_lane[0].bits.VOLTAGE_SWING_SET, 222 dpcd_lane[0].bits.VOLTAGE_SWING_SET,
@@ -456,14 +452,12 @@ static void get_lane_status_and_drive_settings(
456 452
457 ln_status_updated->raw = dpcd_buf[2]; 453 ln_status_updated->raw = dpcd_buf[2];
458 454
459 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING, 455 DC_LOG_HW_LINK_TRAINING("%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
460 "%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
461 __func__, 456 __func__,
462 DP_LANE0_1_STATUS, dpcd_buf[0], 457 DP_LANE0_1_STATUS, dpcd_buf[0],
463 DP_LANE2_3_STATUS, dpcd_buf[1]); 458 DP_LANE2_3_STATUS, dpcd_buf[1]);
464 459
465 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING, 460 DC_LOG_HW_LINK_TRAINING("%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
466 "%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
467 __func__, 461 __func__,
468 DP_ADJUST_REQUEST_LANE0_1, 462 DP_ADJUST_REQUEST_LANE0_1,
469 dpcd_buf[4], 463 dpcd_buf[4],
@@ -556,8 +550,7 @@ static void dpcd_set_lane_settings(
556 } 550 }
557 */ 551 */
558 552
559 dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING, 553 DC_LOG_HW_LINK_TRAINING("%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
560 "%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
561 __func__, 554 __func__,
562 DP_TRAINING_LANE0_SET, 555 DP_TRAINING_LANE0_SET,
563 dpcd_lane[0].bits.VOLTAGE_SWING_SET, 556 dpcd_lane[0].bits.VOLTAGE_SWING_SET,
@@ -669,16 +662,14 @@ static bool perform_post_lt_adj_req_sequence(
669 } 662 }
670 663
671 if (!req_drv_setting_changed) { 664 if (!req_drv_setting_changed) {
672 dm_logger_write(link->ctx->logger, LOG_WARNING, 665 DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n",
673 "%s: Post Link Training Adjust Request Timed out\n",
674 __func__); 666 __func__);
675 667
676 ASSERT(0); 668 ASSERT(0);
677 return true; 669 return true;
678 } 670 }
679 } 671 }
680 dm_logger_write(link->ctx->logger, LOG_WARNING, 672 DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n",
681 "%s: Post Link Training Adjust Request limit reached\n",
682 __func__); 673 __func__);
683 674
684 ASSERT(0); 675 ASSERT(0);
@@ -709,6 +700,22 @@ static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link)
709 return HW_DP_TRAINING_PATTERN_2; 700 return HW_DP_TRAINING_PATTERN_2;
710} 701}
711 702
703static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
704 union lane_status *dpcd_lane_status)
705{
706 enum link_training_result result = LINK_TRAINING_SUCCESS;
707
708 if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0)
709 result = LINK_TRAINING_CR_FAIL_LANE0;
710 else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0)
711 result = LINK_TRAINING_CR_FAIL_LANE1;
712 else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0)
713 result = LINK_TRAINING_CR_FAIL_LANE23;
714 else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0)
715 result = LINK_TRAINING_CR_FAIL_LANE23;
716 return result;
717}
718
712static enum link_training_result perform_channel_equalization_sequence( 719static enum link_training_result perform_channel_equalization_sequence(
713 struct dc_link *link, 720 struct dc_link *link,
714 struct link_training_settings *lt_settings) 721 struct link_training_settings *lt_settings)
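
get_cr_failure() added above maps the per-lane CR_DONE bits onto a lane-specific training result, which decide_fallback_link_setting() further down uses to drop lane count instead of always dropping link rate. A standalone model of the same mapping, with plain int flags standing in for union lane_status:

#include <stdio.h>

enum lt_result {
        LT_SUCCESS,
        LT_CR_FAIL_LANE0,
        LT_CR_FAIL_LANE1,
        LT_CR_FAIL_LANE23,
};

/* cr_done[i] is the CR_DONE bit for lane i. */
static enum lt_result get_cr_failure_model(int lane_count, const int *cr_done)
{
        if (lane_count >= 1 && !cr_done[0])
                return LT_CR_FAIL_LANE0;
        if (lane_count >= 2 && !cr_done[1])
                return LT_CR_FAIL_LANE1;
        if (lane_count >= 4 && (!cr_done[2] || !cr_done[3]))
                return LT_CR_FAIL_LANE23;
        return LT_SUCCESS;
}

int main(void)
{
        int cr_done[4] = { 1, 0, 1, 1 };        /* lane 1 failed clock recovery */

        printf("result = %d\n", get_cr_failure_model(4, cr_done));     /* LT_CR_FAIL_LANE1 */
        return 0;
}
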
@@ -718,7 +725,7 @@ static enum link_training_result perform_channel_equalization_sequence(
718 uint32_t retries_ch_eq; 725 uint32_t retries_ch_eq;
719 enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; 726 enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
720 union lane_align_status_updated dpcd_lane_status_updated = {{0}}; 727 union lane_align_status_updated dpcd_lane_status_updated = {{0}};
721 union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};; 728 union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
722 729
723 hw_tr_pattern = get_supported_tp(link); 730 hw_tr_pattern = get_supported_tp(link);
724 731
@@ -771,7 +778,7 @@ static enum link_training_result perform_channel_equalization_sequence(
771 778
772} 779}
773 780
774static bool perform_clock_recovery_sequence( 781static enum link_training_result perform_clock_recovery_sequence(
775 struct dc_link *link, 782 struct dc_link *link,
776 struct link_training_settings *lt_settings) 783 struct link_training_settings *lt_settings)
777{ 784{
@@ -846,11 +853,11 @@ static bool perform_clock_recovery_sequence(
846 853
847 /* 5. check CR done*/ 854 /* 5. check CR done*/
848 if (is_cr_done(lane_count, dpcd_lane_status)) 855 if (is_cr_done(lane_count, dpcd_lane_status))
849 return true; 856 return LINK_TRAINING_SUCCESS;
850 857
851 /* 6. max VS reached*/ 858 /* 6. max VS reached*/
852 if (is_max_vs_reached(lt_settings)) 859 if (is_max_vs_reached(lt_settings))
853 return false; 860 break;
854 861
855 /* 7. same voltage*/ 862 /* 7. same voltage*/
856 /* Note: VS same for all lanes, 863 /* Note: VS same for all lanes,
@@ -869,20 +876,19 @@ static bool perform_clock_recovery_sequence(
869 876
870 if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { 877 if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
871 ASSERT(0); 878 ASSERT(0);
872 dm_logger_write(link->ctx->logger, LOG_ERROR, 879 DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
873 "%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
874 __func__, 880 __func__,
875 LINK_TRAINING_MAX_CR_RETRY); 881 LINK_TRAINING_MAX_CR_RETRY);
876 882
877 } 883 }
878 884
879 return false; 885 return get_cr_failure(lane_count, dpcd_lane_status);
880} 886}
881 887
882static inline bool perform_link_training_int( 888static inline enum link_training_result perform_link_training_int(
883 struct dc_link *link, 889 struct dc_link *link,
884 struct link_training_settings *lt_settings, 890 struct link_training_settings *lt_settings,
885 bool status) 891 enum link_training_result status)
886{ 892{
887 union lane_count_set lane_count_set = { {0} }; 893 union lane_count_set lane_count_set = { {0} };
888 union dpcd_training_pattern dpcd_pattern = { {0} }; 894 union dpcd_training_pattern dpcd_pattern = { {0} };
@@ -903,9 +909,9 @@ static inline bool perform_link_training_int(
903 get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4) 909 get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4)
904 return status; 910 return status;
905 911
906 if (status && 912 if (status == LINK_TRAINING_SUCCESS &&
907 perform_post_lt_adj_req_sequence(link, lt_settings) == false) 913 perform_post_lt_adj_req_sequence(link, lt_settings) == false)
908 status = false; 914 status = LINK_TRAINING_LQA_FAIL;
909 915
910 lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; 916 lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
911 lane_count_set.bits.ENHANCED_FRAMING = 1; 917 lane_count_set.bits.ENHANCED_FRAMING = 1;
@@ -928,6 +934,8 @@ enum link_training_result dc_link_dp_perform_link_training(
928 enum link_training_result status = LINK_TRAINING_SUCCESS; 934 enum link_training_result status = LINK_TRAINING_SUCCESS;
929 935
930 char *link_rate = "Unknown"; 936 char *link_rate = "Unknown";
937 char *lt_result = "Unknown";
938
931 struct link_training_settings lt_settings; 939 struct link_training_settings lt_settings;
932 940
933 memset(&lt_settings, '\0', sizeof(lt_settings)); 941 memset(&lt_settings, '\0', sizeof(lt_settings));
@@ -951,22 +959,16 @@ enum link_training_result dc_link_dp_perform_link_training(
951 959
952 /* 2. perform link training (set link training done 960 /* 2. perform link training (set link training done
953 * to false is done as well)*/ 961 * to false is done as well)*/
954 if (!perform_clock_recovery_sequence(link, &lt_settings)) { 962 status = perform_clock_recovery_sequence(link, &lt_settings);
955 status = LINK_TRAINING_CR_FAIL; 963 if (status == LINK_TRAINING_SUCCESS) {
956 } else {
957 status = perform_channel_equalization_sequence(link, 964 status = perform_channel_equalization_sequence(link,
958 &lt_settings); 965 &lt_settings);
959 } 966 }
960 967
961 if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) { 968 if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
962 if (!perform_link_training_int(link, 969 status = perform_link_training_int(link,
963 &lt_settings, 970 &lt_settings,
964 status == LINK_TRAINING_SUCCESS)) { 971 status);
965 /* the next link training setting in this case
966 * would be the same as CR failure case.
967 */
968 status = LINK_TRAINING_CR_FAIL;
969 }
970 } 972 }
971 973
972 /* 6. print status message*/ 974 /* 6. print status message*/
@@ -991,13 +993,37 @@ enum link_training_result dc_link_dp_perform_link_training(
991 break; 993 break;
992 } 994 }
993 995
996 switch (status) {
997 case LINK_TRAINING_SUCCESS:
998 lt_result = "pass";
999 break;
1000 case LINK_TRAINING_CR_FAIL_LANE0:
1001 lt_result = "CR failed lane0";
1002 break;
1003 case LINK_TRAINING_CR_FAIL_LANE1:
1004 lt_result = "CR failed lane1";
1005 break;
1006 case LINK_TRAINING_CR_FAIL_LANE23:
1007 lt_result = "CR failed lane23";
1008 break;
1009 case LINK_TRAINING_EQ_FAIL_CR:
1010 lt_result = "CR failed in EQ";
1011 break;
1012 case LINK_TRAINING_EQ_FAIL_EQ:
1013 lt_result = "EQ failed";
1014 break;
1015 case LINK_TRAINING_LQA_FAIL:
1016 lt_result = "LQA failed";
1017 break;
1018 default:
1019 break;
1020 }
1021
994 /* Connectivity log: link training */ 1022 /* Connectivity log: link training */
995 CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d", 1023 CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d",
996 link_rate, 1024 link_rate,
997 lt_settings.link_settings.lane_count, 1025 lt_settings.link_settings.lane_count,
998 (status == LINK_TRAINING_SUCCESS) ? "pass" : 1026 lt_result,
999 ((status == LINK_TRAINING_CR_FAIL) ? "CR failed" :
1000 "EQ failed"),
1001 lt_settings.lane_settings[0].VOLTAGE_SWING, 1027 lt_settings.lane_settings[0].VOLTAGE_SWING,
1002 lt_settings.lane_settings[0].PRE_EMPHASIS); 1028 lt_settings.lane_settings[0].PRE_EMPHASIS);
1003 1029
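
For illustration only: callers that used to test a bool can now branch on the finer-grained result codes returned by dc_link_dp_perform_link_training(). A minimal sketch, assuming the function keeps its existing link / link-settings / skip_video_pattern parameters; the fallback helper at the end is hypothetical:

enum link_training_result lt_status;

lt_status = dc_link_dp_perform_link_training(link, &link_settings, skip_video_pattern);

switch (lt_status) {
case LINK_TRAINING_SUCCESS:
        break;
case LINK_TRAINING_CR_FAIL_LANE0:
case LINK_TRAINING_CR_FAIL_LANE1:
case LINK_TRAINING_CR_FAIL_LANE23:
case LINK_TRAINING_LQA_FAIL:
        /* clock recovery (or link quality) failed: retry with the reduced
         * rate/lane count chosen by decide_fallback_link_setting() below.
         * retry_with_fallback() is a hypothetical caller-side helper. */
        retry_with_fallback(link, lt_status);
        break;
default:
        /* EQ-stage failures (LINK_TRAINING_EQ_FAIL_CR / _EQ) land here */
        break;
}
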
@@ -1115,6 +1141,7 @@ bool dp_hbr_verify_link_cap(
1115 dp_cs_id, 1141 dp_cs_id,
1116 cur); 1142 cur);
1117 1143
1144
1118 if (skip_link_training) 1145 if (skip_link_training)
1119 success = true; 1146 success = true;
1120 else { 1147 else {
@@ -1279,7 +1306,10 @@ static bool decide_fallback_link_setting(
1279 return false; 1306 return false;
1280 1307
1281 switch (training_result) { 1308 switch (training_result) {
1282 case LINK_TRAINING_CR_FAIL: 1309 case LINK_TRAINING_CR_FAIL_LANE0:
1310 case LINK_TRAINING_CR_FAIL_LANE1:
1311 case LINK_TRAINING_CR_FAIL_LANE23:
1312 case LINK_TRAINING_LQA_FAIL:
1283 { 1313 {
1284 if (!reached_minimum_link_rate 1314 if (!reached_minimum_link_rate
1285 (current_link_setting->link_rate)) { 1315 (current_link_setting->link_rate)) {
@@ -1290,8 +1320,18 @@ static bool decide_fallback_link_setting(
1290 (current_link_setting->lane_count)) { 1320 (current_link_setting->lane_count)) {
1291 current_link_setting->link_rate = 1321 current_link_setting->link_rate =
1292 initial_link_settings.link_rate; 1322 initial_link_settings.link_rate;
1293 current_link_setting->lane_count = 1323 if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
1294 reduce_lane_count( 1324 return false;
1325 else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
1326 current_link_setting->lane_count =
1327 LANE_COUNT_ONE;
1328 else if (training_result ==
1329 LINK_TRAINING_CR_FAIL_LANE23)
1330 current_link_setting->lane_count =
1331 LANE_COUNT_TWO;
1332 else
1333 current_link_setting->lane_count =
1334 reduce_lane_count(
1295 current_link_setting->lane_count); 1335 current_link_setting->lane_count);
1296 } else { 1336 } else {
1297 return false; 1337 return false;
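
The per-lane CR codes above map directly onto the next lane count to try: lane 0 failing aborts the fallback, lane 1 failing drops to one lane, lanes 2/3 failing drop to two. Restated as a hypothetical helper (reduce_lane_count() and the LANE_COUNT_* values are the existing ones; the enum type name dc_lane_count is assumed):

static enum dc_lane_count next_lane_count(enum link_training_result res,
                enum dc_lane_count cur)
{
        /* LINK_TRAINING_CR_FAIL_LANE0 is handled by giving up (the
         * "return false" in the hunk above), not by reducing lanes. */
        if (res == LINK_TRAINING_CR_FAIL_LANE1)
                return LANE_COUNT_ONE;          /* only lane 0 still trains */
        if (res == LINK_TRAINING_CR_FAIL_LANE23)
                return LANE_COUNT_TWO;          /* lanes 0/1 still train */
        return reduce_lane_count(cur);          /* generic one-step reduction */
}
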
@@ -1465,7 +1505,7 @@ void decide_link_settings(struct dc_stream_state *stream,
1465 /* MST doesn't perform link training for now 1505 /* MST doesn't perform link training for now
1466 * TODO: add MST specific link training routine 1506 * TODO: add MST specific link training routine
1467 */ 1507 */
1468 if (is_mst_supported(link)) { 1508 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1469 *link_setting = link->verified_link_cap; 1509 *link_setting = link->verified_link_cap;
1470 return; 1510 return;
1471 } 1511 }
@@ -1556,8 +1596,7 @@ static bool hpd_rx_irq_check_link_loss_status(
1556 if (sink_status_changed || 1596 if (sink_status_changed ||
1557 !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { 1597 !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
1558 1598
1559 dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ, 1599 DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
1560 "%s: Link Status changed.\n", __func__);
1561 1600
1562 return_code = true; 1601 return_code = true;
1563 1602
@@ -1570,8 +1609,7 @@ static bool hpd_rx_irq_check_link_loss_status(
1570 sizeof(irq_reg_rx_power_state)); 1609 sizeof(irq_reg_rx_power_state));
1571 1610
1572 if (dpcd_result != DC_OK) { 1611 if (dpcd_result != DC_OK) {
1573 dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ, 1612 DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
1574 "%s: DPCD read failed to obtain power state.\n",
1575 __func__); 1613 __func__);
1576 } else { 1614 } else {
1577 if (irq_reg_rx_power_state != DP_SET_POWER_D0) 1615 if (irq_reg_rx_power_state != DP_SET_POWER_D0)
@@ -1932,8 +1970,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
1932 * PSR and device auto test, refer to function handle_sst_hpd_irq 1970 * PSR and device auto test, refer to function handle_sst_hpd_irq
1933 * in DAL2.1*/ 1971 * in DAL2.1*/
1934 1972
1935 dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ, 1973 DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n",
1936 "%s: Got short pulse HPD on link %d\n",
1937 __func__, link->link_index); 1974 __func__, link->link_index);
1938 1975
1939 1976
@@ -1947,8 +1984,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
1947 *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data; 1984 *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
1948 1985
1949 if (result != DC_OK) { 1986 if (result != DC_OK) {
1950 dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ, 1987 DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n",
1951 "%s: DPCD read failed to obtain irq data\n",
1952 __func__); 1988 __func__);
1953 return false; 1989 return false;
1954 } 1990 }
@@ -1966,8 +2002,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
1966 } 2002 }
1967 2003
1968 if (!allow_hpd_rx_irq(link)) { 2004 if (!allow_hpd_rx_irq(link)) {
1969 dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ, 2005 DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
1970 "%s: skipping HPD handling on %d\n",
1971 __func__, link->link_index); 2006 __func__, link->link_index);
1972 return false; 2007 return false;
1973 } 2008 }
@@ -2235,13 +2270,14 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
2235 link->wa_flags.dp_keep_receiver_powered = false; 2270 link->wa_flags.dp_keep_receiver_powered = false;
2236} 2271}
2237 2272
2238static void retrieve_link_cap(struct dc_link *link) 2273static bool retrieve_link_cap(struct dc_link *link)
2239{ 2274{
2240 uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1]; 2275 uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1];
2241 2276
2242 union down_stream_port_count down_strm_port_count; 2277 union down_stream_port_count down_strm_port_count;
2243 union edp_configuration_cap edp_config_cap; 2278 union edp_configuration_cap edp_config_cap;
2244 union dp_downstream_port_present ds_port = { 0 }; 2279 union dp_downstream_port_present ds_port = { 0 };
2280 enum dc_status status = DC_ERROR_UNEXPECTED;
2245 2281
2246 memset(dpcd_data, '\0', sizeof(dpcd_data)); 2282 memset(dpcd_data, '\0', sizeof(dpcd_data));
2247 memset(&down_strm_port_count, 2283 memset(&down_strm_port_count,
@@ -2249,11 +2285,16 @@ static void retrieve_link_cap(struct dc_link *link)
2249 memset(&edp_config_cap, '\0', 2285 memset(&edp_config_cap, '\0',
2250 sizeof(union edp_configuration_cap)); 2286 sizeof(union edp_configuration_cap));
2251 2287
2252 core_link_read_dpcd( 2288 status = core_link_read_dpcd(
2253 link, 2289 link,
2254 DP_DPCD_REV, 2290 DP_DPCD_REV,
2255 dpcd_data, 2291 dpcd_data,
2256 sizeof(dpcd_data)); 2292 sizeof(dpcd_data));
2293
2294 if (status != DC_OK) {
2295 dm_error("%s: Read dpcd data failed.\n", __func__);
2296 return false;
2297 }
2257 2298
2258 { 2299 {
2259 union training_aux_rd_interval aux_rd_interval; 2300 union training_aux_rd_interval aux_rd_interval;
@@ -2315,11 +2356,13 @@ static void retrieve_link_cap(struct dc_link *link)
2315 2356
2316 /* Connectivity log: detection */ 2357 /* Connectivity log: detection */
2317 CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); 2358 CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
2359
2360 return true;
2318} 2361}
2319 2362
2320void detect_dp_sink_caps(struct dc_link *link) 2363bool detect_dp_sink_caps(struct dc_link *link)
2321{ 2364{
2322 retrieve_link_cap(link); 2365 return retrieve_link_cap(link);
2323 2366
2324 /* dc init_hw has power encoder using default 2367 /* dc init_hw has power encoder using default
2325 * signal for connector. For native DP, no 2368 * signal for connector. For native DP, no
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 2096f2a179f2..7c866a7d5e77 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -102,7 +102,7 @@ void dp_enable_link_phy(
102 dp_receiver_power_ctrl(link, true); 102 dp_receiver_power_ctrl(link, true);
103} 103}
104 104
105static bool edp_receiver_ready_T9(struct dc_link *link) 105bool edp_receiver_ready_T9(struct dc_link *link)
106{ 106{
107 unsigned int tries = 0; 107 unsigned int tries = 0;
108 unsigned char sinkstatus = 0; 108 unsigned char sinkstatus = 0;
@@ -123,6 +123,28 @@ static bool edp_receiver_ready_T9(struct dc_link *link)
123 } while (++tries < 50); 123 } while (++tries < 50);
124 return result; 124 return result;
125} 125}
126bool edp_receiver_ready_T7(struct dc_link *link)
127{
128 unsigned int tries = 0;
129 unsigned char sinkstatus = 0;
130 unsigned char edpRev = 0;
131 enum dc_status result = DC_OK;
132
133 result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
134 if (result == DC_OK && edpRev < DP_EDP_12)
135 return true;
136 /* starting from eDP version 1.2, SINK_STATUS indicates the sink is ready.*/
137 do {
138 sinkstatus = 0;
139 result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
140 if (sinkstatus == 1)
141 break;
142 if (result != DC_OK)
143 break;
144 udelay(25); //Max T7 is 50ms
145 } while (++tries < 300);
146 return result;
147}
126 148
127void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) 149void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
128{ 150{
@@ -130,7 +152,6 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
130 dp_receiver_power_ctrl(link, false); 152 dp_receiver_power_ctrl(link, false);
131 153
132 if (signal == SIGNAL_TYPE_EDP) { 154 if (signal == SIGNAL_TYPE_EDP) {
133 edp_receiver_ready_T9(link);
134 link->link_enc->funcs->disable_output(link->link_enc, signal); 155 link->link_enc->funcs->disable_output(link->link_enc, signal);
135 link->dc->hwss.edp_power_control(link, false); 156 link->dc->hwss.edp_power_control(link, false);
136 } else 157 } else
@@ -258,6 +279,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
258 279
259 for (i = 0; i < MAX_PIPES; i++) { 280 for (i = 0; i < MAX_PIPES; i++) {
260 if (pipes[i].stream != NULL && 281 if (pipes[i].stream != NULL &&
282 !pipes[i].top_pipe &&
261 pipes[i].stream->sink != NULL && 283 pipes[i].stream->sink != NULL &&
262 pipes[i].stream->sink->link != NULL && 284 pipes[i].stream->sink->link != NULL &&
263 pipes[i].stream_res.stream_enc != NULL && 285 pipes[i].stream_res.stream_enc != NULL &&
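
edp_receiver_ready_T9() is exported and an edp_receiver_ready_T7() counterpart is added, while the implicit T9 wait is dropped from dp_disable_link_phy(); the readiness waits are now expected at the call sites that sequence eDP panel power. A rough sketch of where they would sit (the surrounding enable/disable steps are only indicated by comments):

/* after enabling the eDP output: wait for the sink to report ready (T7) */
edp_receiver_ready_T7(link);

/* before disabling the output and powering the panel down: honour T9 */
edp_receiver_ready_T9(link);
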
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 95b8dd0e53c6..ba3487e97361 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -35,6 +35,7 @@
35#include "core_types.h" 35#include "core_types.h"
36#include "set_mode_types.h" 36#include "set_mode_types.h"
37#include "virtual/virtual_stream_encoder.h" 37#include "virtual/virtual_stream_encoder.h"
38#include "dpcd_defs.h"
38 39
39#include "dce80/dce80_resource.h" 40#include "dce80/dce80_resource.h"
40#include "dce100/dce100_resource.h" 41#include "dce100/dce100_resource.h"
@@ -44,7 +45,8 @@
44#include "dcn10/dcn10_resource.h" 45#include "dcn10/dcn10_resource.h"
45#endif 46#endif
46#include "dce120/dce120_resource.h" 47#include "dce120/dce120_resource.h"
47 48#define DC_LOGGER \
49 ctx->logger
48enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) 50enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
49{ 51{
50 enum dce_version dc_version = DCE_VERSION_UNKNOWN; 52 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
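
This is part of a tree-wide switch from dm_logger_write(ctx->logger, LOG_xxx, ...) to DC_LOG_xxx() macros: each file defines DC_LOGGER to whatever logger handle it has in scope and the macros pick it up. A minimal sketch of the pattern, assuming a dc_context pointer named ctx is visible where the macro is used:

#define DC_LOGGER \
        ctx->logger

static void log_bad_clock(struct dc_context *ctx)
{
        /* DC_LOG_ERROR() expands against the DC_LOGGER definition above */
        DC_LOG_ERROR("%s: Bad requested pixel clock", __func__);
}
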
@@ -696,7 +698,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
696 698
697 699
698 /* Adjust for viewport end clip-off */ 700 /* Adjust for viewport end clip-off */
699 if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) { 701 if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
700 int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x; 702 int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
701 int int_part = dal_fixed31_32_floor( 703 int int_part = dal_fixed31_32_floor(
702 dal_fixed31_32_sub(data->inits.h, data->ratios.horz)); 704 dal_fixed31_32_sub(data->inits.h, data->ratios.horz));
@@ -704,7 +706,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
704 int_part = int_part > 0 ? int_part : 0; 706 int_part = int_part > 0 ? int_part : 0;
705 data->viewport.width += int_part < vp_clip ? int_part : vp_clip; 707 data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
706 } 708 }
707 if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) { 709 if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
708 int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y; 710 int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
709 int int_part = dal_fixed31_32_floor( 711 int int_part = dal_fixed31_32_floor(
710 dal_fixed31_32_sub(data->inits.v, data->ratios.vert)); 712 dal_fixed31_32_sub(data->inits.v, data->ratios.vert));
@@ -712,7 +714,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
712 int_part = int_part > 0 ? int_part : 0; 714 int_part = int_part > 0 ? int_part : 0;
713 data->viewport.height += int_part < vp_clip ? int_part : vp_clip; 715 data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
714 } 716 }
715 if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) { 717 if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
716 int vp_clip = (src.x + src.width) / vpc_div - 718 int vp_clip = (src.x + src.width) / vpc_div -
717 data->viewport_c.width - data->viewport_c.x; 719 data->viewport_c.width - data->viewport_c.x;
718 int int_part = dal_fixed31_32_floor( 720 int int_part = dal_fixed31_32_floor(
@@ -721,7 +723,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
721 int_part = int_part > 0 ? int_part : 0; 723 int_part = int_part > 0 ? int_part : 0;
722 data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip; 724 data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
723 } 725 }
724 if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) { 726 if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
725 int vp_clip = (src.y + src.height) / vpc_div - 727 int vp_clip = (src.y + src.height) / vpc_div -
726 data->viewport_c.height - data->viewport_c.y; 728 data->viewport_c.height - data->viewport_c.y;
727 int int_part = dal_fixed31_32_floor( 729 int int_part = dal_fixed31_32_floor(
@@ -833,7 +835,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
833 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 835 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
834 struct view recout_skip = { 0 }; 836 struct view recout_skip = { 0 };
835 bool res = false; 837 bool res = false;
836 838 struct dc_context *ctx = pipe_ctx->stream->ctx;
837 /* Important: scaling ratio calculation requires pixel format, 839 /* Important: scaling ratio calculation requires pixel format,
838 * lb depth calculation requires recout and taps require scaling ratios. 840 * lb depth calculation requires recout and taps require scaling ratios.
839 * Inits require viewport, taps, ratios and recout of split pipe 841 * Inits require viewport, taps, ratios and recout of split pipe
@@ -892,7 +894,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
892 /* May need to re-check lb size after this in some obscure scenario */ 894 /* May need to re-check lb size after this in some obscure scenario */
893 calculate_inits_and_adj_vp(pipe_ctx, &recout_skip); 895 calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
894 896
895 dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER, 897 DC_LOG_SCALER(
896 "%s: Viewport:\nheight:%d width:%d x:%d " 898 "%s: Viewport:\nheight:%d width:%d x:%d "
897 "y:%d\n dst_rect:\nheight:%d width:%d x:%d " 899 "y:%d\n dst_rect:\nheight:%d width:%d x:%d "
898 "y:%d\n", 900 "y:%d\n",
@@ -1054,6 +1056,7 @@ static int acquire_first_split_pipe(
1054 pipe_ctx->plane_res.ipp = pool->ipps[i]; 1056 pipe_ctx->plane_res.ipp = pool->ipps[i];
1055 pipe_ctx->plane_res.dpp = pool->dpps[i]; 1057 pipe_ctx->plane_res.dpp = pool->dpps[i];
1056 pipe_ctx->stream_res.opp = pool->opps[i]; 1058 pipe_ctx->stream_res.opp = pool->opps[i];
1059 pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst;
1057 pipe_ctx->pipe_idx = i; 1060 pipe_ctx->pipe_idx = i;
1058 1061
1059 pipe_ctx->stream = stream; 1062 pipe_ctx->stream = stream;
@@ -1121,6 +1124,7 @@ bool dc_add_plane_to_context(
1121 ASSERT(tail_pipe); 1124 ASSERT(tail_pipe);
1122 1125
1123 free_pipe->stream_res.tg = tail_pipe->stream_res.tg; 1126 free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
1127 free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
1124 free_pipe->stream_res.opp = tail_pipe->stream_res.opp; 1128 free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
1125 free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc; 1129 free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
1126 free_pipe->stream_res.audio = tail_pipe->stream_res.audio; 1130 free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
@@ -1360,9 +1364,6 @@ bool dc_is_stream_scaling_unchanged(
1360 return true; 1364 return true;
1361} 1365}
1362 1366
1363/* Maximum TMDS single link pixel clock 165MHz */
1364#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
1365
1366static void update_stream_engine_usage( 1367static void update_stream_engine_usage(
1367 struct resource_context *res_ctx, 1368 struct resource_context *res_ctx,
1368 const struct resource_pool *pool, 1369 const struct resource_pool *pool,
@@ -1409,6 +1410,8 @@ static int acquire_first_free_pipe(
1409 pipe_ctx->plane_res.xfm = pool->transforms[i]; 1410 pipe_ctx->plane_res.xfm = pool->transforms[i];
1410 pipe_ctx->plane_res.dpp = pool->dpps[i]; 1411 pipe_ctx->plane_res.dpp = pool->dpps[i];
1411 pipe_ctx->stream_res.opp = pool->opps[i]; 1412 pipe_ctx->stream_res.opp = pool->opps[i];
1413 if (pool->dpps[i])
1414 pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst;
1412 pipe_ctx->pipe_idx = i; 1415 pipe_ctx->pipe_idx = i;
1413 1416
1414 1417
@@ -1555,6 +1558,9 @@ enum dc_status dc_remove_stream_from_ctx(
1555 dc->res_pool, 1558 dc->res_pool,
1556 del_pipe->clock_source); 1559 del_pipe->clock_source);
1557 1560
1561 if (dc->res_pool->funcs->remove_stream_from_ctx)
1562 dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);
1563
1558 memset(del_pipe, 0, sizeof(*del_pipe)); 1564 memset(del_pipe, 0, sizeof(*del_pipe));
1559 1565
1560 break; 1566 break;
@@ -1731,6 +1737,10 @@ enum dc_status resource_map_pool_resources(
1731 pipe_ctx->stream_res.audio, true); 1737 pipe_ctx->stream_res.audio, true);
1732 } 1738 }
1733 1739
1740 /* Add ABM to the resource if on EDP */
1741 if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal))
1742 pipe_ctx->stream_res.abm = pool->abm;
1743
1734 for (i = 0; i < context->stream_count; i++) 1744 for (i = 0; i < context->stream_count; i++)
1735 if (context->streams[i] == stream) { 1745 if (context->streams[i] == stream) {
1736 context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst; 1746 context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
@@ -2431,7 +2441,8 @@ static void set_vsc_info_packet(
2431 unsigned int vscPacketRevision = 0; 2441 unsigned int vscPacketRevision = 0;
2432 unsigned int i; 2442 unsigned int i;
2433 2443
2434 if (stream->sink->link->psr_enabled) { 2444 /*VSC packet set to 2 when DP revision >= 1.2*/
2445 if (stream->psr_version != 0) {
2435 vscPacketRevision = 2; 2446 vscPacketRevision = 2;
2436 } 2447 }
2437 2448
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 261811e0c094..ce0747ed0f00 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -33,8 +33,7 @@
33/******************************************************************************* 33/*******************************************************************************
34 * Private functions 34 * Private functions
35 ******************************************************************************/ 35 ******************************************************************************/
36#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000 36void update_stream_signal(struct dc_stream_state *stream)
37static void update_stream_signal(struct dc_stream_state *stream)
38{ 37{
39 38
40 struct dc_sink *dc_sink = stream->sink; 39 struct dc_sink *dc_sink = stream->sink;
@@ -45,8 +44,9 @@ static void update_stream_signal(struct dc_stream_state *stream)
45 stream->signal = dc_sink->sink_signal; 44 stream->signal = dc_sink->sink_signal;
46 45
47 if (dc_is_dvi_signal(stream->signal)) { 46 if (dc_is_dvi_signal(stream->signal)) {
48 if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST && 47 if (stream->ctx->dc->caps.dual_link_dvi &&
49 stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) 48 stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
49 stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
50 stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK; 50 stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
51 else 51 else
52 stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 52 stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
@@ -193,44 +193,19 @@ bool dc_stream_set_cursor_attributes(
193 193
194 core_dc = stream->ctx->dc; 194 core_dc = stream->ctx->dc;
195 res_ctx = &core_dc->current_state->res_ctx; 195 res_ctx = &core_dc->current_state->res_ctx;
196 stream->cursor_attributes = *attributes;
196 197
197 for (i = 0; i < MAX_PIPES; i++) { 198 for (i = 0; i < MAX_PIPES; i++) {
198 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 199 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
199 200
200 if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp)) 201 if (pipe_ctx->stream != stream)
201 continue; 202 continue;
202 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) 203 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
203 continue; 204 continue;
204 205
205 206
206 if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL) 207 core_dc->hwss.set_cursor_attribute(pipe_ctx);
207 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
208 pipe_ctx->plane_res.ipp, attributes);
209
210 if (pipe_ctx->plane_res.hubp != NULL &&
211 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
212 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
213 pipe_ctx->plane_res.hubp, attributes);
214
215 if (pipe_ctx->plane_res.mi != NULL &&
216 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
217 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
218 pipe_ctx->plane_res.mi, attributes);
219
220
221 if (pipe_ctx->plane_res.xfm != NULL &&
222 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
223 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
224 pipe_ctx->plane_res.xfm, attributes);
225
226 if (pipe_ctx->plane_res.dpp != NULL &&
227 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
228 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
229 pipe_ctx->plane_res.dpp, attributes->color_format);
230 } 208 }
231
232 stream->cursor_attributes = *attributes;
233
234 return true; 209 return true;
235} 210}
236 211
@@ -254,55 +229,21 @@ bool dc_stream_set_cursor_position(
254 229
255 core_dc = stream->ctx->dc; 230 core_dc = stream->ctx->dc;
256 res_ctx = &core_dc->current_state->res_ctx; 231 res_ctx = &core_dc->current_state->res_ctx;
232 stream->cursor_position = *position;
257 233
258 for (i = 0; i < MAX_PIPES; i++) { 234 for (i = 0; i < MAX_PIPES; i++) {
259 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 235 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
260 struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
261 struct mem_input *mi = pipe_ctx->plane_res.mi;
262 struct hubp *hubp = pipe_ctx->plane_res.hubp;
263 struct dpp *dpp = pipe_ctx->plane_res.dpp;
264 struct dc_cursor_position pos_cpy = *position;
265 struct dc_cursor_mi_param param = {
266 .pixel_clk_khz = stream->timing.pix_clk_khz,
267 .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
268 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
269 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
270 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
271 };
272 236
273 if (pipe_ctx->stream != stream || 237 if (pipe_ctx->stream != stream ||
274 (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || 238 (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
275 !pipe_ctx->plane_state || 239 !pipe_ctx->plane_state ||
276 (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp)) 240 (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
277 continue; 241 !pipe_ctx->plane_res.ipp)
278
279 if (pipe_ctx->plane_state->address.type
280 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
281 pos_cpy.enable = false;
282
283 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
284 pos_cpy.enable = false;
285
286
287 if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
288 ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
289
290 if (mi != NULL && mi->funcs->set_cursor_position != NULL)
291 mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
292
293 if (!hubp)
294 continue; 242 continue;
295 243
296 if (hubp->funcs->set_cursor_position != NULL) 244 core_dc->hwss.set_cursor_position(pipe_ctx);
297 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
298
299 if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
300 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
301
302 } 245 }
303 246
304 stream->cursor_position = *position;
305
306 return true; 247 return true;
307} 248}
308 249
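
With the per-block cursor programming folded into the hwss set_cursor_attribute/set_cursor_position hooks, a display manager only issues the two stream-level calls; the attributes and position are cached on the stream before the per-pipe dispatch. A usage sketch, assuming the usual dc_cursor_attributes / dc_cursor_position argument types and leaving the field values to the caller:

struct dc_cursor_attributes attrs;
struct dc_cursor_position pos;

memset(&attrs, 0, sizeof(attrs));
memset(&pos, 0, sizeof(pos));
/* fill in surface address, width/height, color format, x/y, enable */

if (dc_stream_set_cursor_attributes(stream, &attrs))
        dc_stream_set_cursor_position(stream, &pos);
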
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e2e3c9df79ea..fa4b3c8b3bb7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
38#include "inc/compressor.h" 38#include "inc/compressor.h"
39#include "dml/display_mode_lib.h" 39#include "dml/display_mode_lib.h"
40 40
41#define DC_VER "3.1.27" 41#define DC_VER "3.1.38"
42 42
43#define MAX_SURFACES 3 43#define MAX_SURFACES 3
44#define MAX_STREAMS 6 44#define MAX_STREAMS 6
@@ -48,6 +48,18 @@
48/******************************************************************************* 48/*******************************************************************************
49 * Display Core Interfaces 49 * Display Core Interfaces
50 ******************************************************************************/ 50 ******************************************************************************/
51struct dmcu_version {
52 unsigned int date;
53 unsigned int month;
54 unsigned int year;
55 unsigned int interface_version;
56};
57
58struct dc_versions {
59 const char *dc_ver;
60 struct dmcu_version dmcu_version;
61};
62
51struct dc_caps { 63struct dc_caps {
52 uint32_t max_streams; 64 uint32_t max_streams;
53 uint32_t max_links; 65 uint32_t max_links;
@@ -62,6 +74,7 @@ struct dc_caps {
62 bool dcc_const_color; 74 bool dcc_const_color;
63 bool dynamic_audio; 75 bool dynamic_audio;
64 bool is_apu; 76 bool is_apu;
77 bool dual_link_dvi;
65}; 78};
66 79
67struct dc_dcc_surface_param { 80struct dc_dcc_surface_param {
@@ -94,6 +107,7 @@ struct dc_surface_dcc_cap {
94}; 107};
95 108
96struct dc_static_screen_events { 109struct dc_static_screen_events {
110 bool force_trigger;
97 bool cursor_update; 111 bool cursor_update;
98 bool surface_update; 112 bool surface_update;
99 bool overlay_update; 113 bool overlay_update;
@@ -170,6 +184,16 @@ enum wm_report_mode {
170 WM_REPORT_OVERRIDE = 1, 184 WM_REPORT_OVERRIDE = 1,
171}; 185};
172 186
187struct dc_clocks {
188 int dispclk_khz;
189 int max_supported_dppclk_khz;
190 int dppclk_khz;
191 int dcfclk_khz;
192 int socclk_khz;
193 int dcfclk_deep_sleep_khz;
194 int fclk_khz;
195};
196
173struct dc_debug { 197struct dc_debug {
174 bool surface_visual_confirm; 198 bool surface_visual_confirm;
175 bool sanity_checks; 199 bool sanity_checks;
@@ -211,11 +235,15 @@ struct dc_debug {
211 bool disable_stereo_support; 235 bool disable_stereo_support;
212 bool vsr_support; 236 bool vsr_support;
213 bool performance_trace; 237 bool performance_trace;
238 bool az_endpoint_mute_only;
239 bool always_use_regamma;
240 bool p010_mpo_support;
214}; 241};
215struct dc_state; 242struct dc_state;
216struct resource_pool; 243struct resource_pool;
217struct dce_hwseq; 244struct dce_hwseq;
218struct dc { 245struct dc {
246 struct dc_versions versions;
219 struct dc_caps caps; 247 struct dc_caps caps;
220 struct dc_cap_funcs cap_funcs; 248 struct dc_cap_funcs cap_funcs;
221 struct dc_config config; 249 struct dc_config config;
@@ -252,6 +280,8 @@ struct dc {
252 280
253 bool optimized_required; 281 bool optimized_required;
254 282
283 bool apply_edp_fast_boot_optimization;
284
255 /* FBC compressor */ 285 /* FBC compressor */
256#if defined(CONFIG_DRM_AMD_DC_FBC) 286#if defined(CONFIG_DRM_AMD_DC_FBC)
257 struct compressor *fbc_compressor; 287 struct compressor *fbc_compressor;
@@ -288,9 +318,6 @@ struct dc_init_data {
288 318
289 struct dc_config flags; 319 struct dc_config flags;
290 uint32_t log_mask; 320 uint32_t log_mask;
291#if defined(CONFIG_DRM_AMD_DC_FBC)
292 uint64_t fbc_gpu_addr;
293#endif
294}; 321};
295 322
296struct dc *dc_create(const struct dc_init_data *init_params); 323struct dc *dc_create(const struct dc_init_data *init_params);
@@ -369,6 +396,8 @@ struct dc_transfer_func {
369 struct dc_transfer_func_distributed_points tf_pts; 396 struct dc_transfer_func_distributed_points tf_pts;
370 enum dc_transfer_func_type type; 397 enum dc_transfer_func_type type;
371 enum dc_transfer_func_predefined tf; 398 enum dc_transfer_func_predefined tf;
399 /* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
400 uint32_t sdr_ref_white_level;
372 struct dc_context *ctx; 401 struct dc_context *ctx;
373}; 402};
374 403
@@ -397,12 +426,15 @@ union surface_update_flags {
397 uint32_t swizzle_change:1; 426 uint32_t swizzle_change:1;
398 uint32_t scaling_change:1; 427 uint32_t scaling_change:1;
399 uint32_t position_change:1; 428 uint32_t position_change:1;
400 uint32_t in_transfer_func:1; 429 uint32_t in_transfer_func_change:1;
401 uint32_t input_csc_change:1; 430 uint32_t input_csc_change:1;
431 uint32_t output_tf_change:1;
432 uint32_t pixel_format_change:1;
402 433
403 /* Full updates */ 434 /* Full updates */
404 uint32_t new_plane:1; 435 uint32_t new_plane:1;
405 uint32_t bpp_change:1; 436 uint32_t bpp_change:1;
437 uint32_t gamma_change:1;
406 uint32_t bandwidth_change:1; 438 uint32_t bandwidth_change:1;
407 uint32_t clock_change:1; 439 uint32_t clock_change:1;
408 uint32_t stereo_format_change:1; 440 uint32_t stereo_format_change:1;
@@ -414,6 +446,7 @@ union surface_update_flags {
414 446
415struct dc_plane_state { 447struct dc_plane_state {
416 struct dc_plane_address address; 448 struct dc_plane_address address;
449 struct dc_plane_flip_time time;
417 struct scaling_taps scaling_quality; 450 struct scaling_taps scaling_quality;
418 struct rect src_rect; 451 struct rect src_rect;
419 struct rect dst_rect; 452 struct rect dst_rect;
@@ -429,6 +462,7 @@ struct dc_plane_state {
429 struct dc_bias_and_scale *bias_and_scale; 462 struct dc_bias_and_scale *bias_and_scale;
430 struct csc_transform input_csc_color_matrix; 463 struct csc_transform input_csc_color_matrix;
431 struct fixed31_32 coeff_reduction_factor; 464 struct fixed31_32 coeff_reduction_factor;
465 uint32_t sdr_white_level;
432 466
433 // TODO: No longer used, remove 467 // TODO: No longer used, remove
434 struct dc_hdr_static_metadata hdr_static_ctx; 468 struct dc_hdr_static_metadata hdr_static_ctx;
@@ -465,6 +499,7 @@ struct dc_plane_info {
465 enum plane_stereo_format stereo_format; 499 enum plane_stereo_format stereo_format;
466 enum dc_color_space color_space; 500 enum dc_color_space color_space;
467 enum color_transfer_func input_tf; 501 enum color_transfer_func input_tf;
502 unsigned int sdr_white_level;
468 bool horizontal_mirror; 503 bool horizontal_mirror;
469 bool visible; 504 bool visible;
470 bool per_pixel_alpha; 505 bool per_pixel_alpha;
@@ -489,10 +524,8 @@ struct dc_surface_update {
489 /* following updates require alloc/sleep/spin that is not isr safe, 524 /* following updates require alloc/sleep/spin that is not isr safe,
490 * null means no updates 525 * null means no updates
491 */ 526 */
492 /* gamma TO BE REMOVED */
493 struct dc_gamma *gamma; 527 struct dc_gamma *gamma;
494 enum color_transfer_func color_input_tf; 528 enum color_transfer_func color_input_tf;
495 enum color_transfer_func color_output_tf;
496 struct dc_transfer_func *in_transfer_func; 529 struct dc_transfer_func *in_transfer_func;
497 530
498 struct csc_transform *input_csc_color_matrix; 531 struct csc_transform *input_csc_color_matrix;
@@ -524,6 +557,7 @@ struct dc_transfer_func *dc_create_transfer_func(void);
524 */ 557 */
525struct dc_flip_addrs { 558struct dc_flip_addrs {
526 struct dc_plane_address address; 559 struct dc_plane_address address;
560 unsigned int flip_timestamp_in_us;
527 bool flip_immediate; 561 bool flip_immediate;
528 /* TODO: add flip duration for FreeSync */ 562 /* TODO: add flip duration for FreeSync */
529}; 563};
@@ -672,7 +706,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
672 struct dc *dc, 706 struct dc *dc,
673 uint32_t src_id, 707 uint32_t src_id,
674 uint32_t ext_id); 708 uint32_t ext_id);
675void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable); 709bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
676void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src); 710void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
677enum dc_irq_source dc_get_hpd_irq_source_at_index( 711enum dc_irq_source dc_get_hpd_irq_source_at_index(
678 struct dc *dc, uint32_t link_index); 712 struct dc *dc, uint32_t link_index);
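
dc_interrupt_set() now returns whether the source was actually (un)masked, so interrupt setup paths can notice when DC refuses the request. A minimal caller-side sketch (irq_src stands in for whatever enum dc_irq_source value the caller resolved):

if (!dc_interrupt_set(dc, irq_src, true)) {
        /* the source could not be enabled; report it rather than
         * assuming the interrupt is live */
}
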
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 273d80a4ebce..d9b84ec7954c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -111,6 +111,8 @@ struct dc_vbios_funcs {
111 struct dc_bios *bios); 111 struct dc_bios *bios);
112 bool (*is_accelerated_mode)( 112 bool (*is_accelerated_mode)(
113 struct dc_bios *bios); 113 struct dc_bios *bios);
114 uint32_t (*get_vga_enabled_displays)(
115 struct dc_bios *bios);
114 void (*get_bios_event_info)( 116 void (*get_bios_event_info)(
115 struct dc_bios *bios, 117 struct dc_bios *bios,
116 struct bios_event_info *info); 118 struct bios_event_info *info);
@@ -199,6 +201,7 @@ struct dc_vbios_funcs {
199}; 201};
200 202
201struct bios_registers { 203struct bios_registers {
204 uint32_t BIOS_SCRATCH_3;
202 uint32_t BIOS_SCRATCH_6; 205 uint32_t BIOS_SCRATCH_6;
203}; 206};
204 207
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 03029f72dc3f..b83a7dc2f5a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -413,12 +413,14 @@ struct dc_cursor_mi_param {
413enum { 413enum {
414 GAMMA_RGB_256_ENTRIES = 256, 414 GAMMA_RGB_256_ENTRIES = 256,
415 GAMMA_RGB_FLOAT_1024_ENTRIES = 1024, 415 GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
416 GAMMA_MAX_ENTRIES = 1024 416 GAMMA_CS_TFM_1D_ENTRIES = 4096,
417 GAMMA_MAX_ENTRIES = 4096
417}; 418};
418 419
419enum dc_gamma_type { 420enum dc_gamma_type {
420 GAMMA_RGB_256 = 1, 421 GAMMA_RGB_256 = 1,
421 GAMMA_RGB_FLOAT_1024 = 2 422 GAMMA_RGB_FLOAT_1024 = 2,
423 GAMMA_CS_TFM_1D = 3,
422}; 424};
423 425
424struct dc_gamma { 426struct dc_gamma {
@@ -434,6 +436,8 @@ struct dc_gamma {
434 436
435 /* private to DC core */ 437 /* private to DC core */
436 struct dc_context *ctx; 438 struct dc_context *ctx;
439
440 bool is_identity;
437}; 441};
438 442
439/* Used by both ipp and opp functions*/ 443
@@ -688,8 +692,18 @@ struct crtc_trigger_info {
688 enum trigger_delay delay; 692 enum trigger_delay delay;
689}; 693};
690 694
691struct dc_crtc_timing { 695enum vrr_state {
696 VRR_STATE_OFF = 0,
697 VRR_STATE_VARIABLE,
698 VRR_STATE_FIXED,
699};
692 700
701struct dc_crtc_timing_adjust {
702 uint32_t v_total_min;
703 uint32_t v_total_max;
704};
705
706struct dc_crtc_timing {
693 uint32_t h_total; 707 uint32_t h_total;
694 uint32_t h_border_left; 708 uint32_t h_border_left;
695 uint32_t h_addressable; 709 uint32_t h_addressable;
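
dc_crtc_timing_adjust carries the V_TOTAL window that variable-refresh code adjusts between frames, and dc_stream_state gains a matching timing_adjust field (see the dc_stream.h hunk below). A hedged example of filling it for a nominal mode; the v_total field is assumed to sit alongside h_total in dc_crtc_timing, and the headroom value is purely illustrative:

struct dc_crtc_timing_adjust adjust;

adjust.v_total_min = stream->timing.v_total;        /* nominal refresh */
adjust.v_total_max = stream->timing.v_total + 200;  /* illustrative VRR headroom */
stream->timing_adjust = adjust;
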
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index f11a734da1db..fb4d9eafdc6e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -64,6 +64,8 @@ struct dc_link {
64 enum signal_type connector_signal; 64 enum signal_type connector_signal;
65 enum dc_irq_source irq_source_hpd; 65 enum dc_irq_source irq_source_hpd;
66 enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */ 66 enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
67 bool is_hpd_filter_disabled;
68
67 /* caps is the same as reported_link_cap. link_training uses 69
68 * reported_link_cap. Will clean up. TODO 70 * reported_link_cap. Will clean up. TODO
69 */ 71 */
@@ -195,6 +197,8 @@ bool dc_link_dp_set_test_pattern(
195 const unsigned char *p_custom_pattern, 197 const unsigned char *p_custom_pattern,
196 unsigned int cust_pattern_size); 198 unsigned int cust_pattern_size);
197 199
200void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
201
198/* 202/*
199 * DPCD access interfaces 203 * DPCD access interfaces
200 */ 204 */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 01c60f11b2bd..d017df56b2ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -48,6 +48,8 @@ struct dc_stream_status {
48struct dc_stream_state { 48struct dc_stream_state {
49 struct dc_sink *sink; 49 struct dc_sink *sink;
50 struct dc_crtc_timing timing; 50 struct dc_crtc_timing timing;
51 struct dc_crtc_timing_adjust timing_adjust;
52 struct vrr_params vrr_params;
51 53
52 struct rect src; /* composition area */ 54 struct rect src; /* composition area */
53 struct rect dst; /* stream addressable area */ 55 struct rect dst; /* stream addressable area */
@@ -65,13 +67,19 @@ struct dc_stream_state {
65 enum dc_dither_option dither_option; 67 enum dc_dither_option dither_option;
66 68
67 enum view_3d_format view_format; 69 enum view_3d_format view_format;
70 enum color_transfer_func output_tf;
68 71
69 bool ignore_msa_timing_param; 72 bool ignore_msa_timing_param;
70 /* TODO: custom INFO packets */ 73 /* TODO: custom INFO packets */
71 /* TODO: ABM info (DMCU) */ 74 /* TODO: ABM info (DMCU) */
72 /* TODO: PSR info */ 75 /* PSR info */
76 unsigned char psr_version;
73 /* TODO: CEA VIC */ 77 /* TODO: CEA VIC */
74 78
79 /* DMCU info */
80 unsigned int abm_level;
81 unsigned int bl_pwm_level;
82
75 /* from core_stream struct */ 83 /* from core_stream struct */
76 struct dc_context *ctx; 84 struct dc_context *ctx;
77 85
@@ -103,6 +111,8 @@ struct dc_stream_update {
103 struct rect dst; 111 struct rect dst;
104 struct dc_transfer_func *out_transfer_func; 112 struct dc_transfer_func *out_transfer_func;
105 struct dc_hdr_static_metadata *hdr_static_metadata; 113 struct dc_hdr_static_metadata *hdr_static_metadata;
114 enum color_transfer_func color_output_tf;
115 unsigned int *abm_level;
106}; 116};
107 117
108bool dc_is_stream_unchanged( 118bool dc_is_stream_unchanged(
@@ -152,7 +162,7 @@ struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
152uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream); 162uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
153 163
154/* TODO: Return parsed values rather than direct register read 164/* TODO: Return parsed values rather than direct register read
155 * This has a dependency on the caller (amdgpu_get_crtc_scanoutpos) 165 * This has a dependency on the caller (amdgpu_display_get_crtc_scanoutpos)
156 * being refactored properly to be dce-specific 166 * being refactored properly to be dce-specific
157 */ 167 */
158bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, 168bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
@@ -237,6 +247,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
237 */ 247 */
238struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); 248struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
239 249
250void update_stream_signal(struct dc_stream_state *stream);
251
240void dc_stream_retain(struct dc_stream_state *dc_stream); 252void dc_stream_retain(struct dc_stream_state *dc_stream);
241void dc_stream_release(struct dc_stream_state *dc_stream); 253void dc_stream_release(struct dc_stream_state *dc_stream);
242 254
@@ -267,11 +279,25 @@ bool dc_stream_get_crtc_position(struct dc *dc,
267 unsigned int *v_pos, 279 unsigned int *v_pos,
268 unsigned int *nom_v_pos); 280 unsigned int *nom_v_pos);
269 281
282bool dc_stream_configure_crc(struct dc *dc,
283 struct dc_stream_state *stream,
284 bool enable,
285 bool continuous);
286
287bool dc_stream_get_crc(struct dc *dc,
288 struct dc_stream_state *stream,
289 uint32_t *r_cr,
290 uint32_t *g_y,
291 uint32_t *b_cb);
292
270void dc_stream_set_static_screen_events(struct dc *dc, 293void dc_stream_set_static_screen_events(struct dc *dc,
271 struct dc_stream_state **stream, 294 struct dc_stream_state **stream,
272 int num_streams, 295 int num_streams,
273 const struct dc_static_screen_events *events); 296 const struct dc_static_screen_events *events);
274 297
298void dc_stream_set_dither_option(struct dc_stream_state *stream,
299 enum dc_dither_option option);
300
275 301
276bool dc_stream_adjust_vmin_vmax(struct dc *dc, 302bool dc_stream_adjust_vmin_vmax(struct dc *dc,
277 struct dc_stream_state **stream, 303 struct dc_stream_state **stream,
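
dc_stream_configure_crc()/dc_stream_get_crc() give test code a way to arm CRC capture on a stream and read back the latest triplet; the sketch below uses only the two declarations above:

uint32_t r_cr, g_y, b_cb;

/* arm continuous CRC generation on the stream's CRTC */
if (dc_stream_configure_crc(dc, stream, true, true)) {
        if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb)) {
                /* compare against a golden value or expose via debugfs */
        }
}
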
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9faddfae241d..9441305d3ab5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -193,6 +193,10 @@ union display_content_support {
193 } bits; 193 } bits;
194}; 194};
195 195
196struct dc_panel_patch {
197 unsigned int dppowerup_delay;
198};
199
196struct dc_edid_caps { 200struct dc_edid_caps {
197 /* sink identification */ 201 /* sink identification */
198 uint16_t manufacturer_id; 202 uint16_t manufacturer_id;
@@ -219,6 +223,8 @@ struct dc_edid_caps {
219 223
220 bool edid_hdmi; 224 bool edid_hdmi;
221 bool hdr_supported; 225 bool hdr_supported;
226
227 struct dc_panel_patch panel_patch;
222}; 228};
223 229
224struct view { 230struct view {
@@ -515,6 +521,24 @@ struct audio_info {
515 struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT]; 521 struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
516}; 522};
517 523
524struct vrr_params {
525 enum vrr_state state;
526 uint32_t window_min;
527 uint32_t window_max;
528 uint32_t inserted_frame_duration_in_us;
529 uint32_t frames_to_insert;
530 uint32_t frame_counter;
531};
532
533#define DC_PLANE_UPDATE_TIMES_MAX 10
534
535struct dc_plane_flip_time {
536 unsigned int time_elapsed_in_us[DC_PLANE_UPDATE_TIMES_MAX];
537 unsigned int index;
538 unsigned int prev_update_time_in_us;
539};
540
541// Will combine with vrr_params at some point.
518struct freesync_context { 542struct freesync_context {
519 bool supported; 543 bool supported;
520 bool enabled; 544 bool enabled;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index b48190f54907..fe92a1222803 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -42,6 +42,8 @@
42#define FN(reg_name, field_name) \ 42#define FN(reg_name, field_name) \
43 abm_dce->abm_shift->field_name, abm_dce->abm_mask->field_name 43 abm_dce->abm_shift->field_name, abm_dce->abm_mask->field_name
44 44
45#define DC_LOGGER \
46 abm->ctx->logger
45#define CTX \ 47#define CTX \
46 abm_dce->base.ctx 48 abm_dce->base.ctx
47 49
@@ -323,6 +325,15 @@ static bool dce_abm_immediate_disable(struct abm *abm)
323 /* notifyDMCUMsg */ 325 /* notifyDMCUMsg */
324 REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); 326 REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
325 327
328 abm->stored_backlight_registers.BL_PWM_CNTL =
329 REG_READ(BL_PWM_CNTL);
330 abm->stored_backlight_registers.BL_PWM_CNTL2 =
331 REG_READ(BL_PWM_CNTL2);
332 abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
333 REG_READ(BL_PWM_PERIOD_CNTL);
334
335 REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
336 &abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
326 return true; 337 return true;
327} 338}
328 339
@@ -394,8 +405,7 @@ static bool dce_abm_set_backlight_level(
394{ 405{
395 struct dce_abm *abm_dce = TO_DCE_ABM(abm); 406 struct dce_abm *abm_dce = TO_DCE_ABM(abm);
396 407
397 dm_logger_write(abm->ctx->logger, LOG_BACKLIGHT, 408 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
398 "New Backlight level: %d (0x%X)\n",
399 backlight_level, backlight_level); 409 backlight_level, backlight_level);
400 410
401 /* If DMCU is in reset state, DMCU is uninitialized */ 411 /* If DMCU is in reset state, DMCU is uninitialized */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 0df9ecb2710c..6d5cdcdc8ec9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -33,6 +33,8 @@
33 33
34#define CTX \ 34#define CTX \
35 aud->base.ctx 35 aud->base.ctx
36#define DC_LOGGER \
37 aud->base.ctx->logger
36#define REG(reg)\ 38#define REG(reg)\
37 (aud->regs->reg) 39 (aud->regs->reg)
38 40
@@ -63,8 +65,7 @@ static void write_indirect_azalia_reg(struct audio *audio,
63 REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0, 65 REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
64 AZALIA_ENDPOINT_REG_DATA, reg_data); 66 AZALIA_ENDPOINT_REG_DATA, reg_data);
65 67
66 dm_logger_write(CTX->logger, LOG_HW_AUDIO, 68 DC_LOG_HW_AUDIO("AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
67 "AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
68 reg_index, reg_data); 69 reg_index, reg_data);
69} 70}
70 71
@@ -81,8 +82,7 @@ static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index
81 /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */ 82 /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
82 value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA); 83 value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
83 84
84 dm_logger_write(CTX->logger, LOG_HW_AUDIO, 85 DC_LOG_HW_AUDIO("AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
85 "AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
86 reg_index, value); 86 reg_index, value);
87 87
88 return value; 88 return value;
@@ -359,10 +359,12 @@ void dce_aud_az_enable(struct audio *audio)
359 AUDIO_ENABLED); 359 AUDIO_ENABLED);
360 360
361 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); 361 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
362 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); 362 set_reg_field_value(value, 0,
363 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
364 CLOCK_GATING_DISABLE);
365 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
363 366
364 dm_logger_write(CTX->logger, LOG_HW_AUDIO, 367 DC_LOG_HW_AUDIO("\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n",
365 "\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n",
366 audio->inst, value); 368 audio->inst, value);
367} 369}
368 370
@@ -372,6 +374,10 @@ void dce_aud_az_disable(struct audio *audio)
372 struct dce_audio *aud = DCE_AUD(audio); 374 struct dce_audio *aud = DCE_AUD(audio);
373 375
374 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); 376 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
377 set_reg_field_value(value, 1,
378 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
379 CLOCK_GATING_DISABLE);
380 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
375 381
376 set_reg_field_value(value, 0, 382 set_reg_field_value(value, 0,
377 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, 383 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
@@ -383,8 +389,7 @@ void dce_aud_az_disable(struct audio *audio)
383 CLOCK_GATING_DISABLE); 389 CLOCK_GATING_DISABLE);
384 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); 390 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
385 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); 391 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
386 dm_logger_write(CTX->logger, LOG_HW_AUDIO, 392 DC_LOG_HW_AUDIO("\n\t========= AUDIO:dce_aud_az_disable: index: %u data: 0x%x\n",
387 "\n\t========= AUDIO:dce_aud_az_disable: index: %u data: 0x%x\n",
388 audio->inst, value); 393 audio->inst, value);
389} 394}
390 395
@@ -716,6 +721,11 @@ void dce_aud_az_configure(
716 DESCRIPTION17); 721 DESCRIPTION17);
717 722
718 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, value); 723 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, value);
724 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
725 set_reg_field_value(value, 0,
726 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
727 CLOCK_GATING_DISABLE);
728 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
719} 729}
720 730
721/* 731/*
@@ -783,8 +793,7 @@ void dce_aud_wall_dto_setup(
783 crtc_info->calculated_pixel_clock, 793 crtc_info->calculated_pixel_clock,
784 &clock_info); 794 &clock_info);
785 795
786 dm_logger_write(audio->ctx->logger, LOG_HW_AUDIO,\ 796 DC_LOG_HW_AUDIO("\n%s:Input::requested_pixel_clock = %d"\
787 "\n%s:Input::requested_pixel_clock = %d"\
788 "calculated_pixel_clock =%d\n"\ 797 "calculated_pixel_clock =%d\n"\
789 "audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\ 798 "audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\
790 crtc_info->requested_pixel_clock,\ 799 crtc_info->requested_pixel_clock,\
@@ -897,6 +906,10 @@ void dce_aud_hw_init(
897 REG_UPDATE_2(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, 906 REG_UPDATE_2(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES,
898 CLKSTOP, 1, 907 CLKSTOP, 1,
899 EPSS, 1); 908 EPSS, 1);
909 set_reg_field_value(value, 0,
910 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
911 CLOCK_GATING_DISABLE);
912 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
900} 913}
901 914
902static const struct audio_funcs funcs = { 915static const struct audio_funcs funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 31280d252753..0aa2cda60890 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -41,7 +41,8 @@
41 41
42#define CTX \ 42#define CTX \
43 clk_src->base.ctx 43 clk_src->base.ctx
44 44#define DC_LOGGER \
45 calc_pll_cs->ctx->logger
45#undef FN 46#undef FN
46#define FN(reg_name, field_name) \ 47#define FN(reg_name, field_name) \
47 clk_src->cs_shift->field_name, clk_src->cs_mask->field_name 48 clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
@@ -288,7 +289,7 @@ static uint32_t calculate_pixel_clock_pll_dividers(
288 uint32_t max_ref_divider; 289 uint32_t max_ref_divider;
289 290
290 if (pll_settings->adjusted_pix_clk == 0) { 291 if (pll_settings->adjusted_pix_clk == 0) {
291 dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR, 292 DC_LOG_ERROR(
292 "%s Bad requested pixel clock", __func__); 293 "%s Bad requested pixel clock", __func__);
293 return MAX_PLL_CALC_ERROR; 294 return MAX_PLL_CALC_ERROR;
294 } 295 }
@@ -349,13 +350,13 @@ static uint32_t calculate_pixel_clock_pll_dividers(
349 * ## SVS Wed 15 Jul 2009 */ 350 * ## SVS Wed 15 Jul 2009 */
350 351
351 if (min_post_divider > max_post_divider) { 352 if (min_post_divider > max_post_divider) {
352 dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR, 353 DC_LOG_ERROR(
353 "%s Post divider range is invalid", __func__); 354 "%s Post divider range is invalid", __func__);
354 return MAX_PLL_CALC_ERROR; 355 return MAX_PLL_CALC_ERROR;
355 } 356 }
356 357
357 if (min_ref_divider > max_ref_divider) { 358 if (min_ref_divider > max_ref_divider) {
358 dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR, 359 DC_LOG_ERROR(
359 "%s Reference divider range is invalid", __func__); 360 "%s Reference divider range is invalid", __func__);
360 return MAX_PLL_CALC_ERROR; 361 return MAX_PLL_CALC_ERROR;
361 } 362 }
@@ -466,7 +467,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
466{ 467{
467 uint32_t field = 0; 468 uint32_t field = 0;
468 uint32_t pll_calc_error = MAX_PLL_CALC_ERROR; 469 uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
469 470 struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
470 /* Check if reference clock is external (not pcie/xtalin) 471 /* Check if reference clock is external (not pcie/xtalin)
471 * HW Dce80 spec: 472 * HW Dce80 spec:
472 * 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB 473 * 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB
@@ -493,7 +494,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
493 if (!pll_adjust_pix_clk(clk_src, pix_clk_params, pll_settings)) { 494 if (!pll_adjust_pix_clk(clk_src, pix_clk_params, pll_settings)) {
494 /* Should never happen, ASSERT and fill up values to be able 495 /* Should never happen, ASSERT and fill up values to be able
495 * to continue. */ 496 * to continue. */
496 dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR, 497 DC_LOG_ERROR(
497 "%s: Failed to adjust pixel clock!!", __func__); 498 "%s: Failed to adjust pixel clock!!", __func__);
498 pll_settings->actual_pix_clk = 499 pll_settings->actual_pix_clk =
499 pix_clk_params->requested_pix_clk; 500 pix_clk_params->requested_pix_clk;
@@ -556,11 +557,12 @@ static uint32_t dce110_get_pix_clk_dividers(
 	struct pll_settings *pll_settings)
 {
 	struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
+	struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
 	uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
 
 	if (pix_clk_params == NULL || pll_settings == NULL
 			|| pix_clk_params->requested_pix_clk == 0) {
-		dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+		DC_LOG_ERROR(
 			"%s: Invalid parameters!!\n", __func__);
 		return pll_calc_error;
 	}
@@ -908,19 +910,9 @@ static bool dce110_program_pix_clk(
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
 		unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
-		unsigned dp_dto_ref_kHz = 600000;
-		/* DPREF clock from FPGA TODO: Does FPGA have this value? */
+		unsigned dp_dto_ref_kHz = 700000;
 		unsigned clock_kHz = pll_settings->actual_pix_clk;
 
-		/* For faster simulation, if mode pixe clock less than 290MHz,
-		 * pixel clock can be hard coded to 290Mhz. For 4K mode, pixel clock
-		 * is greater than 500Mhz, need real pixel clock
-		 * clock_kHz = 290000;
-		 */
-		/* TODO: un-hardcode when we can set display clock properly*/
-		/*clock_kHz = pix_clk_params->requested_pix_clk;*/
-		clock_kHz = 290000;
-
 		/* Set DTO values: phase = target clock, modulo = reference clock */
 		REG_WRITE(PHASE[inst], clock_kHz);
 		REG_WRITE(MODULO[inst], dp_dto_ref_kHz);
@@ -1062,14 +1054,14 @@ static void get_ss_info_from_atombios(
 	struct spread_spectrum_info *ss_info_cur;
 	struct spread_spectrum_data *ss_data_cur;
 	uint32_t i;
-
+	struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
 	if (ss_entries_num == NULL) {
-		dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+		DC_LOG_SYNC(
 			"Invalid entry !!!\n");
 		return;
 	}
 	if (spread_spectrum_data == NULL) {
-		dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+		DC_LOG_SYNC(
 			"Invalid array pointer!!!\n");
 		return;
 	}
@@ -1114,7 +1106,7 @@ static void get_ss_info_from_atombios(
 		++i, ++ss_info_cur, ++ss_data_cur) {
 
 		if (ss_info_cur->type.STEP_AND_DELAY_INFO != false) {
-			dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+			DC_LOG_SYNC(
 				"Invalid ATOMBIOS SS Table!!!\n");
 			goto out_free_data;
 		}
@@ -1124,9 +1116,9 @@ static void get_ss_info_from_atombios(
 		if (as_signal == AS_SIGNAL_TYPE_HDMI
 				&& ss_info_cur->spread_spectrum_percentage > 6){
 			/* invalid input, do nothing */
-			dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+			DC_LOG_SYNC(
 				"Invalid SS percentage ");
-			dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+			DC_LOG_SYNC(
 				"for HDMI in ATOMBIOS info Table!!!\n");
 			continue;
 		}
@@ -1238,12 +1230,12 @@ static bool calc_pll_max_vco_construct(
 	if (init_data->num_fract_fb_divider_decimal_point == 0 ||
 			init_data->num_fract_fb_divider_decimal_point_precision >
 			init_data->num_fract_fb_divider_decimal_point) {
-		dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+		DC_LOG_ERROR(
 			"The dec point num or precision is incorrect!");
 		return false;
 	}
 	if (init_data->num_fract_fb_divider_decimal_point_precision == 0) {
-		dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+		DC_LOG_ERROR(
 			"Incorrect fract feedback divider precision num!");
 		return false;
 	}
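For context, the hunks above replace explicit dm_logger_write(<logger>, LOG_<level>, ...) calls with DC_LOG_<level>() macros that pick up a per-file DC_LOGGER handle. The standalone sketch below shows the general shape of that pattern; dm_logger_write here is a user-space stand-in, and the real macro bodies live in the DC logger headers and may differ in detail.

/* Standalone sketch of the DC_LOG_* pattern used in the hunks above.
 * dm_logger_write() is a stand-in for the real DC logger sink. */
#include <stdio.h>
#include <stdarg.h>

enum log_level { LOG_ERROR, LOG_WARNING, LOG_SYNC };

static void dm_logger_write(void *logger, enum log_level level,
			    const char *fmt, ...)
{
	va_list args;

	(void)logger;
	va_start(args, fmt);
	fprintf(stderr, "[level %d] ", level);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

/* Each .c file names its logger once ... */
#define DC_LOGGER	NULL
#define DC_LOG_ERROR(...)	dm_logger_write(DC_LOGGER, LOG_ERROR, __VA_ARGS__)

int main(void)
{
	/* ... and call sites shrink to a single macro invocation. */
	DC_LOG_ERROR("%s Bad requested pixel clock\n", __func__);
	return 0;
}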
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 9e98a5f39a6d..78e6beb6cf26 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -34,6 +34,7 @@
34#include "dcn_calcs.h" 34#include "dcn_calcs.h"
35#endif 35#endif
36#include "core_types.h" 36#include "core_types.h"
37#include "dc_types.h"
37 38
38 39
39#define TO_DCE_CLOCKS(clocks)\ 40#define TO_DCE_CLOCKS(clocks)\
@@ -48,6 +49,8 @@
48 49
49#define CTX \ 50#define CTX \
50 clk_dce->base.ctx 51 clk_dce->base.ctx
52#define DC_LOGGER \
53 clk->ctx->logger
51 54
52/* Max clock values for each state indexed by "enum clocks_state": */ 55/* Max clock values for each state indexed by "enum clocks_state": */
53static const struct state_dependent_clocks dce80_max_clks_by_state[] = { 56static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
@@ -291,8 +294,10 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
291 294
292 low_req_clk = i + 1; 295 low_req_clk = i + 1;
293 if (low_req_clk > clk->max_clks_state) { 296 if (low_req_clk > clk->max_clks_state) {
294 dm_logger_write(clk->ctx->logger, LOG_WARNING, 297 DC_LOG_WARNING("%s: clocks unsupported disp_clk %d pix_clk %d",
295 "%s: clocks unsupported", __func__); 298 __func__,
299 req_clocks->display_clk_khz,
300 req_clocks->pixel_clk_khz);
296 low_req_clk = DM_PP_CLOCKS_STATE_INVALID; 301 low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
297 } 302 }
298 303
@@ -308,8 +313,7 @@ static bool dce_clock_set_min_clocks_state(
308 313
309 if (clocks_state > clk->max_clks_state) { 314 if (clocks_state > clk->max_clks_state) {
310 /*Requested state exceeds max supported state.*/ 315 /*Requested state exceeds max supported state.*/
311 dm_logger_write(clk->ctx->logger, LOG_WARNING, 316 DC_LOG_WARNING("Requested state exceeds max supported state");
312 "Requested state exceeds max supported state");
313 return false; 317 return false;
314 } else if (clocks_state == clk->cur_min_clks_state) { 318 } else if (clocks_state == clk->cur_min_clks_state) {
315 /*if we're trying to set the same state, we can just return 319 /*if we're trying to set the same state, we can just return
@@ -415,9 +419,12 @@ static int dce112_set_clock(
 
 	bp->funcs->set_dce_clock(bp, &dce_clk_params);
 
-	if (clk_dce->dfs_bypass_disp_clk != actual_clock)
-		dmcu->funcs->set_psr_wait_loop(dmcu,
-				actual_clock / 1000 / 7);
+	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+		if (clk_dce->dfs_bypass_disp_clk != actual_clock)
+			dmcu->funcs->set_psr_wait_loop(dmcu,
+					actual_clock / 1000 / 7);
+	}
+
 	clk_dce->dfs_bypass_disp_clk = actual_clock;
 	return actual_clock;
 }
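The dce112_set_clock() hunk above skips the DMCU PSR wait-loop programming on the FPGA (Maximus) environment, presumably because no DMCU firmware is available there. A hypothetical sketch of that guard, with placeholder types standing in for struct dmcu and the IS_FPGA_MAXIMUS_DC() check:

/* Hypothetical stand-ins for illustration only; the real driver uses
 * struct dmcu and the dce_environment check shown in the hunk above. */
struct fake_dmcu {
	void (*set_psr_wait_loop)(struct fake_dmcu *dmcu, unsigned int wait);
};

static int is_fpga_environment;	/* stands in for IS_FPGA_MAXIMUS_DC() */

static void program_psr_wait(struct fake_dmcu *dmcu, int actual_clock_khz,
			     int cached_clock_khz)
{
	/* Skip DMCU programming on FPGA emulation. */
	if (is_fpga_environment)
		return;

	/* Only reprogram when the bypass display clock actually changed. */
	if (cached_clock_khz != actual_clock_khz)
		dmcu->set_psr_wait_loop(dmcu, actual_clock_khz / 1000 / 7);
}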
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index f663adb33584..2ee3d9bf1062 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -360,7 +360,7 @@ static void dcn10_get_dmcu_version(struct dmcu *dmcu)
360 dmcu->dmcu_version.year = ((REG_READ(DMCU_IRAM_RD_DATA) << 8) | 360 dmcu->dmcu_version.year = ((REG_READ(DMCU_IRAM_RD_DATA) << 8) |
361 REG_READ(DMCU_IRAM_RD_DATA)); 361 REG_READ(DMCU_IRAM_RD_DATA));
362 dmcu->dmcu_version.month = REG_READ(DMCU_IRAM_RD_DATA); 362 dmcu->dmcu_version.month = REG_READ(DMCU_IRAM_RD_DATA);
363 dmcu->dmcu_version.day = REG_READ(DMCU_IRAM_RD_DATA); 363 dmcu->dmcu_version.date = REG_READ(DMCU_IRAM_RD_DATA);
364 364
365 /* Disable write access to IRAM to allow dynamic sleep state */ 365 /* Disable write access to IRAM to allow dynamic sleep state */
366 REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, 366 REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
@@ -521,6 +521,9 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
 	if (dmcu->dmcu_state != DMCU_RUNNING)
 		return;
 
+	dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+	if (psr_state == 0 && !enable)
+		return;
 	/* waitDMCUReadyForCmd */
 	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
 		dmcu_wait_reg_ready_interval,
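The hunk above makes dcn10_dmcu_set_psr_enable() query the current PSR state first and bail out when PSR is already off and the caller is asking to disable it, which avoids an unnecessary trip through the DMCU command registers. A self-contained sketch of that early-out shape (psr_state and psr_send_command are stand-ins, not the real driver symbols):

#include <stdio.h>

/* Stand-ins for dcn10_get_dmcu_psr_state() and the MASTER_COMM
 * register handshake shown in the hunk above. */
static unsigned int psr_state;		/* 0 = PSR inactive */

static void psr_send_command(int enable, int wait)
{
	printf("DMCU cmd: enable=%d wait=%d\n", enable, wait);
	psr_state = enable ? 1 : 0;
}

static void psr_set_enable(int enable, int wait)
{
	/* Early out: PSR already off and the caller asked to disable it. */
	if (psr_state == 0 && !enable)
		return;

	psr_send_command(enable, wait);
}

int main(void)
{
	psr_set_enable(0, 1);	/* skipped: no DMCU traffic */
	psr_set_enable(1, 1);	/* programs PSR on */
	return 0;
}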
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 1d4546f23135..c24c0e5ea44e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -46,6 +46,23 @@
46 SR(SMU_INTERRUPT_CONTROL), \ 46 SR(SMU_INTERRUPT_CONTROL), \
47 SR(DC_DMCU_SCRATCH) 47 SR(DC_DMCU_SCRATCH)
48 48
49#define DMCU_DCE80_REG_LIST() \
50 SR(DMCU_CTRL), \
51 SR(DMCU_STATUS), \
52 SR(DMCU_RAM_ACCESS_CTRL), \
53 SR(DMCU_IRAM_WR_CTRL), \
54 SR(DMCU_IRAM_WR_DATA), \
55 SR(MASTER_COMM_DATA_REG1), \
56 SR(MASTER_COMM_DATA_REG2), \
57 SR(MASTER_COMM_DATA_REG3), \
58 SR(MASTER_COMM_CMD_REG), \
59 SR(MASTER_COMM_CNTL_REG), \
60 SR(DMCU_IRAM_RD_CTRL), \
61 SR(DMCU_IRAM_RD_DATA), \
62 SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
63 SR(SMU_INTERRUPT_CONTROL), \
64 SR(DC_DMCU_SCRATCH)
65
49#define DMCU_DCE110_COMMON_REG_LIST() \ 66#define DMCU_DCE110_COMMON_REG_LIST() \
50 DMCU_COMMON_REG_LIST_DCE_BASE(), \ 67 DMCU_COMMON_REG_LIST_DCE_BASE(), \
51 SR(DCI_MEM_PWR_STATUS) 68 SR(DCI_MEM_PWR_STATUS)
@@ -83,6 +100,24 @@
83 STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \ 100 STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \
84 DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh) 101 DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
85 102
103#define DMCU_MASK_SH_LIST_DCE80(mask_sh) \
104 DMCU_SF(DMCU_CTRL, \
105 DMCU_ENABLE, mask_sh), \
106 DMCU_SF(DMCU_STATUS, \
107 UC_IN_STOP_MODE, mask_sh), \
108 DMCU_SF(DMCU_STATUS, \
109 UC_IN_RESET, mask_sh), \
110 DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
111 IRAM_HOST_ACCESS_EN, mask_sh), \
112 DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
113 IRAM_WR_ADDR_AUTO_INC, mask_sh), \
114 DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
115 IRAM_RD_ADDR_AUTO_INC, mask_sh), \
116 DMCU_SF(MASTER_COMM_CMD_REG, \
117 MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
118 DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
119 DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
120
86#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \ 121#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \
87 DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \ 122 DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
88 DMCU_SF(DCI_MEM_PWR_STATUS, \ 123 DMCU_SF(DCI_MEM_PWR_STATUS, \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index d2e66b1bc0ef..487724345d9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -56,7 +56,7 @@ void dce_pipe_control_lock(struct dc *dc,
56 if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg)) 56 if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
57 return; 57 return;
58 58
59 val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->pipe_idx], 59 val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst],
60 BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph, 60 BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
61 BLND_SCL_V_UPDATE_LOCK, &scl, 61 BLND_SCL_V_UPDATE_LOCK, &scl,
62 BLND_BLND_V_UPDATE_LOCK, &blnd, 62 BLND_BLND_V_UPDATE_LOCK, &blnd,
@@ -67,19 +67,19 @@ void dce_pipe_control_lock(struct dc *dc,
67 blnd = lock_val; 67 blnd = lock_val;
68 update_lock_mode = lock_val; 68 update_lock_mode = lock_val;
69 69
70 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val, 70 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
71 BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph, 71 BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
72 BLND_SCL_V_UPDATE_LOCK, scl); 72 BLND_SCL_V_UPDATE_LOCK, scl);
73 73
74 if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0) 74 if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0)
75 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val, 75 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
76 BLND_BLND_V_UPDATE_LOCK, blnd, 76 BLND_BLND_V_UPDATE_LOCK, blnd,
77 BLND_V_UPDATE_LOCK_MODE, update_lock_mode); 77 BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
78 78
79 if (hws->wa.blnd_crtc_trigger) { 79 if (hws->wa.blnd_crtc_trigger) {
80 if (!lock) { 80 if (!lock) {
81 uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->pipe_idx]); 81 uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst]);
82 REG_WRITE(CRTC_H_BLANK_START_END[pipe->pipe_idx], value); 82 REG_WRITE(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst], value);
83 } 83 }
84 } 84 }
85} 85}
@@ -197,9 +197,9 @@ void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
 }
 
 /* Only use LUT for 8 bit formats */
-bool dce_use_lut(const struct dc_plane_state *plane_state)
+bool dce_use_lut(enum surface_pixel_format format)
 {
-	switch (plane_state->format) {
+	switch (format) {
 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
 	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
 		return true;
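dce_use_lut() now takes the pixel format directly rather than a whole dc_plane_state, so callers only need plane_state->format in hand. A reduced sketch of the same decision, with an illustrative subset of the real surface_pixel_format enum:

/* Mirrors the shape of the updated helper; the FMT_* values are an
 * illustrative subset, not the real SURFACE_PIXEL_FORMAT_* enum. */
enum surface_pixel_format_sketch {
	FMT_GRPH_ARGB8888,
	FMT_GRPH_ABGR8888,
	FMT_GRPH_ARGB2101010,
};

static int use_lut(enum surface_pixel_format_sketch format)
{
	switch (format) {
	case FMT_GRPH_ARGB8888:
	case FMT_GRPH_ABGR8888:
		return 1;	/* only 8-bit formats go through the LUT */
	default:
		return 0;
	}
}

/* Existing callers simply pass the format field now, e.g.
 *	if (dce_use_lut(plane_state->format)) ...
 */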
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index b73db9e78437..057b8afd74bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -140,42 +140,8 @@
140 BL_REG_LIST() 140 BL_REG_LIST()
141 141
142#define HWSEQ_DCN_REG_LIST()\ 142#define HWSEQ_DCN_REG_LIST()\
143 SRII(DCHUBP_CNTL, HUBP, 0), \
144 SRII(DCHUBP_CNTL, HUBP, 1), \
145 SRII(DCHUBP_CNTL, HUBP, 2), \
146 SRII(DCHUBP_CNTL, HUBP, 3), \
147 SRII(HUBP_CLK_CNTL, HUBP, 0), \
148 SRII(HUBP_CLK_CNTL, HUBP, 1), \
149 SRII(HUBP_CLK_CNTL, HUBP, 2), \
150 SRII(HUBP_CLK_CNTL, HUBP, 3), \
151 SRII(DPP_CONTROL, DPP_TOP, 0), \
152 SRII(DPP_CONTROL, DPP_TOP, 1), \
153 SRII(DPP_CONTROL, DPP_TOP, 2), \
154 SRII(DPP_CONTROL, DPP_TOP, 3), \
155 SRII(OPP_PIPE_CONTROL, OPP_PIPE, 0), \
156 SRII(OPP_PIPE_CONTROL, OPP_PIPE, 1), \
157 SRII(OPP_PIPE_CONTROL, OPP_PIPE, 2), \
158 SRII(OPP_PIPE_CONTROL, OPP_PIPE, 3), \
159 SR(REFCLK_CNTL), \ 143 SR(REFCLK_CNTL), \
160 SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
161 SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
162 SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\
163 SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
164 SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
165 SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\
166 SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
167 SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
168 SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\
169 SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
170 SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\
171 SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\
172 SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
173 SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
174 SR(DCHUBBUB_ARB_SAT_LEVEL),\
175 SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
176 SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ 144 SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
177 SR(DCHUBBUB_TEST_DEBUG_INDEX), \
178 SR(DCHUBBUB_TEST_DEBUG_DATA), \
179 SR(DIO_MEM_PWR_CTRL), \ 145 SR(DIO_MEM_PWR_CTRL), \
180 SR(DCCG_GATE_DISABLE_CNTL), \ 146 SR(DCCG_GATE_DISABLE_CNTL), \
181 SR(DCCG_GATE_DISABLE_CNTL2), \ 147 SR(DCCG_GATE_DISABLE_CNTL2), \
@@ -195,22 +161,10 @@
195 MMHUB_SR(MC_VM_SYSTEM_APERTURE_LOW_ADDR),\ 161 MMHUB_SR(MC_VM_SYSTEM_APERTURE_LOW_ADDR),\
196 MMHUB_SR(MC_VM_SYSTEM_APERTURE_HIGH_ADDR) 162 MMHUB_SR(MC_VM_SYSTEM_APERTURE_HIGH_ADDR)
197 163
198#define HWSEQ_SR_WATERMARK_REG_LIST()\
199 SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
200 SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\
201 SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B),\
202 SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B),\
203 SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C),\
204 SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C),\
205 SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D),\
206 SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D)
207
208#define HWSEQ_DCN1_REG_LIST()\ 164#define HWSEQ_DCN1_REG_LIST()\
209 HWSEQ_DCN_REG_LIST(), \ 165 HWSEQ_DCN_REG_LIST(), \
210 HWSEQ_SR_WATERMARK_REG_LIST(), \
211 HWSEQ_PIXEL_RATE_REG_LIST(OTG), \ 166 HWSEQ_PIXEL_RATE_REG_LIST(OTG), \
212 HWSEQ_PHYPLL_REG_LIST(OTG), \ 167 HWSEQ_PHYPLL_REG_LIST(OTG), \
213 SR(DCHUBBUB_SDPIF_FB_TOP),\
214 SR(DCHUBBUB_SDPIF_FB_BASE),\ 168 SR(DCHUBBUB_SDPIF_FB_BASE),\
215 SR(DCHUBBUB_SDPIF_FB_OFFSET),\ 169 SR(DCHUBBUB_SDPIF_FB_OFFSET),\
216 SR(DCHUBBUB_SDPIF_AGP_BASE),\ 170 SR(DCHUBBUB_SDPIF_AGP_BASE),\
@@ -236,6 +190,7 @@
236 SR(D2VGA_CONTROL), \ 190 SR(D2VGA_CONTROL), \
237 SR(D3VGA_CONTROL), \ 191 SR(D3VGA_CONTROL), \
238 SR(D4VGA_CONTROL), \ 192 SR(D4VGA_CONTROL), \
193 SR(VGA_TEST_CONTROL), \
239 SR(DC_IP_REQUEST_CNTL), \ 194 SR(DC_IP_REQUEST_CNTL), \
240 BL_REG_LIST() 195 BL_REG_LIST()
241 196
@@ -260,39 +215,9 @@ struct dce_hwseq_registers {
260 uint32_t DCHUB_AGP_BOT; 215 uint32_t DCHUB_AGP_BOT;
261 uint32_t DCHUB_AGP_TOP; 216 uint32_t DCHUB_AGP_TOP;
262 217
263 uint32_t DCHUBP_CNTL[4];
264 uint32_t HUBP_CLK_CNTL[4];
265 uint32_t DPP_CONTROL[4];
266 uint32_t OPP_PIPE_CONTROL[4];
267 uint32_t REFCLK_CNTL; 218 uint32_t REFCLK_CNTL;
268 uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A; 219
269 uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A;
270 uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;
271 uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;
272 uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;
273 uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;
274 uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B;
275 uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;
276 uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;
277 uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;
278 uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;
279 uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C;
280 uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;
281 uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;
282 uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;
283 uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;
284 uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D;
285 uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;
286 uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D;
287 uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;
288 uint32_t DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL;
289 uint32_t DCHUBBUB_ARB_SAT_LEVEL;
290 uint32_t DCHUBBUB_ARB_DF_REQ_OUTSTAND;
291 uint32_t DCHUBBUB_GLOBAL_TIMER_CNTL; 220 uint32_t DCHUBBUB_GLOBAL_TIMER_CNTL;
292 uint32_t DCHUBBUB_ARB_DRAM_STATE_CNTL;
293 uint32_t DCHUBBUB_TEST_DEBUG_INDEX;
294 uint32_t DCHUBBUB_TEST_DEBUG_DATA;
295 uint32_t DCHUBBUB_SDPIF_FB_TOP;
296 uint32_t DCHUBBUB_SDPIF_FB_BASE; 221 uint32_t DCHUBBUB_SDPIF_FB_BASE;
297 uint32_t DCHUBBUB_SDPIF_FB_OFFSET; 222 uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
298 uint32_t DCHUBBUB_SDPIF_AGP_BASE; 223 uint32_t DCHUBBUB_SDPIF_AGP_BASE;
@@ -337,6 +262,7 @@ struct dce_hwseq_registers {
337 uint32_t D2VGA_CONTROL; 262 uint32_t D2VGA_CONTROL;
338 uint32_t D3VGA_CONTROL; 263 uint32_t D3VGA_CONTROL;
339 uint32_t D4VGA_CONTROL; 264 uint32_t D4VGA_CONTROL;
265 uint32_t VGA_TEST_CONTROL;
340 /* MMHUB registers. read only. temporary hack */ 266 /* MMHUB registers. read only. temporary hack */
341 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; 267 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
342 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; 268 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
@@ -403,6 +329,8 @@ struct dce_hwseq_registers {
403 HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\ 329 HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
404 SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\ 330 SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
405 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\ 331 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
332 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
333 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
406 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\ 334 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
407 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_) 335 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
408 336
@@ -433,31 +361,17 @@ struct dce_hwseq_registers {
433#define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\ 361#define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\
434 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\ 362 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
435 HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \ 363 HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
436 HWS_SF(HUBP0_, DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh), \
437 HWS_SF(HUBP0_, HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh), \
438 HWS_SF(DPP_TOP0_, DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
439 HWS_SF(OPP_PIPE0_, OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh),\
440 HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ 364 HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
441 HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
442 HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
443 HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \
444 HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \
445 HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \
446 HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
447 HWS_SF(, DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
448 HWS_SF(, DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
449 HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh) 365 HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
450 366
451#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\ 367#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
452 HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ 368 HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
453 HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh), \ 369 HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh), \
454 HWS_SF(, DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \
455 HWS_SF(, DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \ 370 HWS_SF(, DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \
456 HWS_SF(, DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \ 371 HWS_SF(, DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \
457 HWS_SF(, DCHUBBUB_SDPIF_AGP_BASE, SDPIF_AGP_BASE, mask_sh), \ 372 HWS_SF(, DCHUBBUB_SDPIF_AGP_BASE, SDPIF_AGP_BASE, mask_sh), \
458 HWS_SF(, DCHUBBUB_SDPIF_AGP_BOT, SDPIF_AGP_BOT, mask_sh), \ 373 HWS_SF(, DCHUBBUB_SDPIF_AGP_BOT, SDPIF_AGP_BOT, mask_sh), \
459 HWS_SF(, DCHUBBUB_SDPIF_AGP_TOP, SDPIF_AGP_TOP, mask_sh), \ 374 HWS_SF(, DCHUBBUB_SDPIF_AGP_TOP, SDPIF_AGP_TOP, mask_sh), \
460 HWS_SF(DPP_TOP0_, DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh), \
461 /* todo: get these from GVM instead of reading registers ourselves */\ 375 /* todo: get these from GVM instead of reading registers ourselves */\
462 HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, PAGE_DIRECTORY_ENTRY_HI32, mask_sh),\ 376 HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, PAGE_DIRECTORY_ENTRY_HI32, mask_sh),\
463 HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, PAGE_DIRECTORY_ENTRY_LO32, mask_sh),\ 377 HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, PAGE_DIRECTORY_ENTRY_LO32, mask_sh),\
@@ -493,7 +407,15 @@ struct dce_hwseq_registers {
493 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \ 407 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
494 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \ 408 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
495 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ 409 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
410 HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
411 HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
412 HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
413 HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
414 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
415 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
496 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ 416 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
417 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh), \
418 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh), \
497 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) 419 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
498 420
499#define HWSEQ_REG_FIELD_LIST(type) \ 421#define HWSEQ_REG_FIELD_LIST(type) \
@@ -526,14 +448,14 @@ struct dce_hwseq_registers {
526 type ENABLE_L1_TLB;\ 448 type ENABLE_L1_TLB;\
527 type SYSTEM_ACCESS_MODE;\ 449 type SYSTEM_ACCESS_MODE;\
528 type LVTMA_BLON;\ 450 type LVTMA_BLON;\
529 type LVTMA_PWRSEQ_TARGET_STATE_R; 451 type LVTMA_PWRSEQ_TARGET_STATE_R;\
452 type LVTMA_DIGON;\
453 type LVTMA_DIGON_OVRD;
530 454
531#define HWSEQ_DCN_REG_FIELD_LIST(type) \ 455#define HWSEQ_DCN_REG_FIELD_LIST(type) \
532 type HUBP_VTG_SEL; \ 456 type HUBP_VTG_SEL; \
533 type HUBP_CLOCK_ENABLE; \ 457 type HUBP_CLOCK_ENABLE; \
534 type DPP_CLOCK_ENABLE; \ 458 type DPP_CLOCK_ENABLE; \
535 type DPPCLK_RATE_CONTROL; \
536 type SDPIF_FB_TOP;\
537 type SDPIF_FB_BASE;\ 459 type SDPIF_FB_BASE;\
538 type SDPIF_FB_OFFSET;\ 460 type SDPIF_FB_OFFSET;\
539 type SDPIF_AGP_BASE;\ 461 type SDPIF_AGP_BASE;\
@@ -546,14 +468,6 @@ struct dce_hwseq_registers {
546 type AGP_BOT;\ 468 type AGP_BOT;\
547 type AGP_TOP;\ 469 type AGP_TOP;\
548 type DCHUBBUB_GLOBAL_TIMER_ENABLE; \ 470 type DCHUBBUB_GLOBAL_TIMER_ENABLE; \
549 type DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST;\
550 type DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE;\
551 type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE;\
552 type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE;\
553 type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE;\
554 type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE;\
555 type DCHUBBUB_ARB_SAT_LEVEL;\
556 type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\
557 type OPP_PIPE_CLOCK_EN;\ 471 type OPP_PIPE_CLOCK_EN;\
558 type IP_REQUEST_EN; \ 472 type IP_REQUEST_EN; \
559 type DOMAIN0_POWER_FORCEON; \ 473 type DOMAIN0_POWER_FORCEON; \
@@ -583,7 +497,13 @@ struct dce_hwseq_registers {
583 type DCFCLK_GATE_DIS; \ 497 type DCFCLK_GATE_DIS; \
584 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ 498 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
585 type DENTIST_DPPCLK_WDIVIDER; \ 499 type DENTIST_DPPCLK_WDIVIDER; \
586 type DENTIST_DISPCLK_WDIVIDER; 500 type DENTIST_DISPCLK_WDIVIDER; \
501 type VGA_TEST_ENABLE; \
502 type VGA_TEST_RENDER_START; \
503 type D1VGA_MODE_ENABLE; \
504 type D2VGA_MODE_ENABLE; \
505 type D3VGA_MODE_ENABLE; \
506 type D4VGA_MODE_ENABLE;
587 507
588struct dce_hwseq_shift { 508struct dce_hwseq_shift {
589 HWSEQ_REG_FIELD_LIST(uint8_t) 509 HWSEQ_REG_FIELD_LIST(uint8_t)
@@ -619,5 +539,5 @@ void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
619 struct clock_source *clk_src, 539 struct clock_source *clk_src,
620 unsigned int tg_inst); 540 unsigned int tg_inst);
621 541
622bool dce_use_lut(const struct dc_plane_state *plane_state); 542bool dce_use_lut(enum surface_pixel_format format);
623#endif /*__DCE_HWSEQ_H__*/ 543#endif /*__DCE_HWSEQ_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index a266e3f5e75f..8167cad7bcf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -56,6 +56,8 @@
56 56
57#define CTX \ 57#define CTX \
58 enc110->base.ctx 58 enc110->base.ctx
59#define DC_LOGGER \
60 enc110->base.ctx->logger
59 61
60#define REG(reg)\ 62#define REG(reg)\
61 (enc110->link_regs->reg) 63 (enc110->link_regs->reg)
@@ -82,13 +84,6 @@
82#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20 84#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
83#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40 85#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
84 86
85/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
86#define TMDS_MIN_PIXEL_CLOCK 25000
87/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
88#define TMDS_MAX_PIXEL_CLOCK 165000
89/* For current ASICs pixel clock - 600MHz */
90#define MAX_ENCODER_CLOCK 600000
91
92enum { 87enum {
93 DP_MST_UPDATE_MAX_RETRY = 50 88 DP_MST_UPDATE_MAX_RETRY = 50
94}; 89};
@@ -683,6 +678,7 @@ void dce110_link_encoder_construct(
683{ 678{
684 struct bp_encoder_cap_info bp_cap_info = {0}; 679 struct bp_encoder_cap_info bp_cap_info = {0};
685 const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; 680 const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
681 enum bp_result result = BP_RESULT_OK;
686 682
687 enc110->base.funcs = &dce110_lnk_enc_funcs; 683 enc110->base.funcs = &dce110_lnk_enc_funcs;
688 enc110->base.ctx = init_data->ctx; 684 enc110->base.ctx = init_data->ctx;
@@ -757,15 +753,23 @@ void dce110_link_encoder_construct(
 		enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
 	}
 
+	/* default to one to mirror Windows behavior */
+	enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+	result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
+			enc110->base.id, &bp_cap_info);
+
 	/* Override features with DCE-specific values */
-	if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
-			enc110->base.ctx->dc_bios, enc110->base.id,
-			&bp_cap_info)) {
+	if (BP_RESULT_OK == result) {
 		enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
 				bp_cap_info.DP_HBR2_EN;
 		enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
 				bp_cap_info.DP_HBR3_EN;
 		enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+	} else {
+		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+				__func__,
+				result);
 	}
 }
 
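The construct() hunk above defaults HDMI_6GB_EN to 1 before querying the VBIOS and now logs a warning when get_encoder_cap_info() fails instead of silently leaving the capability bits at zero. A compact, self-contained sketch of that default-then-override pattern, with placeholder types in place of the real bp_encoder_cap_info and VBIOS funcs:

#include <stdio.h>

/* Placeholder capability struct; the real one is bp_encoder_cap_info. */
struct cap_info {
	int dp_hbr2_en;
	int dp_hbr3_en;
	int hdmi_6gb_en;
};

/* Stand-in for bp_funcs->get_encoder_cap_info(); returns 0 on success. */
static int query_vbios_caps(struct cap_info *out)
{
	(void)out;
	return -1;	/* pretend the VBIOS query failed */
}

int main(void)
{
	struct cap_info caps = {0};
	int hdmi_6gb_en = 1;	/* default to one to mirror Windows behavior */
	int result = query_vbios_caps(&caps);

	if (result == 0) {
		hdmi_6gb_en = caps.hdmi_6gb_en;
	} else {
		fprintf(stderr, "%s: Failed to get encoder_cap_info, keeping defaults (err %d)\n",
			__func__, result);
	}

	printf("HDMI_6GB_EN = %d\n", hdmi_6gb_en);
	return 0;
}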
@@ -816,7 +820,6 @@ void dce110_link_encoder_hw_init(
816 struct link_encoder *enc) 820 struct link_encoder *enc)
817{ 821{
818 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); 822 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
819 struct dc_context *ctx = enc110->base.ctx;
820 struct bp_transmitter_control cntl = { 0 }; 823 struct bp_transmitter_control cntl = { 0 };
821 enum bp_result result; 824 enum bp_result result;
822 825
@@ -828,11 +831,13 @@ void dce110_link_encoder_hw_init(
828 cntl.coherent = false; 831 cntl.coherent = false;
829 cntl.hpd_sel = enc110->base.hpd_source; 832 cntl.hpd_sel = enc110->base.hpd_source;
830 833
834 if (enc110->base.connector.id == CONNECTOR_ID_EDP)
835 cntl.signal = SIGNAL_TYPE_EDP;
836
831 result = link_transmitter_control(enc110, &cntl); 837 result = link_transmitter_control(enc110, &cntl);
832 838
833 if (result != BP_RESULT_OK) { 839 if (result != BP_RESULT_OK) {
834 dm_logger_write(ctx->logger, LOG_ERROR, 840 DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
835 "%s: Failed to execute VBIOS command table!\n",
836 __func__); 841 __func__);
837 BREAK_TO_DEBUGGER(); 842 BREAK_TO_DEBUGGER();
838 return; 843 return;
@@ -904,12 +909,10 @@ void dce110_link_encoder_enable_tmds_output(
904 struct link_encoder *enc, 909 struct link_encoder *enc,
905 enum clock_source_id clock_source, 910 enum clock_source_id clock_source,
906 enum dc_color_depth color_depth, 911 enum dc_color_depth color_depth,
907 bool hdmi, 912 enum signal_type signal,
908 bool dual_link,
909 uint32_t pixel_clock) 913 uint32_t pixel_clock)
910{ 914{
911 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); 915 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
912 struct dc_context *ctx = enc110->base.ctx;
913 struct bp_transmitter_control cntl = { 0 }; 916 struct bp_transmitter_control cntl = { 0 };
914 enum bp_result result; 917 enum bp_result result;
915 918
@@ -919,16 +922,12 @@ void dce110_link_encoder_enable_tmds_output(
 	cntl.engine_id = enc->preferred_engine;
 	cntl.transmitter = enc110->base.transmitter;
 	cntl.pll_id = clock_source;
-	if (hdmi) {
-		cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
-		cntl.lanes_number = 4;
-	} else if (dual_link) {
-		cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+	cntl.signal = signal;
+	if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
 		cntl.lanes_number = 8;
-	} else {
-		cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+	else
 		cntl.lanes_number = 4;
-	}
+
 	cntl.hpd_sel = enc110->base.hpd_source;
 
 	cntl.pixel_clock = pixel_clock;
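With the change above, dce110_link_encoder_enable_tmds_output() receives the signal_type directly instead of the old hdmi/dual_link boolean pair, and only dual-link DVI still needs 8 lanes. A caller would presumably map its connector state to a signal before the call, roughly as in this sketch (the enum is an illustrative subset, not the real signal_type):

/* Illustrative mapping from the old boolean pair to the new signal
 * argument; values mirror the SIGNAL_TYPE_* names in the hunk above. */
enum signal_type_sketch {
	SIG_HDMI_TYPE_A,
	SIG_DVI_SINGLE_LINK,
	SIG_DVI_DUAL_LINK,
};

static enum signal_type_sketch pick_tmds_signal(int hdmi, int dual_link)
{
	if (hdmi)
		return SIG_HDMI_TYPE_A;
	return dual_link ? SIG_DVI_DUAL_LINK : SIG_DVI_SINGLE_LINK;
}

static int tmds_lanes(enum signal_type_sketch sig)
{
	/* Matches the updated encoder code: 8 lanes only for dual-link DVI. */
	return sig == SIG_DVI_DUAL_LINK ? 8 : 4;
}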
@@ -937,8 +936,7 @@ void dce110_link_encoder_enable_tmds_output(
937 result = link_transmitter_control(enc110, &cntl); 936 result = link_transmitter_control(enc110, &cntl);
938 937
939 if (result != BP_RESULT_OK) { 938 if (result != BP_RESULT_OK) {
940 dm_logger_write(ctx->logger, LOG_ERROR, 939 DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
941 "%s: Failed to execute VBIOS command table!\n",
942 __func__); 940 __func__);
943 BREAK_TO_DEBUGGER(); 941 BREAK_TO_DEBUGGER();
944 } 942 }
@@ -951,7 +949,6 @@ void dce110_link_encoder_enable_dp_output(
951 enum clock_source_id clock_source) 949 enum clock_source_id clock_source)
952{ 950{
953 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); 951 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
954 struct dc_context *ctx = enc110->base.ctx;
955 struct bp_transmitter_control cntl = { 0 }; 952 struct bp_transmitter_control cntl = { 0 };
956 enum bp_result result; 953 enum bp_result result;
957 954
@@ -978,8 +975,7 @@ void dce110_link_encoder_enable_dp_output(
978 result = link_transmitter_control(enc110, &cntl); 975 result = link_transmitter_control(enc110, &cntl);
979 976
980 if (result != BP_RESULT_OK) { 977 if (result != BP_RESULT_OK) {
981 dm_logger_write(ctx->logger, LOG_ERROR, 978 DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
982 "%s: Failed to execute VBIOS command table!\n",
983 __func__); 979 __func__);
984 BREAK_TO_DEBUGGER(); 980 BREAK_TO_DEBUGGER();
985 } 981 }
@@ -992,7 +988,6 @@ void dce110_link_encoder_enable_dp_mst_output(
992 enum clock_source_id clock_source) 988 enum clock_source_id clock_source)
993{ 989{
994 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); 990 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
995 struct dc_context *ctx = enc110->base.ctx;
996 struct bp_transmitter_control cntl = { 0 }; 991 struct bp_transmitter_control cntl = { 0 };
997 enum bp_result result; 992 enum bp_result result;
998 993
@@ -1019,8 +1014,7 @@ void dce110_link_encoder_enable_dp_mst_output(
1019 result = link_transmitter_control(enc110, &cntl); 1014 result = link_transmitter_control(enc110, &cntl);
1020 1015
1021 if (result != BP_RESULT_OK) { 1016 if (result != BP_RESULT_OK) {
1022 dm_logger_write(ctx->logger, LOG_ERROR, 1017 DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
1023 "%s: Failed to execute VBIOS command table!\n",
1024 __func__); 1018 __func__);
1025 BREAK_TO_DEBUGGER(); 1019 BREAK_TO_DEBUGGER();
1026 } 1020 }
@@ -1034,7 +1028,6 @@ void dce110_link_encoder_disable_output(
1034 enum signal_type signal) 1028 enum signal_type signal)
1035{ 1029{
1036 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); 1030 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1037 struct dc_context *ctx = enc110->base.ctx;
1038 struct bp_transmitter_control cntl = { 0 }; 1031 struct bp_transmitter_control cntl = { 0 };
1039 enum bp_result result; 1032 enum bp_result result;
1040 1033
@@ -1062,8 +1055,7 @@ void dce110_link_encoder_disable_output(
1062 result = link_transmitter_control(enc110, &cntl); 1055 result = link_transmitter_control(enc110, &cntl);
1063 1056
1064 if (result != BP_RESULT_OK) { 1057 if (result != BP_RESULT_OK) {
1065 dm_logger_write(ctx->logger, LOG_ERROR, 1058 DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
1066 "%s: Failed to execute VBIOS command table!\n",
1067 __func__); 1059 __func__);
1068 BREAK_TO_DEBUGGER(); 1060 BREAK_TO_DEBUGGER();
1069 return; 1061 return;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 8ca9afe47a2b..0ec3433d34b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -210,8 +210,7 @@ void dce110_link_encoder_enable_tmds_output(
210 struct link_encoder *enc, 210 struct link_encoder *enc,
211 enum clock_source_id clock_source, 211 enum clock_source_id clock_source,
212 enum dc_color_depth color_depth, 212 enum dc_color_depth color_depth,
213 bool hdmi, 213 enum signal_type signal,
214 bool dual_link,
215 uint32_t pixel_clock); 214 uint32_t pixel_clock);
216 215
217/* enables DP PHY output */ 216/* enables DP PHY output */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 83bae207371d..444558ca6533 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -26,7 +26,8 @@
26#include "dc_bios_types.h" 26#include "dc_bios_types.h"
27#include "dce_stream_encoder.h" 27#include "dce_stream_encoder.h"
28#include "reg_helper.h" 28#include "reg_helper.h"
29 29#define DC_LOGGER \
30 enc110->base.ctx->logger
30enum DP_PIXEL_ENCODING { 31enum DP_PIXEL_ENCODING {
31DP_PIXEL_ENCODING_RGB444 = 0x00000000, 32DP_PIXEL_ENCODING_RGB444 = 0x00000000,
32DP_PIXEL_ENCODING_YCBCR422 = 0x00000001, 33DP_PIXEL_ENCODING_YCBCR422 = 0x00000001,
@@ -197,7 +198,6 @@ static void dce110_update_hdmi_info_packet(
197 uint32_t packet_index, 198 uint32_t packet_index,
198 const struct encoder_info_packet *info_packet) 199 const struct encoder_info_packet *info_packet)
199{ 200{
200 struct dc_context *ctx = enc110->base.ctx;
201 uint32_t cont, send, line; 201 uint32_t cont, send, line;
202 202
203 if (info_packet->valid) { 203 if (info_packet->valid) {
@@ -277,8 +277,7 @@ static void dce110_update_hdmi_info_packet(
277#endif 277#endif
278 default: 278 default:
279 /* invalid HW packet index */ 279 /* invalid HW packet index */
280 dm_logger_write( 280 DC_LOG_WARNING(
281 ctx->logger, LOG_WARNING,
282 "Invalid HW packet index: %s()\n", 281 "Invalid HW packet index: %s()\n",
283 __func__); 282 __func__);
284 return; 283 return;
@@ -920,6 +919,7 @@ static void dce110_stream_encoder_dp_blank(
 {
 	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
 	uint32_t retries = 0;
+	uint32_t reg1 = 0;
 	uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
 
 	/* Note: For CZ, we are changing driver default to disable
@@ -928,7 +928,10 @@ static void dce110_stream_encoder_dp_blank(
 	 * handful of panels that cannot handle disable stream at
 	 * HBLANK and will result in a white line flash across the
 	 * screen on stream disable. */
-
+	REG_GET(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, &reg1);
+	if ((reg1 & 0x1) == 0)
+		/*stream not enabled*/
+		return;
 	/* Specify the video stream disable point
 	 * (2 = start of the next vertical blank) */
 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
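The dp_blank hunks above read DP_VID_STREAM_ENABLE first and return immediately when the stream is already off, so the retry loop that waits for the stream to stop only runs when there is something to disable. A heavily simplified sketch, with a plain variable standing in for the DP_VID_STREAM_CNTL register access:

/* Fake register standing in for DP_VID_STREAM_CNTL; bit 0 mirrors
 * DP_VID_STREAM_ENABLE from the hunk above. */
static unsigned int dp_vid_stream_cntl;

static void dp_blank(void)
{
	if ((dp_vid_stream_cntl & 0x1) == 0)
		return;			/* stream not enabled, nothing to blank */

	dp_vid_stream_cntl &= ~0x1u;	/* disable the video stream */
}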
@@ -1382,7 +1385,7 @@ static void dce110_se_setup_hdmi_audio(
1382 crtc_info->requested_pixel_clock, 1385 crtc_info->requested_pixel_clock,
1383 crtc_info->calculated_pixel_clock, 1386 crtc_info->calculated_pixel_clock,
1384 &audio_clock_info); 1387 &audio_clock_info);
1385 dm_logger_write(enc->ctx->logger, LOG_HW_AUDIO, 1388 DC_LOG_HW_AUDIO(
1386 "\n%s:Input::requested_pixel_clock = %d" \ 1389 "\n%s:Input::requested_pixel_clock = %d" \
1387 "calculated_pixel_clock = %d \n", __func__, \ 1390 "calculated_pixel_clock = %d \n", __func__, \
1388 crtc_info->requested_pixel_clock, \ 1391 crtc_info->requested_pixel_clock, \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 0f662e6ee9bd..832c5daada35 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -38,6 +38,8 @@
38 38
39#define CTX \ 39#define CTX \
40 xfm_dce->base.ctx 40 xfm_dce->base.ctx
41#define DC_LOGGER \
42 xfm_dce->base.ctx->logger
41 43
42#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19)) 44#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
43#define GAMUT_MATRIX_SIZE 12 45#define GAMUT_MATRIX_SIZE 12
@@ -618,80 +620,48 @@ static void program_bit_depth_reduction(
618 enum dc_color_depth depth, 620 enum dc_color_depth depth,
619 const struct bit_depth_reduction_params *bit_depth_params) 621 const struct bit_depth_reduction_params *bit_depth_params)
620{ 622{
621 enum dcp_bit_depth_reduction_mode depth_reduction_mode; 623 enum dcp_out_trunc_round_depth trunc_round_depth;
622 enum dcp_spatial_dither_mode spatial_dither_mode; 624 enum dcp_out_trunc_round_mode trunc_mode;
623 bool frame_random_enable; 625 bool spatial_dither_enable;
624 bool rgb_random_enable;
625 bool highpass_random_enable;
626 626
627 ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */ 627 ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
628 628
629 if (bit_depth_params->flags.SPATIAL_DITHER_ENABLED) { 629 spatial_dither_enable = bit_depth_params->flags.SPATIAL_DITHER_ENABLED;
630 depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DITHER; 630 /* Default to 12 bit truncation without rounding */
631 frame_random_enable = true; 631 trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT;
632 rgb_random_enable = true; 632 trunc_mode = DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE;
633 highpass_random_enable = true; 633
634 634 if (bit_depth_params->flags.TRUNCATE_ENABLED) {
635 } else { 635 /* Don't enable dithering if truncation is enabled */
636 depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED; 636 spatial_dither_enable = false;
637 frame_random_enable = false; 637 trunc_mode = bit_depth_params->flags.TRUNCATE_MODE ?
638 rgb_random_enable = false; 638 DCP_OUT_TRUNC_ROUND_MODE_ROUND :
639 highpass_random_enable = false; 639 DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE;
640
641 if (bit_depth_params->flags.TRUNCATE_DEPTH == 0 ||
642 bit_depth_params->flags.TRUNCATE_DEPTH == 1)
643 trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_8BIT;
644 else if (bit_depth_params->flags.TRUNCATE_DEPTH == 2)
645 trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_10BIT;
646 else {
647 /*
648 * Invalid truncate/round depth. Setting here to 12bit
649 * to prevent use-before-initialize errors.
650 */
651 trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT;
652 BREAK_TO_DEBUGGER();
653 }
640 } 654 }
641 655
642 spatial_dither_mode = DCP_SPATIAL_DITHER_MODE_A_AA_A;
643
644 set_clamp(xfm_dce, depth); 656 set_clamp(xfm_dce, depth);
645 657 set_round(xfm_dce, trunc_mode, trunc_round_depth);
646 switch (depth_reduction_mode) { 658 set_dither(xfm_dce,
647 case DCP_BIT_DEPTH_REDUCTION_MODE_DITHER: 659 spatial_dither_enable,
648 /* Spatial Dither: Set round/truncate to bypass (12bit), 660 DCP_SPATIAL_DITHER_MODE_A_AA_A,
649 * enable Dither (30bpp) */ 661 DCP_SPATIAL_DITHER_DEPTH_30BPP,
650 set_round(xfm_dce, 662 bit_depth_params->flags.FRAME_RANDOM,
651 DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE, 663 bit_depth_params->flags.RGB_RANDOM,
652 DCP_OUT_TRUNC_ROUND_DEPTH_12BIT); 664 bit_depth_params->flags.HIGHPASS_RANDOM);
653
654 set_dither(xfm_dce, true, spatial_dither_mode,
655 DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
656 rgb_random_enable, highpass_random_enable);
657 break;
658 case DCP_BIT_DEPTH_REDUCTION_MODE_ROUND:
659 /* Round: Enable round (10bit), disable Dither */
660 set_round(xfm_dce,
661 DCP_OUT_TRUNC_ROUND_MODE_ROUND,
662 DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
663
664 set_dither(xfm_dce, false, spatial_dither_mode,
665 DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
666 rgb_random_enable, highpass_random_enable);
667 break;
668 case DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE: /* Truncate */
669 /* Truncate: Enable truncate (10bit), disable Dither */
670 set_round(xfm_dce,
671 DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
672 DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
673
674 set_dither(xfm_dce, false, spatial_dither_mode,
675 DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
676 rgb_random_enable, highpass_random_enable);
677 break;
678
679 case DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED: /* Disabled */
680 /* Truncate: Set round/truncate to bypass (12bit),
681 * disable Dither */
682 set_round(xfm_dce,
683 DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
684 DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
685
686 set_dither(xfm_dce, false, spatial_dither_mode,
687 DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
688 rgb_random_enable, highpass_random_enable);
689 break;
690 default:
691 /* Invalid DCP Depth reduction mode */
692 BREAK_TO_DEBUGGER();
693 break;
694 }
695} 665}
696 666
697static int dce_transform_get_max_num_of_supported_lines( 667static int dce_transform_get_max_num_of_supported_lines(
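The rewritten program_bit_depth_reduction() above drives set_clamp()/set_round()/set_dither() directly from the bit_depth_params flags; the only mapping worth spelling out is how TRUNCATE_DEPTH selects the round/truncate depth (0 or 1 selects 8 bit, 2 selects 10 bit, anything else falls back to 12 bit and trips BREAK_TO_DEBUGGER()). In sketch form:

enum trunc_depth_sketch { TRUNC_8BIT, TRUNC_10BIT, TRUNC_12BIT };

/* Mirrors the TRUNCATE_DEPTH handling in the new code path above;
 * the 12-bit fallback guards against an uninitialized value. */
static enum trunc_depth_sketch map_truncate_depth(unsigned int truncate_depth)
{
	switch (truncate_depth) {
	case 0:
	case 1:
		return TRUNC_8BIT;
	case 2:
		return TRUNC_10BIT;
	default:
		return TRUNC_12BIT;	/* invalid input, debugger-break path */
	}
}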
@@ -725,8 +695,7 @@ static int dce_transform_get_max_num_of_supported_lines(
725 break; 695 break;
726 696
727 default: 697 default:
728 dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING, 698 DC_LOG_WARNING("%s: Invalid LB pixel depth",
729 "%s: Invalid LB pixel depth",
730 __func__); 699 __func__);
731 BREAK_TO_DEBUGGER(); 700 BREAK_TO_DEBUGGER();
732 break; 701 break;
@@ -823,8 +792,7 @@ static void dce_transform_set_pixel_storage_depth(
823 if (!(xfm_dce->lb_pixel_depth_supported & depth)) { 792 if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
824 /*we should use unsupported capabilities 793 /*we should use unsupported capabilities
825 * unless it is required by w/a*/ 794 * unless it is required by w/a*/
826 dm_logger_write(xfm->ctx->logger, LOG_WARNING, 795 DC_LOG_WARNING("%s: Capability not supported",
827 "%s: Capability not supported",
828 __func__); 796 __func__);
829 } 797 }
830} 798}
@@ -879,6 +847,7 @@ static void dce_transform_set_gamut_remap(
 	const struct xfm_grph_csc_adjustment *adjust)
 {
 	struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+	int i = 0;
 
 	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
 		/* Bypass if type is bypass or hw */
@@ -887,20 +856,8 @@ static void dce_transform_set_gamut_remap(
 		struct fixed31_32 arr_matrix[GAMUT_MATRIX_SIZE];
 		uint16_t arr_reg_val[GAMUT_MATRIX_SIZE];
 
-		arr_matrix[0] = adjust->temperature_matrix[0];
-		arr_matrix[1] = adjust->temperature_matrix[1];
-		arr_matrix[2] = adjust->temperature_matrix[2];
-		arr_matrix[3] = dal_fixed31_32_zero;
-
-		arr_matrix[4] = adjust->temperature_matrix[3];
-		arr_matrix[5] = adjust->temperature_matrix[4];
-		arr_matrix[6] = adjust->temperature_matrix[5];
-		arr_matrix[7] = dal_fixed31_32_zero;
-
-		arr_matrix[8] = adjust->temperature_matrix[6];
-		arr_matrix[9] = adjust->temperature_matrix[7];
-		arr_matrix[10] = adjust->temperature_matrix[8];
-		arr_matrix[11] = dal_fixed31_32_zero;
+		for (i = 0; i < GAMUT_MATRIX_SIZE; i++)
+			arr_matrix[i] = adjust->temperature_matrix[i];
 
 		convert_float_matrix(
 			arr_reg_val, arr_matrix, GAMUT_MATRIX_SIZE);
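The set_gamut_remap() hunk above collapses twelve hand-written assignments into a loop over GAMUT_MATRIX_SIZE, which only works if temperature_matrix now carries all twelve entries (including the column the old code padded with dal_fixed31_32_zero). A tiny sketch of the copy, with plain doubles standing in for struct fixed31_32:

#define GAMUT_MATRIX_SIZE 12

/* doubles stand in for struct fixed31_32 purely for illustration */
static void copy_gamut_matrix(double dst[GAMUT_MATRIX_SIZE],
			      const double temperature_matrix[GAMUT_MATRIX_SIZE])
{
	int i;

	for (i = 0; i < GAMUT_MATRIX_SIZE; i++)
		dst[i] = temperature_matrix[i];
}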
@@ -1126,7 +1083,7 @@ void dce110_opp_set_csc_adjustment(
1126 CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC; 1083 CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
1127 1084
1128 program_color_matrix( 1085 program_color_matrix(
1129 xfm_dce, tbl_entry, GRAPHICS_CSC_ADJUST_TYPE_SW); 1086 xfm_dce, tbl_entry, GRPH_COLOR_MATRIX_SW);
1130 1087
1131 /* We did everything ,now program DxOUTPUT_CSC_CONTROL */ 1088 /* We did everything ,now program DxOUTPUT_CSC_CONTROL */
1132 configure_graphics_mode(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW, 1089 configure_graphics_mode(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
@@ -1215,8 +1172,7 @@ static void program_pwl(struct dce_transform *xfm_dce,
1215 } 1172 }
1216 1173
1217 if (counter == max_tries) { 1174 if (counter == max_tries) {
1218 dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING, 1175 DC_LOG_WARNING("%s: regamma lut was not powered on "
1219 "%s: regamma lut was not powered on "
1220 "in a timely manner," 1176 "in a timely manner,"
1221 " programming still proceeds\n", 1177 " programming still proceeds\n",
1222 __func__); 1178 __func__);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
index bfc94b4927b9..948281d8b6af 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -248,6 +248,7 @@
248 XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\ 248 XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\
249 XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ 249 XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
250 XFM_SF(DCP0_REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\ 250 XFM_SF(DCP0_REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\
251 XFM_SF(DCP0_REGAMMA_LUT_WRITE_EN_MASK, REGAMMA_LUT_WRITE_EN_MASK, mask_sh),\
251 XFM_SF(SCL0_SCL_MODE, SCL_MODE, mask_sh), \ 252 XFM_SF(SCL0_SCL_MODE, SCL_MODE, mask_sh), \
252 XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \ 253 XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
253 XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \ 254 XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 469af0587604..41f83ecd7469 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -69,7 +69,7 @@ static const struct dce100_hw_seq_reg_offsets reg_offsets[] = {
69 ******************************************************************************/ 69 ******************************************************************************/
70/***************************PIPE_CONTROL***********************************/ 70/***************************PIPE_CONTROL***********************************/
71 71
72static bool dce100_enable_display_power_gating( 72bool dce100_enable_display_power_gating(
73 struct dc *dc, 73 struct dc *dc,
74 uint8_t controller_id, 74 uint8_t controller_id,
75 struct dc_bios *dcb, 75 struct dc_bios *dcb,
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index cb5384ef46c3..c6ec0ed6ec3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -38,5 +38,9 @@ void dce100_set_bandwidth(
38 struct dc_state *context, 38 struct dc_state *context,
39 bool decrease_allowed); 39 bool decrease_allowed);
40 40
41bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
42 struct dc_bios *dcb,
43 enum pipe_gating_control power_gating);
44
41#endif /* __DC_HWSS_DCE100_H__ */ 45#endif /* __DC_HWSS_DCE100_H__ */
42 46
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3ea43e2a9450..3092f76bdb75 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -51,6 +51,9 @@
51#include "dce/dce_10_0_d.h" 51#include "dce/dce_10_0_d.h"
52#include "dce/dce_10_0_sh_mask.h" 52#include "dce/dce_10_0_sh_mask.h"
53 53
54#include "dce/dce_dmcu.h"
55#include "dce/dce_abm.h"
56
54#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT 57#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
55#include "gmc/gmc_8_2_d.h" 58#include "gmc/gmc_8_2_d.h"
56#include "gmc/gmc_8_2_sh_mask.h" 59#include "gmc/gmc_8_2_sh_mask.h"
@@ -320,7 +323,29 @@ static const struct dce110_clk_src_mask cs_mask = {
320 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) 323 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
321}; 324};
322 325
326static const struct dce_dmcu_registers dmcu_regs = {
327 DMCU_DCE110_COMMON_REG_LIST()
328};
329
330static const struct dce_dmcu_shift dmcu_shift = {
331 DMCU_MASK_SH_LIST_DCE110(__SHIFT)
332};
333
334static const struct dce_dmcu_mask dmcu_mask = {
335 DMCU_MASK_SH_LIST_DCE110(_MASK)
336};
337
338static const struct dce_abm_registers abm_regs = {
339 ABM_DCE110_COMMON_REG_LIST()
340};
341
342static const struct dce_abm_shift abm_shift = {
343 ABM_MASK_SH_LIST_DCE110(__SHIFT)
344};
323 345
346static const struct dce_abm_mask abm_mask = {
347 ABM_MASK_SH_LIST_DCE110(_MASK)
348};
324 349
325#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 350#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
326 351
@@ -622,6 +647,12 @@ static void destruct(struct dce110_resource_pool *pool)
622 if (pool->base.display_clock != NULL) 647 if (pool->base.display_clock != NULL)
623 dce_disp_clk_destroy(&pool->base.display_clock); 648 dce_disp_clk_destroy(&pool->base.display_clock);
624 649
650 if (pool->base.abm != NULL)
651 dce_abm_destroy(&pool->base.abm);
652
653 if (pool->base.dmcu != NULL)
654 dce_dmcu_destroy(&pool->base.dmcu);
655
625 if (pool->base.irqs != NULL) 656 if (pool->base.irqs != NULL)
626 dal_irq_service_destroy(&pool->base.irqs); 657 dal_irq_service_destroy(&pool->base.irqs);
627} 658}
@@ -829,6 +860,25 @@ static bool construct(
829 goto res_create_fail; 860 goto res_create_fail;
830 } 861 }
831 862
863 pool->base.dmcu = dce_dmcu_create(ctx,
864 &dmcu_regs,
865 &dmcu_shift,
866 &dmcu_mask);
867 if (pool->base.dmcu == NULL) {
868 dm_error("DC: failed to create dmcu!\n");
869 BREAK_TO_DEBUGGER();
870 goto res_create_fail;
871 }
872
873 pool->base.abm = dce_abm_create(ctx,
874 &abm_regs,
875 &abm_shift,
876 &abm_mask);
877 if (pool->base.abm == NULL) {
878 dm_error("DC: failed to create abm!\n");
879 BREAK_TO_DEBUGGER();
880 goto res_create_fail;
881 }
832 882
833 /* get static clock information for PPLIB or firmware, save 883 /* get static clock information for PPLIB or firmware, save
834 * max_clock_state 884 * max_clock_state
@@ -849,9 +899,11 @@ static bool construct(
849 *************************************************/ 899 *************************************************/
850 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 900 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
851 pool->base.pipe_count = res_cap.num_timing_generator; 901 pool->base.pipe_count = res_cap.num_timing_generator;
902 pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
852 dc->caps.max_downscale_ratio = 200; 903 dc->caps.max_downscale_ratio = 200;
853 dc->caps.i2c_speed_in_khz = 40; 904 dc->caps.i2c_speed_in_khz = 40;
854 dc->caps.max_cursor_size = 128; 905 dc->caps.max_cursor_size = 128;
906 dc->caps.dual_link_dvi = true;
855 907
856 for (i = 0; i < pool->base.pipe_count; i++) { 908 for (i = 0; i < pool->base.pipe_count; i++) {
857 pool->base.timing_generators[i] = 909 pool->base.timing_generators[i] =
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 6923662413cd..775d3bf0bd39 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -34,6 +34,8 @@
34 34
35#include "dce110_compressor.h" 35#include "dce110_compressor.h"
36 36
37#define DC_LOGGER \
38 cp110->base.ctx->logger
37#define DCP_REG(reg)\ 39#define DCP_REG(reg)\
38 (reg + cp110->offsets.dcp_offset) 40 (reg + cp110->offsets.dcp_offset)
39#define DMIF_REG(reg)\ 41#define DMIF_REG(reg)\
@@ -120,14 +122,10 @@ static void wait_for_fbc_state_changed(
120 } 122 }
121 123
122 if (counter == 10) { 124 if (counter == 10) {
123 dm_logger_write( 125 DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
124 cp110->base.ctx->logger, LOG_WARNING,
125 "%s: wait counter exceeded, changes to HW not applied",
126 __func__); 126 __func__);
127 } else { 127 } else {
128 dm_logger_write( 128 DC_LOG_SYNC("FBC status changed to %d", enabled);
129 cp110->base.ctx->logger, LOG_SYNC,
130 "FBC status changed to %d", enabled);
131 } 129 }
132 130
133 131
@@ -310,9 +308,7 @@ void dce110_compressor_program_compressed_surface_address_and_pitch(
310 if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1) 308 if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
311 fbc_pitch = fbc_pitch / 8; 309 fbc_pitch = fbc_pitch / 8;
312 else 310 else
313 dm_logger_write( 311 DC_LOG_WARNING("%s: Unexpected DCE11 compression ratio",
314 compressor->ctx->logger, LOG_WARNING,
315 "%s: Unexpected DCE11 compression ratio",
316 __func__); 312 __func__);
317 313
318 /* Clear content first. */ 314 /* Clear content first. */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 86cdd7b4811f..30dd62f0f5fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -57,6 +57,8 @@
57#include "dce/dce_11_0_sh_mask.h" 57#include "dce/dce_11_0_sh_mask.h"
58#include "custom_float.h" 58#include "custom_float.h"
59 59
60#include "atomfirmware.h"
61
60/* 62/*
61 * All values are in milliseconds; 63 * All values are in milliseconds;
62 * For eDP, after power-up/power/down, 64 * For eDP, after power-up/power/down,
@@ -68,6 +70,8 @@
68 70
69#define CTX \ 71#define CTX \
70 hws->ctx 72 hws->ctx
73#define DC_LOGGER \
74 ctx->logger
71#define REG(reg)\ 75#define REG(reg)\
72 hws->regs->reg 76 hws->regs->reg
73 77
@@ -275,7 +279,7 @@ dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
275 build_prescale_params(&prescale_params, plane_state); 279 build_prescale_params(&prescale_params, plane_state);
276 ipp->funcs->ipp_program_prescale(ipp, &prescale_params); 280 ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
277 281
278 if (plane_state->gamma_correction && dce_use_lut(plane_state)) 282 if (plane_state->gamma_correction && dce_use_lut(plane_state->format))
279 ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction); 283 ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
280 284
281 if (tf == NULL) { 285 if (tf == NULL) {
@@ -407,6 +411,10 @@ static bool convert_to_custom_float(struct pwl_result_data *rgb_resulted,
407 return true; 411 return true;
408} 412}
409 413
414#define MAX_LOW_POINT 25
415#define NUMBER_REGIONS 16
416#define NUMBER_SW_SEGMENTS 16
417
410static bool 418static bool
411dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, 419dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
412 struct pwl_params *regamma_params) 420 struct pwl_params *regamma_params)
@@ -421,8 +429,8 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
421 struct fixed31_32 y1_min; 429 struct fixed31_32 y1_min;
422 struct fixed31_32 y3_max; 430 struct fixed31_32 y3_max;
423 431
424 int32_t segment_start, segment_end; 432 int32_t region_start, region_end;
425 uint32_t i, j, k, seg_distr[16], increment, start_index, hw_points; 433 uint32_t i, j, k, seg_distr[NUMBER_REGIONS], increment, start_index, hw_points;
426 434
427 if (output_tf == NULL || regamma_params == NULL || output_tf->type == TF_TYPE_BYPASS) 435 if (output_tf == NULL || regamma_params == NULL || output_tf->type == TF_TYPE_BYPASS)
428 return false; 436 return false;
@@ -437,34 +445,20 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
437 /* 16 segments 445 /* 16 segments
438 * segments are from 2^-11 to 2^5 446 * segments are from 2^-11 to 2^5
439 */ 447 */
440 segment_start = -11; 448 region_start = -11;
441 segment_end = 5; 449 region_end = region_start + NUMBER_REGIONS;
442 450
443 seg_distr[0] = 2; 451 for (i = 0; i < NUMBER_REGIONS; i++)
444 seg_distr[1] = 2; 452 seg_distr[i] = 4;
445 seg_distr[2] = 2;
446 seg_distr[3] = 2;
447 seg_distr[4] = 2;
448 seg_distr[5] = 2;
449 seg_distr[6] = 3;
450 seg_distr[7] = 4;
451 seg_distr[8] = 4;
452 seg_distr[9] = 4;
453 seg_distr[10] = 4;
454 seg_distr[11] = 5;
455 seg_distr[12] = 5;
456 seg_distr[13] = 5;
457 seg_distr[14] = 5;
458 seg_distr[15] = 5;
459 453
460 } else { 454 } else {
461 /* 10 segments 455 /* 10 segments
462 * segment is from 2^-10 to 2^0 456 * segment is from 2^-10 to 2^0
463 */ 457 */
464 segment_start = -10; 458 region_start = -10;
465 segment_end = 0; 459 region_end = 0;
466 460
467 seg_distr[0] = 3; 461 seg_distr[0] = 4;
468 seg_distr[1] = 4; 462 seg_distr[1] = 4;
469 seg_distr[2] = 4; 463 seg_distr[2] = 4;
470 seg_distr[3] = 4; 464 seg_distr[3] = 4;
@@ -472,8 +466,8 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
472 seg_distr[5] = 4; 466 seg_distr[5] = 4;
473 seg_distr[6] = 4; 467 seg_distr[6] = 4;
474 seg_distr[7] = 4; 468 seg_distr[7] = 4;
475 seg_distr[8] = 5; 469 seg_distr[8] = 4;
476 seg_distr[9] = 5; 470 seg_distr[9] = 4;
477 seg_distr[10] = -1; 471 seg_distr[10] = -1;
478 seg_distr[11] = -1; 472 seg_distr[11] = -1;
479 seg_distr[12] = -1; 473 seg_distr[12] = -1;
@@ -488,10 +482,12 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
488 } 482 }
489 483
490 j = 0; 484 j = 0;
491 for (k = 0; k < (segment_end - segment_start); k++) { 485 for (k = 0; k < (region_end - region_start); k++) {
492 increment = 32 / (1 << seg_distr[k]); 486 increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
493 start_index = (segment_start + k + 25) * 32; 487 start_index = (region_start + k + MAX_LOW_POINT) *
494 for (i = start_index; i < start_index + 32; i += increment) { 488 NUMBER_SW_SEGMENTS;
489 for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
490 i += increment) {
495 if (j == hw_points - 1) 491 if (j == hw_points - 1)
496 break; 492 break;
497 rgb_resulted[j].red = output_tf->tf_pts.red[i]; 493 rgb_resulted[j].red = output_tf->tf_pts.red[i];
@@ -502,15 +498,15 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
502 } 498 }
503 499
504 /* last point */ 500 /* last point */
505 start_index = (segment_end + 25) * 32; 501 start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
506 rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index]; 502 rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
507 rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; 503 rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
508 rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; 504 rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
509 505
510 arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2), 506 arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
511 dal_fixed31_32_from_int(segment_start)); 507 dal_fixed31_32_from_int(region_start));
512 arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2), 508 arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
513 dal_fixed31_32_from_int(segment_end)); 509 dal_fixed31_32_from_int(region_end));
514 510
515 y_r = rgb_resulted[0].red; 511 y_r = rgb_resulted[0].red;
516 y_g = rgb_resulted[0].green; 512 y_g = rgb_resulted[0].green;
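A quick way to sanity-check the new region constants above: since the loop's increment is NUMBER_SW_SEGMENTS / (1 << seg_distr[k]), each region contributes exactly (1 << seg_distr[k]) sample points. The standalone sketch below is not driver code (the helper name is invented); it just reproduces that arithmetic for the "16 regions, all set to 4" case, giving 256 candidate points before the hw_points - 1 clamp in the real loop.

#include <stdio.h>

#define NUMBER_SW_SEGMENTS 16

/* Count the points the sampling loop above would visit for a given
 * segment distribution; -1 marks an unused region, as in the hunk. */
static unsigned int count_sampled_points(const int *seg_distr, int regions)
{
	unsigned int total = 0;
	int k;

	for (k = 0; k < regions; k++) {
		unsigned int increment;

		if (seg_distr[k] < 0 || (1u << seg_distr[k]) > NUMBER_SW_SEGMENTS)
			continue;
		increment = NUMBER_SW_SEGMENTS / (1u << seg_distr[k]);
		/* one region spans NUMBER_SW_SEGMENTS software points,
		 * stepped through by 'increment' */
		total += NUMBER_SW_SEGMENTS / increment;
	}
	return total;
}

int main(void)
{
	int seg_distr[16];
	int i;

	for (i = 0; i < 16; i++)
		seg_distr[i] = 4;

	printf("%u points\n", count_sampled_points(seg_distr, 16)); /* 256 */
	return 0;
}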
@@ -625,7 +621,7 @@ static enum dc_status bios_parser_crtc_source_select(
625 const struct dc_sink *sink = pipe_ctx->stream->sink; 621 const struct dc_sink *sink = pipe_ctx->stream->sink;
626 622
627 crtc_source_select.engine_id = pipe_ctx->stream_res.stream_enc->id; 623 crtc_source_select.engine_id = pipe_ctx->stream_res.stream_enc->id;
628 crtc_source_select.controller_id = pipe_ctx->pipe_idx + 1; 624 crtc_source_select.controller_id = pipe_ctx->stream_res.tg->inst + 1;
629 /*TODO: Need to un-hardcode color depth, dp_audio and account for 625 /*TODO: Need to un-hardcode color depth, dp_audio and account for
630 * the case where signal and sink signal is different (translator 626 * the case where signal and sink signal is different (translator
631 * encoder)*/ 627 * encoder)*/
@@ -688,15 +684,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
688 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 684 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
689 struct dc_link *link = pipe_ctx->stream->sink->link; 685 struct dc_link *link = pipe_ctx->stream->sink->link;
690 686
691 /* 1. update AVI info frame (HDMI, DP) 687
692 * we always need to update info frame
693 */
694 uint32_t active_total_with_borders; 688 uint32_t active_total_with_borders;
695 uint32_t early_control = 0; 689 uint32_t early_control = 0;
696 struct timing_generator *tg = pipe_ctx->stream_res.tg; 690 struct timing_generator *tg = pipe_ctx->stream_res.tg;
697 691
 698 	/* TODOFPGA may change to hwss.update_info_frame */	692	/* For MST, multiple streams go to only one link.
 693	 * Connect DIG back_end to front_end during enable_stream and
 694	 * disconnect them during disable_stream.
 695	 * This keeps the stream and link logic cleanly separated. */
696 link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
697 pipe_ctx->stream_res.stream_enc->id, true);
698
699 /* update AVI info frame (HDMI, DP)*/
700 /* TODO: FPGA may change to hwss.update_info_frame */
699 dce110_update_info_frame(pipe_ctx); 701 dce110_update_info_frame(pipe_ctx);
702
700 /* enable early control to avoid corruption on DP monitor*/ 703 /* enable early control to avoid corruption on DP monitor*/
701 active_total_with_borders = 704 active_total_with_borders =
702 timing->h_addressable 705 timing->h_addressable
@@ -717,12 +720,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
717 pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); 720 pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
718 } 721 }
719 722
720 /* For MST, there are multiply stream go to only one link. 723
721 * connect DIG back_end to front_end while enable_stream and 724
722 * disconnect them during disable_stream
723 * BY this, it is logic clean to separate stream and link */
724 link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
725 pipe_ctx->stream_res.stream_enc->id, true);
726 725
727} 726}
728 727
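The moved comment above describes the design choice: the DIG back-end to front-end mapping is owned by the stream enable/disable path rather than by link setup, so several MST streams can share one link cleanly. A toy model of that pairing, with all names invented and no driver types, looks like this:

#include <stdio.h>

struct toy_link { int connected_fe_count; };

/* enable owns the connect ... */
static void toy_enable_stream(struct toy_link *link, int fe_id)
{
	link->connected_fe_count++;
	printf("connect BE to FE %d (%d FEs on link)\n",
	       fe_id, link->connected_fe_count);
}

/* ... and disable owns the matching disconnect, keeping link code
 * free of per-stream state. */
static void toy_disable_stream(struct toy_link *link, int fe_id)
{
	link->connected_fe_count--;
	printf("disconnect FE %d (%d FEs on link)\n",
	       fe_id, link->connected_fe_count);
}

int main(void)
{
	struct toy_link link = { 0 };

	toy_enable_stream(&link, 0);	/* two MST streams, one link */
	toy_enable_stream(&link, 1);
	toy_disable_stream(&link, 1);
	toy_disable_stream(&link, 0);
	return 0;
}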
@@ -738,10 +737,14 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
738 737
739static bool is_panel_powered_on(struct dce_hwseq *hws) 738static bool is_panel_powered_on(struct dce_hwseq *hws)
740{ 739{
741 uint32_t value; 740 uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
741
742
743 REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
744
745 REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
742 746
743 REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value); 747 return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
744 return value == 1;
745} 748}
746 749
747static enum bp_result link_transmitter_control( 750static enum bp_result link_transmitter_control(
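As I read the widened check above, the panel now counts as powered when either the power-sequencer target state reports on, or DIGON is asserted together with its override. Restated as a plain boolean, with the register fields reduced to bare integers (not the driver's REG_GET macros):

#include <stdbool.h>
#include <stdint.h>

bool panel_powered_on(uint32_t pwr_seq_state,
		      uint32_t dig_on, uint32_t dig_on_ovrd)
{
	return pwr_seq_state == 1 || (dig_on == 1 && dig_on_ovrd == 1);
}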
@@ -822,7 +825,7 @@ void hwss_edp_wait_for_hpd_ready(
822 dal_gpio_destroy_irq(&hpd); 825 dal_gpio_destroy_irq(&hpd);
823 826
824 if (false == edp_hpd_high) { 827 if (false == edp_hpd_high) {
825 dm_logger_write(ctx->logger, LOG_ERROR, 828 DC_LOG_ERROR(
826 "%s: wait timed out!\n", __func__); 829 "%s: wait timed out!\n", __func__);
827 } 830 }
828} 831}
@@ -846,7 +849,7 @@ void hwss_edp_power_control(
846 if (power_up != is_panel_powered_on(hwseq)) { 849 if (power_up != is_panel_powered_on(hwseq)) {
847 /* Send VBIOS command to prompt eDP panel power */ 850 /* Send VBIOS command to prompt eDP panel power */
848 851
849 dm_logger_write(ctx->logger, LOG_HW_RESUME_S3, 852 DC_LOG_HW_RESUME_S3(
850 "%s: Panel Power action: %s\n", 853 "%s: Panel Power action: %s\n",
851 __func__, (power_up ? "On":"Off")); 854 __func__, (power_up ? "On":"Off"));
852 855
@@ -862,11 +865,11 @@ void hwss_edp_power_control(
862 bp_result = link_transmitter_control(ctx->dc_bios, &cntl); 865 bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
863 866
864 if (bp_result != BP_RESULT_OK) 867 if (bp_result != BP_RESULT_OK)
865 dm_logger_write(ctx->logger, LOG_ERROR, 868 DC_LOG_ERROR(
866 "%s: Panel Power bp_result: %d\n", 869 "%s: Panel Power bp_result: %d\n",
867 __func__, bp_result); 870 __func__, bp_result);
868 } else { 871 } else {
869 dm_logger_write(ctx->logger, LOG_HW_RESUME_S3, 872 DC_LOG_HW_RESUME_S3(
870 "%s: Skipping Panel Power action: %s\n", 873 "%s: Skipping Panel Power action: %s\n",
871 __func__, (power_up ? "On":"Off")); 874 __func__, (power_up ? "On":"Off"));
872 } 875 }
@@ -892,7 +895,7 @@ void hwss_edp_backlight_control(
892 } 895 }
893 896
894 if (enable && is_panel_backlight_on(hws)) { 897 if (enable && is_panel_backlight_on(hws)) {
895 dm_logger_write(ctx->logger, LOG_HW_RESUME_S3, 898 DC_LOG_HW_RESUME_S3(
896 "%s: panel already powered up. Do nothing.\n", 899 "%s: panel already powered up. Do nothing.\n",
897 __func__); 900 __func__);
898 return; 901 return;
@@ -900,7 +903,7 @@ void hwss_edp_backlight_control(
900 903
901 /* Send VBIOS command to control eDP panel backlight */ 904 /* Send VBIOS command to control eDP panel backlight */
902 905
903 dm_logger_write(ctx->logger, LOG_HW_RESUME_S3, 906 DC_LOG_HW_RESUME_S3(
904 "%s: backlight action: %s\n", 907 "%s: backlight action: %s\n",
905 __func__, (enable ? "On":"Off")); 908 __func__, (enable ? "On":"Off"));
906 909
@@ -914,6 +917,7 @@ void hwss_edp_backlight_control(
914 /*todo: unhardcode*/ 917 /*todo: unhardcode*/
915 cntl.lanes_number = LANE_COUNT_FOUR; 918 cntl.lanes_number = LANE_COUNT_FOUR;
916 cntl.hpd_sel = link->link_enc->hpd_source; 919 cntl.hpd_sel = link->link_enc->hpd_source;
920 cntl.signal = SIGNAL_TYPE_EDP;
917 921
918 /* For eDP, the following delays might need to be considered 922 /* For eDP, the following delays might need to be considered
919 * after link training completed: 923 * after link training completed:
@@ -926,7 +930,13 @@ void hwss_edp_backlight_control(
926 * Enable it in the future if necessary. 930 * Enable it in the future if necessary.
927 */ 931 */
928 /* dc_service_sleep_in_milliseconds(50); */ 932 /* dc_service_sleep_in_milliseconds(50); */
933 /*edp 1.2*/
934 if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
935 edp_receiver_ready_T7(link);
929 link_transmitter_control(ctx->dc_bios, &cntl); 936 link_transmitter_control(ctx->dc_bios, &cntl);
937 /*edp 1.2*/
938 if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF)
939 edp_receiver_ready_T9(link);
930} 940}
931 941
932void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option) 942void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
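The two new waits bracket the VBIOS transmitter call asymmetrically: T7 is honoured before a backlight-on is issued, T9 after a backlight-off. A minimal sketch of that ordering, with the delay and command helpers stubbed out (they are not the driver's functions):

#include <stdbool.h>
#include <stdio.h>

static void wait_t7(void) { puts("wait eDP T7"); }	/* stub delay */
static void wait_t9(void) { puts("wait eDP T9"); }	/* stub delay */
static void send_backlight_command(bool on)
{
	printf("backlight %s\n", on ? "on" : "off");
}

static void edp_backlight_control(bool enable)
{
	if (enable)
		wait_t7();		/* receiver ready before enabling */
	send_backlight_command(enable);
	if (!enable)
		wait_t9();		/* settle time after disabling */
}

int main(void)
{
	edp_backlight_control(true);
	edp_backlight_control(false);
	return 0;
}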
@@ -946,7 +956,11 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
946 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( 956 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
947 pipe_ctx->stream_res.stream_enc, true); 957 pipe_ctx->stream_res.stream_enc, true);
948 if (pipe_ctx->stream_res.audio) { 958 if (pipe_ctx->stream_res.audio) {
949 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 959 if (option != KEEP_ACQUIRED_RESOURCE ||
960 !dc->debug.az_endpoint_mute_only) {
 961 	/* only disable az_endpoint if powering down or freeing */
962 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
963 }
950 964
951 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 965 if (dc_is_dp_signal(pipe_ctx->stream->signal))
952 pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( 966 pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
@@ -969,9 +983,6 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
969 */ 983 */
970 } 984 }
971 985
972 /* blank at encoder level */
973 if (dc_is_dp_signal(pipe_ctx->stream->signal))
974 pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
975 986
976 link->link_enc->funcs->connect_dig_be_to_fe( 987 link->link_enc->funcs->connect_dig_be_to_fe(
977 link->link_enc, 988 link->link_enc,
@@ -984,12 +995,32 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
984 struct dc_link_settings *link_settings) 995 struct dc_link_settings *link_settings)
985{ 996{
986 struct encoder_unblank_param params = { { 0 } }; 997 struct encoder_unblank_param params = { { 0 } };
998 struct dc_stream_state *stream = pipe_ctx->stream;
999 struct dc_link *link = stream->sink->link;
987 1000
988 /* only 3 items below are used by unblank */ 1001 /* only 3 items below are used by unblank */
989 params.pixel_clk_khz = 1002 params.pixel_clk_khz =
990 pipe_ctx->stream->timing.pix_clk_khz; 1003 pipe_ctx->stream->timing.pix_clk_khz;
991 params.link_settings.link_rate = link_settings->link_rate; 1004 params.link_settings.link_rate = link_settings->link_rate;
992 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params); 1005
1006 if (dc_is_dp_signal(pipe_ctx->stream->signal))
1007 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
1008
1009 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1010 link->dc->hwss.edp_backlight_control(link, true);
1011 stream->bl_pwm_level = 0;
1012 }
1013}
1014void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
1015{
1016 struct dc_stream_state *stream = pipe_ctx->stream;
1017 struct dc_link *link = stream->sink->link;
1018
1019 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
1020 link->dc->hwss.edp_backlight_control(link, false);
1021
1022 if (dc_is_dp_signal(pipe_ctx->stream->signal))
1023 pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
993} 1024}
994 1025
995 1026
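Taken together with the new dce110_blank_stream(), the ordering is deliberately symmetric: unblank drives the DP encoder first and then restores the eDP backlight, while blank drops the backlight before blanking the encoder. A compact sketch of just that ordering, with stub helpers rather than the driver API:

#include <stdbool.h>
#include <stdio.h>

static void dp_unblank(void)       { puts("dp_unblank"); }
static void dp_blank(void)         { puts("dp_blank"); }
static void edp_backlight(bool on) { printf("backlight %s\n", on ? "on" : "off"); }

static void unblank_stream(bool is_edp)
{
	dp_unblank();			/* video first */
	if (is_edp)
		edp_backlight(true);	/* then light the panel */
}

static void blank_stream(bool is_edp)
{
	if (is_edp)
		edp_backlight(false);	/* darken the panel first */
	dp_blank();			/* then stop the video */
}

int main(void)
{
	unblank_stream(true);
	blank_stream(true);
	return 0;
}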
@@ -1091,7 +1122,7 @@ static void build_audio_output(
1091 1122
1092 audio_output->pll_info.dto_source = 1123 audio_output->pll_info.dto_source =
1093 translate_to_dto_source( 1124 translate_to_dto_source(
1094 pipe_ctx->pipe_idx + 1); 1125 pipe_ctx->stream_res.tg->inst + 1);
1095 1126
1096 /* TODO hard code to enable for now. Need get from stream */ 1127 /* TODO hard code to enable for now. Need get from stream */
1097 audio_output->pll_info.ss_enabled = true; 1128 audio_output->pll_info.ss_enabled = true;
@@ -1103,7 +1134,7 @@ static void build_audio_output(
1103static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx, 1134static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
1104 struct tg_color *color) 1135 struct tg_color *color)
1105{ 1136{
1106 uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->pipe_idx) / 4; 1137 uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->stream_res.tg->inst) / 4;
1107 1138
1108 switch (pipe_ctx->plane_res.scl_data.format) { 1139 switch (pipe_ctx->plane_res.scl_data.format) {
1109 case PIXEL_FORMAT_ARGB8888: 1140 case PIXEL_FORMAT_ARGB8888:
@@ -1300,10 +1331,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(
1300 1331
1301 resource_build_info_frame(pipe_ctx); 1332 resource_build_info_frame(pipe_ctx);
1302 dce110_update_info_frame(pipe_ctx); 1333 dce110_update_info_frame(pipe_ctx);
1303 if (!pipe_ctx_old->stream) { 1334 if (!pipe_ctx_old->stream)
1304 if (!pipe_ctx->stream->dpms_off) 1335 core_link_enable_stream(context, pipe_ctx);
1305 core_link_enable_stream(context, pipe_ctx);
1306 }
1307 1336
1308 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; 1337 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
1309 1338
@@ -1407,6 +1436,31 @@ static void disable_vga_and_power_gate_all_controllers(
1407 } 1436 }
1408} 1437}
1409 1438
1439static struct dc_link *get_link_for_edp_not_in_use(
1440 struct dc *dc,
1441 struct dc_state *context)
1442{
1443 int i;
1444 struct dc_link *link = NULL;
1445
1446 /* check if eDP panel is suppose to be set mode, if yes, no need to disable */
1447 for (i = 0; i < context->stream_count; i++) {
1448 if (context->streams[i]->signal == SIGNAL_TYPE_EDP)
1449 return NULL;
1450 }
1451
1452 /* check if there is an eDP panel not in use */
1453 for (i = 0; i < dc->link_count; i++) {
1454 if (dc->links[i]->local_sink &&
1455 dc->links[i]->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1456 link = dc->links[i];
1457 break;
1458 }
1459 }
1460
1461 return link;
1462}
1463
1410/** 1464/**
1411 * When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need: 1465 * When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
1412 * 1. Power down all DC HW blocks 1466 * 1. Power down all DC HW blocks
@@ -1414,11 +1468,37 @@ static void disable_vga_and_power_gate_all_controllers(
1414 * 3. Enable power gating for controller 1468 * 3. Enable power gating for controller
1415 * 4. Set acc_mode_change bit (VBIOS will clear this bit when going to FSDOS) 1469 * 4. Set acc_mode_change bit (VBIOS will clear this bit when going to FSDOS)
1416 */ 1470 */
1417void dce110_enable_accelerated_mode(struct dc *dc) 1471void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
1418{ 1472{
1419 power_down_all_hw_blocks(dc); 1473 struct dc_bios *dcb = dc->ctx->dc_bios;
1420 1474
 1421 	disable_vga_and_power_gate_all_controllers(dc);	1475	/* vbios has already lit up eDP, so we can leverage vbios and skip eDP
1476 * programming
1477 */
1478 bool can_eDP_fast_boot_optimize =
1479 (dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE);
1480
 1481	/* if the OS doesn't light up eDP and an eDP link is available, we want to disable it */
1482 struct dc_link *edp_link_to_turnoff = NULL;
1483
1484 if (can_eDP_fast_boot_optimize) {
1485 edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
1486
1487 if (!edp_link_to_turnoff)
1488 dc->apply_edp_fast_boot_optimization = true;
1489 }
1490
1491 if (!dc->apply_edp_fast_boot_optimization) {
1492 if (edp_link_to_turnoff) {
 1493	/* turn off the backlight before DP_blank and the encoder is powered down */
1494 dc->hwss.edp_backlight_control(edp_link_to_turnoff, false);
1495 }
1496 /*resume from S3, no vbios posting, no need to power down again*/
1497 power_down_all_hw_blocks(dc);
1498 disable_vga_and_power_gate_all_controllers(dc);
1499 if (edp_link_to_turnoff)
1500 dc->hwss.edp_power_control(edp_link_to_turnoff, false);
1501 }
1422 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios); 1502 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios);
1423} 1503}
1424 1504
@@ -1439,7 +1519,7 @@ static uint32_t compute_pstate_blackout_duration(
1439 return total_dest_line_time_ns; 1519 return total_dest_line_time_ns;
1440} 1520}
1441 1521
1442void dce110_set_displaymarks( 1522static void dce110_set_displaymarks(
1443 const struct dc *dc, 1523 const struct dc *dc,
1444 struct dc_state *context) 1524 struct dc_state *context)
1445{ 1525{
@@ -1553,6 +1633,8 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
1553 value |= 0x80; 1633 value |= 0x80;
1554 if (events->cursor_update) 1634 if (events->cursor_update)
1555 value |= 0x2; 1635 value |= 0x2;
1636 if (events->force_trigger)
1637 value |= 0x1;
1556 1638
1557#if defined(CONFIG_DRM_AMD_DC_FBC) 1639#if defined(CONFIG_DRM_AMD_DC_FBC)
1558 value |= 0x84; 1640 value |= 0x84;
@@ -1690,9 +1772,13 @@ static void apply_min_clocks(
1690 * Check if FBC can be enabled 1772 * Check if FBC can be enabled
1691 */ 1773 */
1692static bool should_enable_fbc(struct dc *dc, 1774static bool should_enable_fbc(struct dc *dc,
1693 struct dc_state *context) 1775 struct dc_state *context,
1776 uint32_t *pipe_idx)
1694{ 1777{
1695 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0]; 1778 uint32_t i;
1779 struct pipe_ctx *pipe_ctx = NULL;
1780 struct resource_context *res_ctx = &context->res_ctx;
1781
1696 1782
1697 ASSERT(dc->fbc_compressor); 1783 ASSERT(dc->fbc_compressor);
1698 1784
@@ -1704,6 +1790,14 @@ static bool should_enable_fbc(struct dc *dc,
1704 if (context->stream_count != 1) 1790 if (context->stream_count != 1)
1705 return false; 1791 return false;
1706 1792
1793 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1794 if (res_ctx->pipe_ctx[i].stream) {
1795 pipe_ctx = &res_ctx->pipe_ctx[i];
1796 *pipe_idx = i;
1797 break;
1798 }
1799 }
1800
1707 /* Only supports eDP */ 1801 /* Only supports eDP */
1708 if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP) 1802 if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
1709 return false; 1803 return false;
@@ -1729,11 +1823,14 @@ static bool should_enable_fbc(struct dc *dc,
1729static void enable_fbc(struct dc *dc, 1823static void enable_fbc(struct dc *dc,
1730 struct dc_state *context) 1824 struct dc_state *context)
1731{ 1825{
1732 if (should_enable_fbc(dc, context)) { 1826 uint32_t pipe_idx = 0;
1827
1828 if (should_enable_fbc(dc, context, &pipe_idx)) {
1733 /* Program GRPH COMPRESSED ADDRESS and PITCH */ 1829 /* Program GRPH COMPRESSED ADDRESS and PITCH */
1734 struct compr_addr_and_pitch_params params = {0, 0, 0}; 1830 struct compr_addr_and_pitch_params params = {0, 0, 0};
1735 struct compressor *compr = dc->fbc_compressor; 1831 struct compressor *compr = dc->fbc_compressor;
1736 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0]; 1832 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
1833
1737 1834
1738 params.source_view_width = pipe_ctx->stream->timing.h_addressable; 1835 params.source_view_width = pipe_ctx->stream->timing.h_addressable;
1739 params.source_view_height = pipe_ctx->stream->timing.v_addressable; 1836 params.source_view_height = pipe_ctx->stream->timing.v_addressable;
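should_enable_fbc() now reports which pipe it validated instead of assuming pipe 0, and enable_fbc() programs that same pipe. The selection rule is simply "first pipe that carries a stream"; a standalone sketch of it with a simplified pipe type:

#include <stdbool.h>
#include <stddef.h>

struct toy_pipe { bool has_stream; };

/* First pipe that carries a stream wins; returns false when nothing is
 * active, in which case FBC should stay off. */
bool pick_fbc_pipe(const struct toy_pipe *pipes, size_t count, size_t *pipe_idx)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (pipes[i].has_stream) {
			*pipe_idx = i;
			return true;
		}
	}
	return false;
}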
@@ -1748,36 +1845,6 @@ static void enable_fbc(struct dc *dc,
1748} 1845}
1749#endif 1846#endif
1750 1847
1751static enum dc_status apply_ctx_to_hw_fpga(
1752 struct dc *dc,
1753 struct dc_state *context)
1754{
1755 enum dc_status status = DC_ERROR_UNEXPECTED;
1756 int i;
1757
1758 for (i = 0; i < MAX_PIPES; i++) {
1759 struct pipe_ctx *pipe_ctx_old =
1760 &dc->current_state->res_ctx.pipe_ctx[i];
1761 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1762
1763 if (pipe_ctx->stream == NULL)
1764 continue;
1765
1766 if (pipe_ctx->stream == pipe_ctx_old->stream)
1767 continue;
1768
1769 status = apply_single_controller_ctx_to_hw(
1770 pipe_ctx,
1771 context,
1772 dc);
1773
1774 if (status != DC_OK)
1775 return status;
1776 }
1777
1778 return DC_OK;
1779}
1780
1781static void dce110_reset_hw_ctx_wrap( 1848static void dce110_reset_hw_ctx_wrap(
1782 struct dc *dc, 1849 struct dc *dc,
1783 struct dc_state *context) 1850 struct dc_state *context)
@@ -1847,11 +1914,6 @@ enum dc_status dce110_apply_ctx_to_hw(
1847 if (context->stream_count <= 0) 1914 if (context->stream_count <= 0)
1848 return DC_OK; 1915 return DC_OK;
1849 1916
1850 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1851 apply_ctx_to_hw_fpga(dc, context);
1852 return DC_OK;
1853 }
1854
1855 /* Apply new context */ 1917 /* Apply new context */
1856 dcb->funcs->set_scratch_critical_state(dcb, true); 1918 dcb->funcs->set_scratch_critical_state(dcb, true);
1857 1919
@@ -2050,9 +2112,6 @@ enum dc_status dce110_apply_ctx_to_hw(
2050 return status; 2112 return status;
2051 } 2113 }
2052 2114
2053 /* pplib is notified if disp_num changed */
2054 dc->hwss.set_bandwidth(dc, context, true);
2055
2056 /* to save power */ 2115 /* to save power */
2057 apply_min_clocks(dc, context, &clocks_state, false); 2116 apply_min_clocks(dc, context, &clocks_state, false);
2058 2117
@@ -2134,13 +2193,14 @@ static void program_surface_visibility(const struct dc *dc,
2134 } else if (!pipe_ctx->plane_state->visible) 2193 } else if (!pipe_ctx->plane_state->visible)
2135 blank_target = true; 2194 blank_target = true;
2136 2195
2137 dce_set_blender_mode(dc->hwseq, pipe_ctx->pipe_idx, blender_mode); 2196 dce_set_blender_mode(dc->hwseq, pipe_ctx->stream_res.tg->inst, blender_mode);
2138 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, blank_target); 2197 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, blank_target);
2139 2198
2140} 2199}
2141 2200
2142static void program_gamut_remap(struct pipe_ctx *pipe_ctx) 2201static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
2143{ 2202{
2203 int i = 0;
2144 struct xfm_grph_csc_adjustment adjust; 2204 struct xfm_grph_csc_adjustment adjust;
2145 memset(&adjust, 0, sizeof(adjust)); 2205 memset(&adjust, 0, sizeof(adjust));
2146 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 2206 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
@@ -2148,33 +2208,10 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
2148 2208
2149 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) { 2209 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2150 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; 2210 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2151 adjust.temperature_matrix[0] = 2211
2152 pipe_ctx->stream-> 2212 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2153 gamut_remap_matrix.matrix[0]; 2213 adjust.temperature_matrix[i] =
2154 adjust.temperature_matrix[1] = 2214 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2155 pipe_ctx->stream->
2156 gamut_remap_matrix.matrix[1];
2157 adjust.temperature_matrix[2] =
2158 pipe_ctx->stream->
2159 gamut_remap_matrix.matrix[2];
2160 adjust.temperature_matrix[3] =
2161 pipe_ctx->stream->
2162 gamut_remap_matrix.matrix[4];
2163 adjust.temperature_matrix[4] =
2164 pipe_ctx->stream->
2165 gamut_remap_matrix.matrix[5];
2166 adjust.temperature_matrix[5] =
2167 pipe_ctx->stream->
2168 gamut_remap_matrix.matrix[6];
2169 adjust.temperature_matrix[6] =
2170 pipe_ctx->stream->
2171 gamut_remap_matrix.matrix[8];
2172 adjust.temperature_matrix[7] =
2173 pipe_ctx->stream->
2174 gamut_remap_matrix.matrix[9];
2175 adjust.temperature_matrix[8] =
2176 pipe_ctx->stream->
2177 gamut_remap_matrix.matrix[10];
2178 } 2215 }
2179 2216
2180 pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust); 2217 pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
@@ -2198,7 +2235,7 @@ static void set_plane_config(
2198 memset(&tbl_entry, 0, sizeof(tbl_entry)); 2235 memset(&tbl_entry, 0, sizeof(tbl_entry));
2199 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 2236 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2200 2237
2201 dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true); 2238 dce_enable_fe_clock(dc->hwseq, mi->inst, true);
2202 2239
2203 set_default_colors(pipe_ctx); 2240 set_default_colors(pipe_ctx);
2204 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { 2241 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
@@ -2215,33 +2252,10 @@ static void set_plane_config(
2215 2252
2216 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) { 2253 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2217 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; 2254 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2218 adjust.temperature_matrix[0] = 2255
2219 pipe_ctx->stream-> 2256 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2220 gamut_remap_matrix.matrix[0]; 2257 adjust.temperature_matrix[i] =
2221 adjust.temperature_matrix[1] = 2258 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2222 pipe_ctx->stream->
2223 gamut_remap_matrix.matrix[1];
2224 adjust.temperature_matrix[2] =
2225 pipe_ctx->stream->
2226 gamut_remap_matrix.matrix[2];
2227 adjust.temperature_matrix[3] =
2228 pipe_ctx->stream->
2229 gamut_remap_matrix.matrix[4];
2230 adjust.temperature_matrix[4] =
2231 pipe_ctx->stream->
2232 gamut_remap_matrix.matrix[5];
2233 adjust.temperature_matrix[5] =
2234 pipe_ctx->stream->
2235 gamut_remap_matrix.matrix[6];
2236 adjust.temperature_matrix[6] =
2237 pipe_ctx->stream->
2238 gamut_remap_matrix.matrix[8];
2239 adjust.temperature_matrix[7] =
2240 pipe_ctx->stream->
2241 gamut_remap_matrix.matrix[9];
2242 adjust.temperature_matrix[8] =
2243 pipe_ctx->stream->
2244 gamut_remap_matrix.matrix[10];
2245 } 2259 }
2246 2260
2247 pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust); 2261 pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
@@ -2286,7 +2300,7 @@ static void update_plane_addr(const struct dc *dc,
2286 plane_state->status.requested_address = plane_state->address; 2300 plane_state->status.requested_address = plane_state->address;
2287} 2301}
2288 2302
2289void dce110_update_pending_status(struct pipe_ctx *pipe_ctx) 2303static void dce110_update_pending_status(struct pipe_ctx *pipe_ctx)
2290{ 2304{
2291 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2305 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2292 2306
@@ -2527,7 +2541,7 @@ void dce110_fill_display_configs(
2527 2541
2528 num_cfgs++; 2542 num_cfgs++;
2529 cfg->signal = pipe_ctx->stream->signal; 2543 cfg->signal = pipe_ctx->stream->signal;
2530 cfg->pipe_idx = pipe_ctx->pipe_idx; 2544 cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
2531 cfg->src_height = stream->src.height; 2545 cfg->src_height = stream->src.height;
2532 cfg->src_width = stream->src.width; 2546 cfg->src_width = stream->src.width;
2533 cfg->ddi_channel_mapping = 2547 cfg->ddi_channel_mapping =
@@ -2680,9 +2694,8 @@ static void dce110_program_front_end_for_pipe(
2680 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2694 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2681 struct xfm_grph_csc_adjustment adjust; 2695 struct xfm_grph_csc_adjustment adjust;
2682 struct out_csc_color_matrix tbl_entry; 2696 struct out_csc_color_matrix tbl_entry;
2683 struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
2684 unsigned int i; 2697 unsigned int i;
2685 2698 struct dc_context *ctx = dc->ctx;
2686 memset(&tbl_entry, 0, sizeof(tbl_entry)); 2699 memset(&tbl_entry, 0, sizeof(tbl_entry));
2687 2700
2688 if (dc->current_state) 2701 if (dc->current_state)
@@ -2691,7 +2704,7 @@ static void dce110_program_front_end_for_pipe(
2691 memset(&adjust, 0, sizeof(adjust)); 2704 memset(&adjust, 0, sizeof(adjust));
2692 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 2705 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2693 2706
2694 dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true); 2707 dce_enable_fe_clock(dc->hwseq, mi->inst, true);
2695 2708
2696 set_default_colors(pipe_ctx); 2709 set_default_colors(pipe_ctx);
2697 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment 2710 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
@@ -2709,33 +2722,10 @@ static void dce110_program_front_end_for_pipe(
2709 2722
2710 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) { 2723 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2711 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; 2724 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2712 adjust.temperature_matrix[0] = 2725
2713 pipe_ctx->stream-> 2726 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2714 gamut_remap_matrix.matrix[0]; 2727 adjust.temperature_matrix[i] =
2715 adjust.temperature_matrix[1] = 2728 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2716 pipe_ctx->stream->
2717 gamut_remap_matrix.matrix[1];
2718 adjust.temperature_matrix[2] =
2719 pipe_ctx->stream->
2720 gamut_remap_matrix.matrix[2];
2721 adjust.temperature_matrix[3] =
2722 pipe_ctx->stream->
2723 gamut_remap_matrix.matrix[4];
2724 adjust.temperature_matrix[4] =
2725 pipe_ctx->stream->
2726 gamut_remap_matrix.matrix[5];
2727 adjust.temperature_matrix[5] =
2728 pipe_ctx->stream->
2729 gamut_remap_matrix.matrix[6];
2730 adjust.temperature_matrix[6] =
2731 pipe_ctx->stream->
2732 gamut_remap_matrix.matrix[8];
2733 adjust.temperature_matrix[7] =
2734 pipe_ctx->stream->
2735 gamut_remap_matrix.matrix[9];
2736 adjust.temperature_matrix[8] =
2737 pipe_ctx->stream->
2738 gamut_remap_matrix.matrix[10];
2739 } 2729 }
2740 2730
2741 pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust); 2731 pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
@@ -2772,12 +2762,15 @@ static void dce110_program_front_end_for_pipe(
2772 plane_state->rotation); 2762 plane_state->rotation);
2773 2763
2774 /* Moved programming gamma from dc to hwss */ 2764 /* Moved programming gamma from dc to hwss */
2775 if (cur_pipe_ctx->plane_state != pipe_ctx->plane_state) { 2765 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2766 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2767 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2776 dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); 2768 dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
2769
2770 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2777 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); 2771 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
2778 }
2779 2772
2780 dm_logger_write(dc->ctx->logger, LOG_SURFACE, 2773 DC_LOG_SURFACE(
2781 "Pipe:%d 0x%x: addr hi:0x%x, " 2774 "Pipe:%d 0x%x: addr hi:0x%x, "
2782 "addr low:0x%x, " 2775 "addr low:0x%x, "
2783 "src: %d, %d, %d," 2776 "src: %d, %d, %d,"
@@ -2800,7 +2793,7 @@ static void dce110_program_front_end_for_pipe(
2800 pipe_ctx->plane_state->clip_rect.width, 2793 pipe_ctx->plane_state->clip_rect.width,
2801 pipe_ctx->plane_state->clip_rect.height); 2794 pipe_ctx->plane_state->clip_rect.height);
2802 2795
2803 dm_logger_write(dc->ctx->logger, LOG_SURFACE, 2796 DC_LOG_SURFACE(
2804 "Pipe %d: width, height, x, y\n" 2797 "Pipe %d: width, height, x, y\n"
2805 "viewport:%d, %d, %d, %d\n" 2798 "viewport:%d, %d, %d, %d\n"
2806 "recout: %d, %d, %d, %d\n", 2799 "recout: %d, %d, %d, %d\n",
@@ -2872,7 +2865,8 @@ static void dce110_apply_ctx_for_surface(
2872 2865
2873static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) 2866static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
2874{ 2867{
2875 int fe_idx = pipe_ctx->pipe_idx; 2868 int fe_idx = pipe_ctx->plane_res.mi ?
2869 pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx;
2876 2870
2877 /* Do not power down fe when stream is active on dce*/ 2871 /* Do not power down fe when stream is active on dce*/
2878 if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream) 2872 if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)
@@ -2915,6 +2909,52 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
2915 } 2909 }
2916} 2910}
2917 2911
2912void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
2913{
2914 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2915 struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
2916 struct mem_input *mi = pipe_ctx->plane_res.mi;
2917 struct dc_cursor_mi_param param = {
2918 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
2919 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
2920 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
2921 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
2922 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
2923 };
2924
2925 if (pipe_ctx->plane_state->address.type
2926 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2927 pos_cpy.enable = false;
2928
2929 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
2930 pos_cpy.enable = false;
2931
2932 if (ipp->funcs->ipp_cursor_set_position)
2933 ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
2934 if (mi->funcs->set_cursor_position)
2935 mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
2936}
2937
2938void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
2939{
2940 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
2941
2942 if (pipe_ctx->plane_res.ipp &&
2943 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
2944 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
2945 pipe_ctx->plane_res.ipp, attributes);
2946
2947 if (pipe_ctx->plane_res.mi &&
2948 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
2949 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
2950 pipe_ctx->plane_res.mi, attributes);
2951
2952 if (pipe_ctx->plane_res.xfm &&
2953 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
2954 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
2955 pipe_ctx->plane_res.xfm, attributes);
2956}
2957
2918static void ready_shared_resources(struct dc *dc, struct dc_state *context) {} 2958static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
2919 2959
2920static void optimize_shared_resources(struct dc *dc) {} 2960static void optimize_shared_resources(struct dc *dc) {}
@@ -2938,6 +2978,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
2938 .enable_stream = dce110_enable_stream, 2978 .enable_stream = dce110_enable_stream,
2939 .disable_stream = dce110_disable_stream, 2979 .disable_stream = dce110_disable_stream,
2940 .unblank_stream = dce110_unblank_stream, 2980 .unblank_stream = dce110_unblank_stream,
2981 .blank_stream = dce110_blank_stream,
2941 .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating, 2982 .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
2942 .enable_display_power_gating = dce110_enable_display_power_gating, 2983 .enable_display_power_gating = dce110_enable_display_power_gating,
2943 .disable_plane = dce110_power_down_fe, 2984 .disable_plane = dce110_power_down_fe,
@@ -2957,6 +2998,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
2957 .edp_backlight_control = hwss_edp_backlight_control, 2998 .edp_backlight_control = hwss_edp_backlight_control,
2958 .edp_power_control = hwss_edp_power_control, 2999 .edp_power_control = hwss_edp_power_control,
2959 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, 3000 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
3001 .set_cursor_position = dce110_set_cursor_position,
3002 .set_cursor_attribute = dce110_set_cursor_attribute
2960}; 3003};
2961 3004
2962void dce110_hw_sequencer_construct(struct dc *dc) 3005void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index fc637647f643..5d7e9f516827 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -39,11 +39,7 @@ enum dc_status dce110_apply_ctx_to_hw(
39 struct dc *dc, 39 struct dc *dc,
40 struct dc_state *context); 40 struct dc_state *context);
41 41
42void dce110_set_display_clock(struct dc_state *context);
43 42
44void dce110_set_displaymarks(
45 const struct dc *dc,
46 struct dc_state *context);
47 43
48void dce110_enable_stream(struct pipe_ctx *pipe_ctx); 44void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
49 45
@@ -52,15 +48,14 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
52void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, 48void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
53 struct dc_link_settings *link_settings); 49 struct dc_link_settings *link_settings);
54 50
51void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
55void dce110_update_info_frame(struct pipe_ctx *pipe_ctx); 52void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
56 53
57void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); 54void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
58void dce110_enable_accelerated_mode(struct dc *dc); 55void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
59 56
60void dce110_power_down(struct dc *dc); 57void dce110_power_down(struct dc *dc);
61 58
62void dce110_update_pending_status(struct pipe_ctx *pipe_ctx);
63
64void dce110_fill_display_configs( 59void dce110_fill_display_configs(
65 const struct dc_state *context, 60 const struct dc_state *context,
66 struct dm_pp_display_configuration *pp_display_cfg); 61 struct dm_pp_display_configuration *pp_display_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
index feb397b5c1a3..4245e1f818a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
@@ -727,7 +727,7 @@ void dce110_opp_v_set_csc_adjustment(
727 CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC; 727 CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
728 728
729 program_color_matrix_v( 729 program_color_matrix_v(
730 xfm_dce, tbl_entry, GRAPHICS_CSC_ADJUST_TYPE_SW); 730 xfm_dce, tbl_entry, GRPH_COLOR_MATRIX_SW);
731 731
 732	/* We did everything, now program DxOUTPUT_CSC_CONTROL */	732	/* We did everything, now program DxOUTPUT_CSC_CONTROL */
733 configure_graphics_mode_v(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW, 733 configure_graphics_mode_v(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7c4779578fb7..b1f14be20fdf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -52,6 +52,8 @@
52#include "dce/dce_abm.h" 52#include "dce/dce_abm.h"
53#include "dce/dce_dmcu.h" 53#include "dce/dce_dmcu.h"
54 54
55#define DC_LOGGER \
56 dc->ctx->logger
55#if defined(CONFIG_DRM_AMD_DC_FBC) 57#if defined(CONFIG_DRM_AMD_DC_FBC)
56#include "dce110/dce110_compressor.h" 58#include "dce110/dce110_compressor.h"
57#endif 59#endif
@@ -700,7 +702,7 @@ static void get_pixel_clock_parameters(
700 pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz; 702 pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
701 pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id; 703 pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
702 pixel_clk_params->signal_type = pipe_ctx->stream->signal; 704 pixel_clk_params->signal_type = pipe_ctx->stream->signal;
703 pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1; 705 pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
704 /* TODO: un-hardcode*/ 706 /* TODO: un-hardcode*/
705 pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * 707 pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
706 LINK_RATE_REF_FREQ_IN_KHZ; 708 LINK_RATE_REF_FREQ_IN_KHZ;
@@ -771,8 +773,7 @@ static bool dce110_validate_bandwidth(
771{ 773{
772 bool result = false; 774 bool result = false;
773 775
774 dm_logger_write( 776 DC_LOG_BANDWIDTH_CALCS(
775 dc->ctx->logger, LOG_BANDWIDTH_CALCS,
776 "%s: start", 777 "%s: start",
777 __func__); 778 __func__);
778 779
@@ -786,8 +787,7 @@ static bool dce110_validate_bandwidth(
786 result = true; 787 result = true;
787 788
788 if (!result) 789 if (!result)
789 dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION, 790 DC_LOG_BANDWIDTH_VALIDATION("%s: %dx%d@%d Bandwidth validation failed!\n",
790 "%s: %dx%d@%d Bandwidth validation failed!\n",
791 __func__, 791 __func__,
792 context->streams[0]->timing.h_addressable, 792 context->streams[0]->timing.h_addressable,
793 context->streams[0]->timing.v_addressable, 793 context->streams[0]->timing.v_addressable,
@@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth(
846 return result; 846 return result;
847} 847}
848 848
849enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
850 struct dc_caps *caps)
851{
852 if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
853 ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
854 return DC_FAIL_SURFACE_VALIDATE;
855
856 return DC_OK;
857}
858
849static bool dce110_validate_surface_sets( 859static bool dce110_validate_surface_sets(
850 struct dc_state *context) 860 struct dc_state *context)
851{ 861{
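dce110_validate_plane() enforces a 2:1 limit per axis: the plane fails validation if either dimension would have to shrink by more than half. The check restated as a standalone predicate with a simplified rect type:

#include <stdbool.h>

struct toy_rect { int width, height; };

/* true when src -> dst needs no more than a 2:1 downscale per axis,
 * i.e. the inverse of the failure condition above */
bool downscale_within_2to1(struct toy_rect src, struct toy_rect dst)
{
	return dst.width * 2 >= src.width &&
	       dst.height * 2 >= src.height;
}

For example, 3840x2160 into 1920x1080 passes (exactly 2:1 in both axes), while 3840x2160 into 1280x720 is rejected.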
@@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets(
869 plane->src_rect.height > 1080)) 879 plane->src_rect.height > 1080))
870 return false; 880 return false;
871 881
 882	/* we don't have the logic to support underlay
 883	 * only yet, so block the use case where we get an
 884	 * NV12 plane as the top layer
885 */
886 if (j == 0)
887 return false;
888
872 /* irrespective of plane format, 889 /* irrespective of plane format,
873 * stream should be RGB encoded 890 * stream should be RGB encoded
874 */ 891 */
@@ -973,7 +990,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
973 990
974 dc->hwss.enable_display_power_gating( 991 dc->hwss.enable_display_power_gating(
975 dc, 992 dc,
976 pipe_ctx->pipe_idx, 993 pipe_ctx->stream_res.tg->inst,
977 dcb, PIPE_GATING_CONTROL_DISABLE); 994 dcb, PIPE_GATING_CONTROL_DISABLE);
978 995
979 /* 996 /*
@@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
1021 .link_enc_create = dce110_link_encoder_create, 1038 .link_enc_create = dce110_link_encoder_create,
1022 .validate_guaranteed = dce110_validate_guaranteed, 1039 .validate_guaranteed = dce110_validate_guaranteed,
1023 .validate_bandwidth = dce110_validate_bandwidth, 1040 .validate_bandwidth = dce110_validate_bandwidth,
1041 .validate_plane = dce110_validate_plane,
1024 .acquire_idle_pipe_for_layer = dce110_acquire_underlay, 1042 .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
1025 .add_stream_to_ctx = dce110_add_stream_to_ctx, 1043 .add_stream_to_ctx = dce110_add_stream_to_ctx,
1026 .validate_global = dce110_validate_global 1044 .validate_global = dce110_validate_global
@@ -1152,7 +1170,7 @@ static bool construct(
1152 1170
1153 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1171 pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
1154 pool->base.underlay_pipe_index = pool->base.pipe_count; 1172 pool->base.underlay_pipe_index = pool->base.pipe_count;
1155 1173 pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
1156 dc->caps.max_downscale_ratio = 150; 1174 dc->caps.max_downscale_ratio = 150;
1157 dc->caps.i2c_speed_in_khz = 100; 1175 dc->caps.i2c_speed_in_khz = 100;
1158 dc->caps.max_cursor_size = 128; 1176 dc->caps.max_cursor_size = 128;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 25ca72139e5f..be7153924a70 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -2077,6 +2077,125 @@ bool dce110_arm_vert_intr(struct timing_generator *tg, uint8_t width)
2077 return true; 2077 return true;
2078} 2078}
2079 2079
2080static bool dce110_is_tg_enabled(struct timing_generator *tg)
2081{
2082 uint32_t addr = 0;
2083 uint32_t value = 0;
2084 uint32_t field = 0;
2085 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
2086
2087 addr = CRTC_REG(mmCRTC_CONTROL);
2088 value = dm_read_reg(tg->ctx, addr);
2089 field = get_reg_field_value(value, CRTC_CONTROL,
2090 CRTC_CURRENT_MASTER_EN_STATE);
2091 return field == 1;
2092}
2093
2094bool dce110_configure_crc(struct timing_generator *tg,
2095 const struct crc_params *params)
2096{
2097 uint32_t cntl_addr = 0;
2098 uint32_t addr = 0;
2099 uint32_t value;
2100 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
2101
2102 /* Cannot configure crc on a CRTC that is disabled */
2103 if (!dce110_is_tg_enabled(tg))
2104 return false;
2105
2106 cntl_addr = CRTC_REG(mmCRTC_CRC_CNTL);
2107
2108 /* First, disable CRC before we configure it. */
2109 dm_write_reg(tg->ctx, cntl_addr, 0);
2110
2111 if (!params->enable)
2112 return true;
2113
2114 /* Program frame boundaries */
2115 /* Window A x axis start and end. */
2116 value = 0;
2117 addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
2118 set_reg_field_value(value, params->windowa_x_start,
2119 CRTC_CRC0_WINDOWA_X_CONTROL,
2120 CRTC_CRC0_WINDOWA_X_START);
2121 set_reg_field_value(value, params->windowa_x_end,
2122 CRTC_CRC0_WINDOWA_X_CONTROL,
2123 CRTC_CRC0_WINDOWA_X_END);
2124 dm_write_reg(tg->ctx, addr, value);
2125
2126 /* Window A y axis start and end. */
2127 value = 0;
2128 addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
2129 set_reg_field_value(value, params->windowa_y_start,
2130 CRTC_CRC0_WINDOWA_Y_CONTROL,
2131 CRTC_CRC0_WINDOWA_Y_START);
2132 set_reg_field_value(value, params->windowa_y_end,
2133 CRTC_CRC0_WINDOWA_Y_CONTROL,
2134 CRTC_CRC0_WINDOWA_Y_END);
2135 dm_write_reg(tg->ctx, addr, value);
2136
2137 /* Window B x axis start and end. */
2138 value = 0;
2139 addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
2140 set_reg_field_value(value, params->windowb_x_start,
2141 CRTC_CRC0_WINDOWB_X_CONTROL,
2142 CRTC_CRC0_WINDOWB_X_START);
2143 set_reg_field_value(value, params->windowb_x_end,
2144 CRTC_CRC0_WINDOWB_X_CONTROL,
2145 CRTC_CRC0_WINDOWB_X_END);
2146 dm_write_reg(tg->ctx, addr, value);
2147
2148 /* Window B y axis start and end. */
2149 value = 0;
2150 addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
2151 set_reg_field_value(value, params->windowb_y_start,
2152 CRTC_CRC0_WINDOWB_Y_CONTROL,
2153 CRTC_CRC0_WINDOWB_Y_START);
2154 set_reg_field_value(value, params->windowb_y_end,
2155 CRTC_CRC0_WINDOWB_Y_CONTROL,
2156 CRTC_CRC0_WINDOWB_Y_END);
2157 dm_write_reg(tg->ctx, addr, value);
2158
2159 /* Set crc mode and selection, and enable. Only using CRC0*/
2160 value = 0;
2161 set_reg_field_value(value, params->continuous_mode ? 1 : 0,
2162 CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
2163 set_reg_field_value(value, params->selection,
2164 CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
2165 set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
2166 dm_write_reg(tg->ctx, cntl_addr, value);
2167
2168 return true;
2169}
2170
2171bool dce110_get_crc(struct timing_generator *tg,
2172 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
2173{
2174 uint32_t addr = 0;
2175 uint32_t value = 0;
2176 uint32_t field = 0;
2177 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
2178
2179 addr = CRTC_REG(mmCRTC_CRC_CNTL);
2180 value = dm_read_reg(tg->ctx, addr);
2181 field = get_reg_field_value(value, CRTC_CRC_CNTL, CRTC_CRC_EN);
2182
2183 /* Early return if CRC is not enabled for this CRTC */
2184 if (!field)
2185 return false;
2186
2187 addr = CRTC_REG(mmCRTC_CRC0_DATA_RG);
2188 value = dm_read_reg(tg->ctx, addr);
2189 *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR);
2190 *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y);
2191
2192 addr = CRTC_REG(mmCRTC_CRC0_DATA_B);
2193 value = dm_read_reg(tg->ctx, addr);
2194 *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB);
2195
2196 return true;
2197}
2198
2080static const struct timing_generator_funcs dce110_tg_funcs = { 2199static const struct timing_generator_funcs dce110_tg_funcs = {
2081 .validate_timing = dce110_tg_validate_timing, 2200 .validate_timing = dce110_tg_validate_timing,
2082 .program_timing = dce110_tg_program_timing, 2201 .program_timing = dce110_tg_program_timing,
@@ -2112,6 +2231,9 @@ static const struct timing_generator_funcs dce110_tg_funcs = {
2112 dce110_timing_generator_set_static_screen_control, 2231 dce110_timing_generator_set_static_screen_control,
2113 .set_test_pattern = dce110_timing_generator_set_test_pattern, 2232 .set_test_pattern = dce110_timing_generator_set_test_pattern,
2114 .arm_vert_intr = dce110_arm_vert_intr, 2233 .arm_vert_intr = dce110_arm_vert_intr,
2234 .is_tg_enabled = dce110_is_tg_enabled,
2235 .configure_crc = dce110_configure_crc,
2236 .get_crc = dce110_get_crc,
2115}; 2237};
2116 2238
2117void dce110_timing_generator_construct( 2239void dce110_timing_generator_construct(
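The configure_crc/get_crc pair above is exported through timing_generator_funcs so that higher layers can program a CRC window and read the per-frame result back. The sketch below shows how a caller might drive these hooks; it relies only on the signatures visible in this patch, while the crc_params field values and the "zeroed windows mean full frame" convention are assumptions made for illustration.

	/* Hypothetical caller: enable continuous CRC0 capture and read one result.
	 * The crc_params initialisation here is illustrative, not from this patch.
	 */
	static bool example_read_frame_crc(struct timing_generator *tg,
					   uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
	{
		struct crc_params params = { 0 };	/* assume zeroed windows = full frame */

		params.continuous_mode = true;		/* recompute the CRC every frame */
		params.selection = 0;			/* assumed default CRC source */

		if (!tg->funcs->configure_crc ||
		    !tg->funcs->configure_crc(tg, &params))
			return false;

		/* dce110_get_crc() bails out until CRTC_CRC_EN is set for this CRTC */
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	}

In the real driver the request presumably arrives from the DC stream/DM layer rather than a helper like this, but the register-level work all lands in dce110_configure_crc() and dce110_get_crc() shown above.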
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index 232747c7c60b..734d4965dab1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -276,4 +276,10 @@ void dce110_tg_set_colors(struct timing_generator *tg,
276bool dce110_arm_vert_intr( 276bool dce110_arm_vert_intr(
277 struct timing_generator *tg, uint8_t width); 277 struct timing_generator *tg, uint8_t width);
278 278
279bool dce110_configure_crc(struct timing_generator *tg,
280 const struct crc_params *params);
281
282bool dce110_get_crc(struct timing_generator *tg,
283 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
284
279#endif /* __DC_TIMING_GENERATOR_DCE110_H__ */ 285#endif /* __DC_TIMING_GENERATOR_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index 59b4cd329715..8ad04816e7d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -38,6 +38,8 @@
38 38
39#include "timing_generator.h" 39#include "timing_generator.h"
40 40
41#define DC_LOGGER \
42 tg->ctx->logger
41/** ******************************************************************************** 43/** ********************************************************************************
42 * 44 *
43 * DCE11 Timing Generator Implementation 45 * DCE11 Timing Generator Implementation
@@ -606,8 +608,7 @@ static uint32_t dce110_timing_generator_v_get_vblank_counter(struct timing_gener
606static bool dce110_timing_generator_v_did_triggered_reset_occur( 608static bool dce110_timing_generator_v_did_triggered_reset_occur(
607 struct timing_generator *tg) 609 struct timing_generator *tg)
608{ 610{
609 dm_logger_write(tg->ctx->logger, LOG_ERROR, 611 DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
610 "Timing Sync not supported on underlay pipe\n");
611 return false; 612 return false;
612} 613}
613 614
@@ -615,8 +616,7 @@ static void dce110_timing_generator_v_setup_global_swap_lock(
615 struct timing_generator *tg, 616 struct timing_generator *tg,
616 const struct dcp_gsl_params *gsl_params) 617 const struct dcp_gsl_params *gsl_params)
617{ 618{
618 dm_logger_write(tg->ctx->logger, LOG_ERROR, 619 DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
619 "Timing Sync not supported on underlay pipe\n");
620 return; 620 return;
621} 621}
622 622
@@ -624,24 +624,21 @@ static void dce110_timing_generator_v_enable_reset_trigger(
624 struct timing_generator *tg, 624 struct timing_generator *tg,
625 int source_tg_inst) 625 int source_tg_inst)
626{ 626{
627 dm_logger_write(tg->ctx->logger, LOG_ERROR, 627 DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
628 "Timing Sync not supported on underlay pipe\n");
629 return; 628 return;
630} 629}
631 630
632static void dce110_timing_generator_v_disable_reset_trigger( 631static void dce110_timing_generator_v_disable_reset_trigger(
633 struct timing_generator *tg) 632 struct timing_generator *tg)
634{ 633{
635 dm_logger_write(tg->ctx->logger, LOG_ERROR, 634 DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
636 "Timing Sync not supported on underlay pipe\n");
637 return; 635 return;
638} 636}
639 637
640static void dce110_timing_generator_v_tear_down_global_swap_lock( 638static void dce110_timing_generator_v_tear_down_global_swap_lock(
641 struct timing_generator *tg) 639 struct timing_generator *tg)
642{ 640{
643 dm_logger_write(tg->ctx->logger, LOG_ERROR, 641 DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
644 "Timing Sync not supported on underlay pipe\n");
645 return; 642 return;
646} 643}
647 644
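The conversion in this file shows the pattern repeated throughout this series: each .c file gains a file-local DC_LOGGER definition (here tg->ctx->logger, added at the top of the file), and the open-coded dm_logger_write(tg->ctx->logger, LOG_*, ...) calls are replaced with DC_LOG_ERROR()/DC_LOG_WARNING(). For that to work, the shared DC_LOG_* wrappers have to expand against whatever DC_LOGGER means in the including file. The sketch below is an assumed shape of those wrappers, shown only to illustrate the mechanism; the real definitions live in the DC logger headers and may differ in detail.

	/* Assumed shape of the shared wrappers (illustration only) */
	#define DC_LOG_ERROR(...) \
		dm_logger_write(DC_LOGGER, LOG_ERROR, ##__VA_ARGS__)
	#define DC_LOG_WARNING(...) \
		dm_logger_write(DC_LOGGER, LOG_WARNING, ##__VA_ARGS__)

With the file-local DC_LOGGER above, DC_LOG_ERROR("msg") expands to dm_logger_write(tg->ctx->logger, LOG_ERROR, "msg"), which is exactly the call each hunk in this file removes.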
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index 47390dc58306..8ba3c12fc608 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -30,6 +30,8 @@
30#include "dce/dce_11_0_sh_mask.h" 30#include "dce/dce_11_0_sh_mask.h"
31 31
32#define SCLV_PHASES 64 32#define SCLV_PHASES 64
33#define DC_LOGGER \
34 xfm->ctx->logger
33 35
34struct sclv_ratios_inits { 36struct sclv_ratios_inits {
35 uint32_t h_int_scale_ratio_luma; 37 uint32_t h_int_scale_ratio_luma;
@@ -670,8 +672,7 @@ static void dce110_xfmv_set_pixel_storage_depth(
670 if (!(xfm_dce->lb_pixel_depth_supported & depth)) { 672 if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
671 /*we should use unsupported capabilities 673 /*we should use unsupported capabilities
672 * unless it is required by w/a*/ 674 * unless it is required by w/a*/
673 dm_logger_write(xfm->ctx->logger, LOG_WARNING, 675 DC_LOG_WARNING("%s: Capability not supported",
674 "%s: Capability not supported",
675 __func__); 676 __func__);
676 } 677 }
677} 678}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
index 69649928768c..faae12cf7968 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
@@ -33,7 +33,8 @@
33#include "include/logger_interface.h" 33#include "include/logger_interface.h"
34 34
35#include "dce112_compressor.h" 35#include "dce112_compressor.h"
36 36#define DC_LOGGER \
37 cp110->base.ctx->logger
37#define DCP_REG(reg)\ 38#define DCP_REG(reg)\
38 (reg + cp110->offsets.dcp_offset) 39 (reg + cp110->offsets.dcp_offset)
39#define DMIF_REG(reg)\ 40#define DMIF_REG(reg)\
@@ -129,8 +130,7 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
129 LOW_POWER_TILING_NUM_PIPES); 130 LOW_POWER_TILING_NUM_PIPES);
130 break; 131 break;
131 default: 132 default:
132 dm_logger_write( 133 DC_LOG_WARNING(
133 cp110->base.ctx->logger, LOG_WARNING,
134 "%s: Invalid LPT NUM_PIPES!!!", 134 "%s: Invalid LPT NUM_PIPES!!!",
135 __func__); 135 __func__);
136 break; 136 break;
@@ -175,8 +175,7 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
175 LOW_POWER_TILING_NUM_BANKS); 175 LOW_POWER_TILING_NUM_BANKS);
176 break; 176 break;
177 default: 177 default:
178 dm_logger_write( 178 DC_LOG_WARNING(
179 cp110->base.ctx->logger, LOG_WARNING,
180 "%s: Invalid LPT NUM_BANKS!!!", 179 "%s: Invalid LPT NUM_BANKS!!!",
181 __func__); 180 __func__);
182 break; 181 break;
@@ -209,8 +208,7 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
209 LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE); 208 LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
210 break; 209 break;
211 default: 210 default:
212 dm_logger_write( 211 DC_LOG_WARNING(
213 cp110->base.ctx->logger, LOG_WARNING,
214 "%s: Invalid LPT INTERLEAVE_SIZE!!!", 212 "%s: Invalid LPT INTERLEAVE_SIZE!!!",
215 __func__); 213 __func__);
216 break; 214 break;
@@ -253,15 +251,13 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
253 LOW_POWER_TILING_ROW_SIZE); 251 LOW_POWER_TILING_ROW_SIZE);
254 break; 252 break;
255 default: 253 default:
256 dm_logger_write( 254 DC_LOG_WARNING(
257 cp110->base.ctx->logger, LOG_WARNING,
258 "%s: Invalid LPT ROW_SIZE!!!", 255 "%s: Invalid LPT ROW_SIZE!!!",
259 __func__); 256 __func__);
260 break; 257 break;
261 } 258 }
262 } else { 259 } else {
263 dm_logger_write( 260 DC_LOG_WARNING(
264 cp110->base.ctx->logger, LOG_WARNING,
265 "%s: LPT MC Configuration is not provided", 261 "%s: LPT MC Configuration is not provided",
266 __func__); 262 __func__);
267 } 263 }
@@ -311,8 +307,7 @@ static void wait_for_fbc_state_changed(
311 } 307 }
312 308
313 if (counter == 10) { 309 if (counter == 10) {
314 dm_logger_write( 310 DC_LOG_WARNING(
315 cp110->base.ctx->logger, LOG_WARNING,
316 "%s: wait counter exceeded, changes to HW not applied", 311 "%s: wait counter exceeded, changes to HW not applied",
317 __func__); 312 __func__);
318 } 313 }
@@ -525,8 +520,7 @@ void dce112_compressor_program_compressed_surface_address_and_pitch(
525 if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1) 520 if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
526 fbc_pitch = fbc_pitch / 8; 521 fbc_pitch = fbc_pitch / 8;
527 else 522 else
528 dm_logger_write( 523 DC_LOG_WARNING(
529 compressor->ctx->logger, LOG_WARNING,
530 "%s: Unexpected DCE11 compression ratio", 524 "%s: Unexpected DCE11 compression ratio",
531 __func__); 525 __func__);
532 526
@@ -690,8 +684,7 @@ void dce112_compressor_program_lpt_control(
690 LOW_POWER_TILING_MODE); 684 LOW_POWER_TILING_MODE);
691 break; 685 break;
692 default: 686 default:
693 dm_logger_write( 687 DC_LOG_WARNING(
694 compressor->ctx->logger, LOG_WARNING,
695 "%s: Invalid selected DRAM channels for LPT!!!", 688 "%s: Invalid selected DRAM channels for LPT!!!",
696 __func__); 689 __func__);
697 break; 690 break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 663e0a047a4b..cd1e3f72c44e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -56,6 +56,8 @@
56#include "dce/dce_11_2_sh_mask.h" 56#include "dce/dce_11_2_sh_mask.h"
57 57
58#include "dce100/dce100_resource.h" 58#include "dce100/dce100_resource.h"
59#define DC_LOGGER \
60 dc->ctx->logger
59 61
60#ifndef mmDP_DPHY_INTERNAL_CTRL 62#ifndef mmDP_DPHY_INTERNAL_CTRL
61 #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 63 #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
@@ -722,8 +724,7 @@ bool dce112_validate_bandwidth(
722{ 724{
723 bool result = false; 725 bool result = false;
724 726
725 dm_logger_write( 727 DC_LOG_BANDWIDTH_CALCS(
726 dc->ctx->logger, LOG_BANDWIDTH_CALCS,
727 "%s: start", 728 "%s: start",
728 __func__); 729 __func__);
729 730
@@ -737,7 +738,7 @@ bool dce112_validate_bandwidth(
737 result = true; 738 result = true;
738 739
739 if (!result) 740 if (!result)
740 dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION, 741 DC_LOG_BANDWIDTH_VALIDATION(
741 "%s: Bandwidth validation failed!", 742 "%s: Bandwidth validation failed!",
742 __func__); 743 __func__);
743 744
@@ -1100,9 +1101,12 @@ static bool construct(
1100 *************************************************/ 1101 *************************************************/
1101 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1102 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
1102 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1103 pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
1104 pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
1103 dc->caps.max_downscale_ratio = 200; 1105 dc->caps.max_downscale_ratio = 200;
1104 dc->caps.i2c_speed_in_khz = 100; 1106 dc->caps.i2c_speed_in_khz = 100;
1105 dc->caps.max_cursor_size = 128; 1107 dc->caps.max_cursor_size = 128;
1108 dc->caps.dual_link_dvi = true;
1109
1106 1110
1107 /************************************************* 1111 /*************************************************
1108 * Create resources * 1112 * Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 75d029742f96..e96ff86d2fc3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -33,7 +33,8 @@
33 33
34#include "dce/dce_12_0_offset.h" 34#include "dce/dce_12_0_offset.h"
35#include "dce/dce_12_0_sh_mask.h" 35#include "dce/dce_12_0_sh_mask.h"
36#include "soc15ip.h" 36#include "soc15_hw_ip.h"
37#include "vega10_ip_offset.h"
37#include "reg_helper.h" 38#include "reg_helper.h"
38 39
39#define CTX \ 40#define CTX \
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 57cd67359567..4659a4bfabaa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -56,7 +56,8 @@
56 56
57#include "dce/dce_12_0_offset.h" 57#include "dce/dce_12_0_offset.h"
58#include "dce/dce_12_0_sh_mask.h" 58#include "dce/dce_12_0_sh_mask.h"
59#include "soc15ip.h" 59#include "soc15_hw_ip.h"
60#include "vega10_ip_offset.h"
60#include "nbio/nbio_6_1_offset.h" 61#include "nbio/nbio_6_1_offset.h"
61#include "reg_helper.h" 62#include "reg_helper.h"
62 63
@@ -830,11 +831,14 @@ static bool construct(
830 831
831 /* TODO: Fill more data from GreenlandAsicCapability.cpp */ 832 /* TODO: Fill more data from GreenlandAsicCapability.cpp */
832 pool->base.pipe_count = res_cap.num_timing_generator; 833 pool->base.pipe_count = res_cap.num_timing_generator;
834 pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
833 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 835 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
834 836
835 dc->caps.max_downscale_ratio = 200; 837 dc->caps.max_downscale_ratio = 200;
836 dc->caps.i2c_speed_in_khz = 100; 838 dc->caps.i2c_speed_in_khz = 100;
837 dc->caps.max_cursor_size = 128; 839 dc->caps.max_cursor_size = 128;
840 dc->caps.dual_link_dvi = true;
841
838 dc->debug = debug_defaults; 842 dc->debug = debug_defaults;
839 843
840 /************************************************* 844 /*************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 0aa60e5727e0..7bee78172d85 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -27,7 +27,8 @@
27 27
28#include "dce/dce_12_0_offset.h" 28#include "dce/dce_12_0_offset.h"
29#include "dce/dce_12_0_sh_mask.h" 29#include "dce/dce_12_0_sh_mask.h"
30#include "soc15ip.h" 30#include "soc15_hw_ip.h"
31#include "vega10_ip_offset.h"
31 32
32#include "dc_types.h" 33#include "dc_types.h"
33#include "dc_bios_types.h" 34#include "dc_bios_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index bc388aa4b2f5..666fcb2bdbba 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,7 +23,7 @@
23# Makefile for the 'controller' sub-component of DAL. 23# Makefile for the 'controller' sub-component of DAL.
24# It provides the control and status of HW CRTC block. 24# It provides the control and status of HW CRTC block.
25 25
26DCE80 = dce80_timing_generator.o dce80_compressor.o dce80_hw_sequencer.o \ 26DCE80 = dce80_timing_generator.o dce80_hw_sequencer.o \
27 dce80_resource.o 27 dce80_resource.o
28 28
29AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80)) 29AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80))
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
deleted file mode 100644
index 951f2caba9b3..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
+++ /dev/null
@@ -1,834 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "dce/dce_8_0_d.h"
29#include "dce/dce_8_0_sh_mask.h"
30#include "gmc/gmc_7_1_sh_mask.h"
31#include "gmc/gmc_7_1_d.h"
32
33#include "include/logger_interface.h"
34#include "dce80_compressor.h"
35
36#define DCP_REG(reg)\
37 (reg + cp80->offsets.dcp_offset)
38#define DMIF_REG(reg)\
39 (reg + cp80->offsets.dmif_offset)
40
41static const struct dce80_compressor_reg_offsets reg_offsets[] = {
42{
43 .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
44 .dmif_offset = (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
45 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
46},
47{
48 .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
49 .dmif_offset = (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
50 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
51},
52{
53 .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
54 .dmif_offset = (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
55 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
56},
57{
58 .dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
59 .dmif_offset = (mmDMIF_PG3_DPG_PIPE_DPM_CONTROL
60 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
61},
62{
63 .dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
64 .dmif_offset = (mmDMIF_PG4_DPG_PIPE_DPM_CONTROL
65 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
66},
67{
68 .dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
69 .dmif_offset = (mmDMIF_PG5_DPG_PIPE_DPM_CONTROL
70 - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
71}
72};
73
74static const uint32_t dce8_one_lpt_channel_max_resolution = 2048 * 1200;
75
76enum fbc_idle_force {
77 /* Bit 0 - Display registers updated */
78 FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
79
80 /* Bit 2 - FBC_GRPH_COMP_EN register updated */
81 FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
82 /* Bit 3 - FBC_SRC_SEL register updated */
83 FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
84 /* Bit 4 - FBC_MIN_COMPRESSION register updated */
85 FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
86 /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
87 FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
88 /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
89 FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
90 /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
91 FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
92
93 /* Bit 24 - Memory write to region 0 defined by MC registers. */
94 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
95 /* Bit 25 - Memory write to region 1 defined by MC registers */
96 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
97 /* Bit 26 - Memory write to region 2 defined by MC registers */
98 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
99 /* Bit 27 - Memory write to region 3 defined by MC registers. */
100 FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
101
102 /* Bit 28 - Memory write from any client other than MCIF */
103 FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
104 /* Bit 29 - CG statics screen signal is inactive */
105 FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
106};
107
108static uint32_t lpt_size_alignment(struct dce80_compressor *cp80)
109{
110 /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */
111 return cp80->base.raw_size * cp80->base.banks_num *
112 cp80->base.dram_channels_num;
113}
114
115static uint32_t lpt_memory_control_config(struct dce80_compressor *cp80,
116 uint32_t lpt_control)
117{
118 /*LPT MC Config */
119 if (cp80->base.options.bits.LPT_MC_CONFIG == 1) {
120 /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
121 * 00 - 1 CHANNEL
122 * 01 - 2 CHANNELS
123 * 02 - 4 OR 6 CHANNELS
124 * (Only for discrete GPU, N/A for CZ)
125 * 03 - 8 OR 12 CHANNELS
126 * (Only for discrete GPU, N/A for CZ) */
127 switch (cp80->base.dram_channels_num) {
128 case 2:
129 set_reg_field_value(
130 lpt_control,
131 1,
132 LOW_POWER_TILING_CONTROL,
133 LOW_POWER_TILING_NUM_PIPES);
134 break;
135 case 1:
136 set_reg_field_value(
137 lpt_control,
138 0,
139 LOW_POWER_TILING_CONTROL,
140 LOW_POWER_TILING_NUM_PIPES);
141 break;
142 default:
143 dm_logger_write(
144 cp80->base.ctx->logger, LOG_WARNING,
145 "%s: Invalid LPT NUM_PIPES!!!",
146 __func__);
147 break;
148 }
149
150 /* The mapping for LPT NUM_BANKS is in
151 * GRPH_CONTROL.GRPH_NUM_BANKS register field
152 * Specifies the number of memory banks for tiling
153 * purposes. Only applies to 2D and 3D tiling modes.
154 * POSSIBLE VALUES:
155 * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
156 * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
157 * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
158 * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
159 switch (cp80->base.banks_num) {
160 case 16:
161 set_reg_field_value(
162 lpt_control,
163 3,
164 LOW_POWER_TILING_CONTROL,
165 LOW_POWER_TILING_NUM_BANKS);
166 break;
167 case 8:
168 set_reg_field_value(
169 lpt_control,
170 2,
171 LOW_POWER_TILING_CONTROL,
172 LOW_POWER_TILING_NUM_BANKS);
173 break;
174 case 4:
175 set_reg_field_value(
176 lpt_control,
177 1,
178 LOW_POWER_TILING_CONTROL,
179 LOW_POWER_TILING_NUM_BANKS);
180 break;
181 case 2:
182 set_reg_field_value(
183 lpt_control,
184 0,
185 LOW_POWER_TILING_CONTROL,
186 LOW_POWER_TILING_NUM_BANKS);
187 break;
188 default:
189 dm_logger_write(
190 cp80->base.ctx->logger, LOG_WARNING,
191 "%s: Invalid LPT NUM_BANKS!!!",
192 __func__);
193 break;
194 }
195
196 /* The mapping is in DMIF_ADDR_CALC.
197 * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
198 * Carrizo specifies the memory interleave per pipe.
199 * It effectively specifies the location of pipe bits in
200 * the memory address.
201 * POSSIBLE VALUES:
202 * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
203 * interleave
204 * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
205 * interleave
206 */
207 switch (cp80->base.channel_interleave_size) {
208 case 256: /*256B */
209 set_reg_field_value(
210 lpt_control,
211 0,
212 LOW_POWER_TILING_CONTROL,
213 LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
214 break;
215 case 512: /*512B */
216 set_reg_field_value(
217 lpt_control,
218 1,
219 LOW_POWER_TILING_CONTROL,
220 LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
221 break;
222 default:
223 dm_logger_write(
224 cp80->base.ctx->logger, LOG_WARNING,
225 "%s: Invalid LPT INTERLEAVE_SIZE!!!",
226 __func__);
227 break;
228 }
229
230 /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
231 * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
232 * for Carrizo. Specifies the size of dram row in bytes.
233 * This should match up with NOOFCOLS field in
234 * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns).
235 * This register DMIF_ADDR_CALC is not used by the
236 * hardware as it is only used for addrlib assertions.
237 * POSSIBLE VALUES:
238 * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
239 * boundary
240 * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
241 * boundary
242 * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
243 * boundary */
244 switch (cp80->base.raw_size) {
245 case 4096: /*4 KB */
246 set_reg_field_value(
247 lpt_control,
248 2,
249 LOW_POWER_TILING_CONTROL,
250 LOW_POWER_TILING_ROW_SIZE);
251 break;
252 case 2048:
253 set_reg_field_value(
254 lpt_control,
255 1,
256 LOW_POWER_TILING_CONTROL,
257 LOW_POWER_TILING_ROW_SIZE);
258 break;
259 case 1024:
260 set_reg_field_value(
261 lpt_control,
262 0,
263 LOW_POWER_TILING_CONTROL,
264 LOW_POWER_TILING_ROW_SIZE);
265 break;
266 default:
267 dm_logger_write(
268 cp80->base.ctx->logger, LOG_WARNING,
269 "%s: Invalid LPT ROW_SIZE!!!",
270 __func__);
271 break;
272 }
273 } else {
274 dm_logger_write(
275 cp80->base.ctx->logger, LOG_WARNING,
276 "%s: LPT MC Configuration is not provided",
277 __func__);
278 }
279
280 return lpt_control;
281}
282
283static bool is_source_bigger_than_epanel_size(
284 struct dce80_compressor *cp80,
285 uint32_t source_view_width,
286 uint32_t source_view_height)
287{
288 if (cp80->base.embedded_panel_h_size != 0 &&
289 cp80->base.embedded_panel_v_size != 0 &&
290 ((source_view_width * source_view_height) >
291 (cp80->base.embedded_panel_h_size *
292 cp80->base.embedded_panel_v_size)))
293 return true;
294
295 return false;
296}
297
298static uint32_t align_to_chunks_number_per_line(
299 struct dce80_compressor *cp80,
300 uint32_t pixels)
301{
302 return 256 * ((pixels + 255) / 256);
303}
304
305static void wait_for_fbc_state_changed(
306 struct dce80_compressor *cp80,
307 bool enabled)
308{
309 uint8_t counter = 0;
310 uint32_t addr = mmFBC_STATUS;
311 uint32_t value;
312
313 while (counter < 10) {
314 value = dm_read_reg(cp80->base.ctx, addr);
315 if (get_reg_field_value(
316 value,
317 FBC_STATUS,
318 FBC_ENABLE_STATUS) == enabled)
319 break;
320 udelay(10);
321 counter++;
322 }
323
324 if (counter == 10) {
325 dm_logger_write(
326 cp80->base.ctx->logger, LOG_WARNING,
327 "%s: wait counter exceeded, changes to HW not applied",
328 __func__);
329 }
330}
331
332void dce80_compressor_power_up_fbc(struct compressor *compressor)
333{
334 uint32_t value;
335 uint32_t addr;
336
337 addr = mmFBC_CNTL;
338 value = dm_read_reg(compressor->ctx, addr);
339 set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
340 set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
341 set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
342 dm_write_reg(compressor->ctx, addr, value);
343
344 addr = mmFBC_COMP_MODE;
345 value = dm_read_reg(compressor->ctx, addr);
346 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
347 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
348 set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
349 dm_write_reg(compressor->ctx, addr, value);
350
351 addr = mmFBC_COMP_CNTL;
352 value = dm_read_reg(compressor->ctx, addr);
353 set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
354 dm_write_reg(compressor->ctx, addr, value);
355 /*FBC_MIN_COMPRESSION 0 ==> 2:1 */
356 /* 1 ==> 4:1 */
357 /* 2 ==> 8:1 */
358 /* 0xF ==> 1:1 */
359 set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
360 dm_write_reg(compressor->ctx, addr, value);
361 compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
362
363 value = 0;
364 dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
365
366 value = 0xFFFFFF;
367 dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
368}
369
370void dce80_compressor_enable_fbc(
371 struct compressor *compressor,
372 uint32_t paths_num,
373 struct compr_addr_and_pitch_params *params)
374{
375 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
376
377 if (compressor->options.bits.FBC_SUPPORT &&
378 (compressor->options.bits.DUMMY_BACKEND == 0) &&
379 (!dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
380 (!is_source_bigger_than_epanel_size(
381 cp80,
382 params->source_view_width,
383 params->source_view_height))) {
384
385 uint32_t addr;
386 uint32_t value;
387
388 /* Before enabling FBC first need to enable LPT if applicable
389 * LPT state should always be changed (enable/disable) while FBC
390 * is disabled */
391 if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
392 (params->source_view_width *
393 params->source_view_height <=
394 dce8_one_lpt_channel_max_resolution)) {
395 dce80_compressor_enable_lpt(compressor);
396 }
397
398 addr = mmFBC_CNTL;
399 value = dm_read_reg(compressor->ctx, addr);
400 set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
401 set_reg_field_value(
402 value,
403 params->inst,
404 FBC_CNTL, FBC_SRC_SEL);
405 dm_write_reg(compressor->ctx, addr, value);
406
407 /* Keep track of enum controller_id FBC is attached to */
408 compressor->is_enabled = true;
409 compressor->attached_inst = params->inst;
410 cp80->offsets = reg_offsets[params->inst];
411
412 /*Toggle it as there is bug in HW */
413 set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
414 dm_write_reg(compressor->ctx, addr, value);
415 set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
416 dm_write_reg(compressor->ctx, addr, value);
417
418 wait_for_fbc_state_changed(cp80, true);
419 }
420}
421
422void dce80_compressor_disable_fbc(struct compressor *compressor)
423{
424 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
425
426 if (compressor->options.bits.FBC_SUPPORT &&
427 dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
428 uint32_t reg_data;
429 /* Turn off compression */
430 reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
431 set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
432 dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
433
434 /* Reset enum controller_id to undefined */
435 compressor->attached_inst = 0;
436 compressor->is_enabled = false;
437
438 /* Whenever disabling FBC make sure LPT is disabled if LPT
439 * supported */
440 if (compressor->options.bits.LPT_SUPPORT)
441 dce80_compressor_disable_lpt(compressor);
442
443 wait_for_fbc_state_changed(cp80, false);
444 }
445}
446
447bool dce80_compressor_is_fbc_enabled_in_hw(
448 struct compressor *compressor,
449 uint32_t *inst)
450{
451 /* Check the hardware register */
452 uint32_t value;
453
454 value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
455 if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
456 if (inst != NULL)
457 *inst = compressor->attached_inst;
458 return true;
459 }
460
461 value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
462 if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
463 if (inst != NULL)
464 *inst = compressor->attached_inst;
465 return true;
466 }
467
468 return false;
469}
470
471bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
472{
473 /* Check the hardware register */
474 uint32_t value = dm_read_reg(compressor->ctx,
475 mmLOW_POWER_TILING_CONTROL);
476
477 return get_reg_field_value(
478 value,
479 LOW_POWER_TILING_CONTROL,
480 LOW_POWER_TILING_ENABLE);
481}
482
483void dce80_compressor_program_compressed_surface_address_and_pitch(
484 struct compressor *compressor,
485 struct compr_addr_and_pitch_params *params)
486{
487 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
488 uint32_t value = 0;
489 uint32_t fbc_pitch = 0;
490 uint32_t compressed_surf_address_low_part =
491 compressor->compr_surface_address.addr.low_part;
492
493 /* Clear content first. */
494 dm_write_reg(
495 compressor->ctx,
496 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
497 0);
498 dm_write_reg(compressor->ctx,
499 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
500
501 if (compressor->options.bits.LPT_SUPPORT) {
502 uint32_t lpt_alignment = lpt_size_alignment(cp80);
503
504 if (lpt_alignment != 0) {
505 compressed_surf_address_low_part =
506 ((compressed_surf_address_low_part
507 + (lpt_alignment - 1)) / lpt_alignment)
508 * lpt_alignment;
509 }
510 }
511
512 /* Write address, HIGH has to be first. */
513 dm_write_reg(compressor->ctx,
514 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
515 compressor->compr_surface_address.addr.high_part);
516 dm_write_reg(compressor->ctx,
517 DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
518 compressed_surf_address_low_part);
519
520 fbc_pitch = align_to_chunks_number_per_line(
521 cp80,
522 params->source_view_width);
523
524 if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
525 fbc_pitch = fbc_pitch / 8;
526 else
527 dm_logger_write(
528 compressor->ctx->logger, LOG_WARNING,
529 "%s: Unexpected DCE8 compression ratio",
530 __func__);
531
532 /* Clear content first. */
533 dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
534
535 /* Write FBC Pitch. */
536 set_reg_field_value(
537 value,
538 fbc_pitch,
539 GRPH_COMPRESS_PITCH,
540 GRPH_COMPRESS_PITCH);
541 dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
542
543}
544
545void dce80_compressor_disable_lpt(struct compressor *compressor)
546{
547 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
548 uint32_t value;
549 uint32_t addr;
550 uint32_t inx;
551
552 /* Disable all pipes LPT Stutter */
553 for (inx = 0; inx < 3; inx++) {
554 value =
555 dm_read_reg(
556 compressor->ctx,
557 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
558 set_reg_field_value(
559 value,
560 0,
561 DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
562 STUTTER_ENABLE_NONLPTCH);
563 dm_write_reg(
564 compressor->ctx,
565 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
566 value);
567 }
568
569 /* Disable LPT */
570 addr = mmLOW_POWER_TILING_CONTROL;
571 value = dm_read_reg(compressor->ctx, addr);
572 set_reg_field_value(
573 value,
574 0,
575 LOW_POWER_TILING_CONTROL,
576 LOW_POWER_TILING_ENABLE);
577 dm_write_reg(compressor->ctx, addr, value);
578
579 /* Clear selection of Channel(s) containing Compressed Surface */
580 addr = mmGMCON_LPT_TARGET;
581 value = dm_read_reg(compressor->ctx, addr);
582 set_reg_field_value(
583 value,
584 0xFFFFFFFF,
585 GMCON_LPT_TARGET,
586 STCTRL_LPT_TARGET);
587 dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
588}
589
590void dce80_compressor_enable_lpt(struct compressor *compressor)
591{
592 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
593 uint32_t value;
594 uint32_t addr;
595 uint32_t value_control;
596 uint32_t channels;
597
598 /* Enable LPT Stutter from Display pipe */
599 value = dm_read_reg(compressor->ctx,
600 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
601 set_reg_field_value(
602 value,
603 1,
604 DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
605 STUTTER_ENABLE_NONLPTCH);
606 dm_write_reg(compressor->ctx,
607 DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
608
609 /* Selection of Channel(s) containing Compressed Surface: 0xfffffff
610 * will disable LPT.
611 * STCTRL_LPT_TARGETn corresponds to channel n. */
612 addr = mmLOW_POWER_TILING_CONTROL;
613 value_control = dm_read_reg(compressor->ctx, addr);
614 channels = get_reg_field_value(value_control,
615 LOW_POWER_TILING_CONTROL,
616 LOW_POWER_TILING_MODE);
617
618 addr = mmGMCON_LPT_TARGET;
619 value = dm_read_reg(compressor->ctx, addr);
620 set_reg_field_value(
621 value,
622 channels + 1, /* not mentioned in programming guide,
623 but follow DCE8.1 */
624 GMCON_LPT_TARGET,
625 STCTRL_LPT_TARGET);
626 dm_write_reg(compressor->ctx, addr, value);
627
628 /* Enable LPT */
629 addr = mmLOW_POWER_TILING_CONTROL;
630 value = dm_read_reg(compressor->ctx, addr);
631 set_reg_field_value(
632 value,
633 1,
634 LOW_POWER_TILING_CONTROL,
635 LOW_POWER_TILING_ENABLE);
636 dm_write_reg(compressor->ctx, addr, value);
637}
638
639void dce80_compressor_program_lpt_control(
640 struct compressor *compressor,
641 struct compr_addr_and_pitch_params *params)
642{
643 struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
644 uint32_t rows_per_channel;
645 uint32_t lpt_alignment;
646 uint32_t source_view_width;
647 uint32_t source_view_height;
648 uint32_t lpt_control = 0;
649
650 if (!compressor->options.bits.LPT_SUPPORT)
651 return;
652
653 lpt_control = dm_read_reg(compressor->ctx,
654 mmLOW_POWER_TILING_CONTROL);
655
656 /* POSSIBLE VALUES for Low Power Tiling Mode:
657 * 00 - Use channel 0
658 * 01 - Use Channel 0 and 1
659 * 02 - Use Channel 0,1,2,3
660 * 03 - reserved */
661 switch (compressor->lpt_channels_num) {
662 /* case 2:
663 * Use Channel 0 & 1 / Not used for DCE 11 */
664 case 1:
665 /*Use Channel 0 for LPT for DCE 11 */
666 set_reg_field_value(
667 lpt_control,
668 0,
669 LOW_POWER_TILING_CONTROL,
670 LOW_POWER_TILING_MODE);
671 break;
672 default:
673 dm_logger_write(
674 compressor->ctx->logger, LOG_WARNING,
675 "%s: Invalid selected DRAM channels for LPT!!!",
676 __func__);
677 break;
678 }
679
680 lpt_control = lpt_memory_control_config(cp80, lpt_control);
681
682 /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
683 * FBC compressed surface pitch.
684 * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
685 * Surface Pitch) / (Row Size * Number of Channels *
686 * Number of Banks)). */
687 rows_per_channel = 0;
688 lpt_alignment = lpt_size_alignment(cp80);
689 source_view_width =
690 align_to_chunks_number_per_line(
691 cp80,
692 params->source_view_width);
693 source_view_height = (params->source_view_height + 1) & (~0x1);
694
695 if (lpt_alignment != 0) {
696 rows_per_channel = source_view_width * source_view_height * 4;
697 rows_per_channel =
698 (rows_per_channel % lpt_alignment) ?
699 (rows_per_channel / lpt_alignment + 1) :
700 rows_per_channel / lpt_alignment;
701 }
702
703 set_reg_field_value(
704 lpt_control,
705 rows_per_channel,
706 LOW_POWER_TILING_CONTROL,
707 LOW_POWER_TILING_ROWS_PER_CHAN);
708
709 dm_write_reg(compressor->ctx,
710 mmLOW_POWER_TILING_CONTROL, lpt_control);
711}
712
713/*
714 * DCE 11 Frame Buffer Compression Implementation
715 */
716
717void dce80_compressor_set_fbc_invalidation_triggers(
718 struct compressor *compressor,
719 uint32_t fbc_trigger)
720{
721 /* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19)
722 * for DCE 11 regions cannot be used - does not work with S/G
723 */
724 uint32_t addr = mmFBC_CLIENT_REGION_MASK;
725 uint32_t value = dm_read_reg(compressor->ctx, addr);
726
727 set_reg_field_value(
728 value,
729 0,
730 FBC_CLIENT_REGION_MASK,
731 FBC_MEMORY_REGION_MASK);
732 dm_write_reg(compressor->ctx, addr, value);
733
734 /* Setup events when to clear all CSM entries (effectively marking
735 * current compressed data invalid)
736 * For DCE 11 CSM metadata 11111 means - "Not Compressed"
737 * Used as the initial value of the metadata sent to the compressor
738 * after invalidation, to indicate that the compressor should attempt
739 * to compress all chunks on the current pass. Also used when the chunk
740 * is not successfully written to memory.
741 * When this CSM value is detected, FBC reads from the uncompressed
742 * buffer. Set events according to passed in value, these events are
743 * valid for DCE8:
744 * - bit 0 - display register updated
745 * - bit 28 - memory write from any client except from MCIF
746 * - bit 29 - CG static screen signal is inactive
747 * In addition, DCE8.1 also needs to set new DCE8.1 specific events
748 * that are used to trigger invalidation on certain register changes,
749 * for example enabling of Alpha Compression may trigger invalidation of
750 * FBC once bit is set. These events are as follows:
751 * - Bit 2 - FBC_GRPH_COMP_EN register updated
752 * - Bit 3 - FBC_SRC_SEL register updated
753 * - Bit 4 - FBC_MIN_COMPRESSION register updated
754 * - Bit 5 - FBC_ALPHA_COMP_EN register updated
755 * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
756 * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
757 */
758 addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
759 value = dm_read_reg(compressor->ctx, addr);
760 set_reg_field_value(
761 value,
762 fbc_trigger |
763 FBC_IDLE_FORCE_GRPH_COMP_EN |
764 FBC_IDLE_FORCE_SRC_SEL_CHANGE |
765 FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
766 FBC_IDLE_FORCE_ALPHA_COMP_EN |
767 FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
768 FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
769 FBC_IDLE_FORCE_CLEAR_MASK,
770 FBC_IDLE_FORCE_CLEAR_MASK);
771 dm_write_reg(compressor->ctx, addr, value);
772}
773
774void dce80_compressor_construct(struct dce80_compressor *compressor,
775 struct dc_context *ctx)
776{
777 struct dc_bios *bp = ctx->dc_bios;
778 struct embedded_panel_info panel_info;
779
780 compressor->base.options.raw = 0;
781 compressor->base.options.bits.FBC_SUPPORT = true;
782 compressor->base.options.bits.LPT_SUPPORT = true;
783 /* For DCE 11 always use one DRAM channel for LPT */
784 compressor->base.lpt_channels_num = 1;
785 compressor->base.options.bits.DUMMY_BACKEND = false;
786
787 /* Check if this system has more than 1 DRAM channel; if only 1 then LPT
788 * should not be supported */
789 if (compressor->base.memory_bus_width == 64)
790 compressor->base.options.bits.LPT_SUPPORT = false;
791
792 compressor->base.options.bits.CLK_GATING_DISABLED = false;
793
794 compressor->base.ctx = ctx;
795 compressor->base.embedded_panel_h_size = 0;
796 compressor->base.embedded_panel_v_size = 0;
797 compressor->base.memory_bus_width = ctx->asic_id.vram_width;
798 compressor->base.allocated_size = 0;
799 compressor->base.preferred_requested_size = 0;
800 compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
801 compressor->base.banks_num = 0;
802 compressor->base.raw_size = 0;
803 compressor->base.channel_interleave_size = 0;
804 compressor->base.dram_channels_num = 0;
805 compressor->base.lpt_channels_num = 0;
806 compressor->base.attached_inst = 0;
807 compressor->base.is_enabled = false;
808
809 if (BP_RESULT_OK ==
810 bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
811 compressor->base.embedded_panel_h_size =
812 panel_info.lcd_timing.horizontal_addressable;
813 compressor->base.embedded_panel_v_size =
814 panel_info.lcd_timing.vertical_addressable;
815 }
816}
817
818struct compressor *dce80_compressor_create(struct dc_context *ctx)
819{
820 struct dce80_compressor *cp80 =
821 kzalloc(sizeof(struct dce80_compressor), GFP_KERNEL);
822
823 if (!cp80)
824 return NULL;
825
826 dce80_compressor_construct(cp80, ctx);
827 return &cp80->base;
828}
829
830void dce80_compressor_destroy(struct compressor **compressor)
831{
832 kfree(TO_DCE80_COMPRESSOR(*compressor));
833 *compressor = NULL;
834}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
deleted file mode 100644
index cca58b044402..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/* Copyright 2012-15 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25#ifndef __DC_COMPRESSOR_DCE80_H__
26#define __DC_COMPRESSOR_DCE80_H__
27
28#include "../inc/compressor.h"
29
30#define TO_DCE80_COMPRESSOR(compressor)\
31 container_of(compressor, struct dce80_compressor, base)
32
33struct dce80_compressor_reg_offsets {
34 uint32_t dcp_offset;
35 uint32_t dmif_offset;
36};
37
38struct dce80_compressor {
39 struct compressor base;
40 struct dce80_compressor_reg_offsets offsets;
41};
42
43struct compressor *dce80_compressor_create(struct dc_context *ctx);
44
45void dce80_compressor_construct(struct dce80_compressor *cp80,
46 struct dc_context *ctx);
47
48void dce80_compressor_destroy(struct compressor **cp);
49
50/* FBC RELATED */
51void dce80_compressor_power_up_fbc(struct compressor *cp);
52
53void dce80_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
54 struct compr_addr_and_pitch_params *params);
55
56void dce80_compressor_disable_fbc(struct compressor *cp);
57
58void dce80_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
59 uint32_t fbc_trigger);
60
61void dce80_compressor_program_compressed_surface_address_and_pitch(
62 struct compressor *cp,
63 struct compr_addr_and_pitch_params *params);
64
65bool dce80_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
66 uint32_t *fbc_mapped_crtc_id);
67
68/* LPT RELATED */
69void dce80_compressor_enable_lpt(struct compressor *cp);
70
71void dce80_compressor_disable_lpt(struct compressor *cp);
72
73void dce80_compressor_program_lpt_control(struct compressor *cp,
74 struct compr_addr_and_pitch_params *params);
75
76bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
77
78#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index ccfcf1c0eeb3..6c6a1a16af19 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -70,47 +70,11 @@ static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
70 70
71/***************************PIPE_CONTROL***********************************/ 71/***************************PIPE_CONTROL***********************************/
72 72
73static bool dce80_enable_display_power_gating(
74 struct dc *dc,
75 uint8_t controller_id,
76 struct dc_bios *dcb,
77 enum pipe_gating_control power_gating)
78{
79 enum bp_result bp_result = BP_RESULT_OK;
80 enum bp_pipe_control_action cntl;
81 struct dc_context *ctx = dc->ctx;
82
83 if (power_gating == PIPE_GATING_CONTROL_INIT)
84 cntl = ASIC_PIPE_INIT;
85 else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
86 cntl = ASIC_PIPE_ENABLE;
87 else
88 cntl = ASIC_PIPE_DISABLE;
89
90 if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)){
91
92 bp_result = dcb->funcs->enable_disp_power_gating(
93 dcb, controller_id + 1, cntl);
94
95 /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
96 * by default when command table is called
97 */
98 dm_write_reg(ctx,
99 HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
100 0);
101 }
102
103 if (bp_result == BP_RESULT_OK)
104 return true;
105 else
106 return false;
107}
108
109void dce80_hw_sequencer_construct(struct dc *dc) 73void dce80_hw_sequencer_construct(struct dc *dc)
110{ 74{
111 dce110_hw_sequencer_construct(dc); 75 dce110_hw_sequencer_construct(dc);
112 76
113 dc->hwss.enable_display_power_gating = dce80_enable_display_power_gating; 77 dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
114 dc->hwss.pipe_control_lock = dce_pipe_control_lock; 78 dc->hwss.pipe_control_lock = dce_pipe_control_lock;
115 dc->hwss.set_bandwidth = dce100_set_bandwidth; 79 dc->hwss.set_bandwidth = dce100_set_bandwidth;
116} 80}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 8f2bd56f3461..5d854a37a978 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -53,6 +53,8 @@
53 53
54#include "reg_helper.h" 54#include "reg_helper.h"
55 55
56#include "dce/dce_dmcu.h"
57#include "dce/dce_abm.h"
56/* TODO remove this include */ 58/* TODO remove this include */
57 59
58#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT 60#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -364,6 +366,29 @@ static const struct resource_caps res_cap_83 = {
364 .num_pll = 2, 366 .num_pll = 2,
365}; 367};
366 368
369static const struct dce_dmcu_registers dmcu_regs = {
370 DMCU_DCE80_REG_LIST()
371};
372
373static const struct dce_dmcu_shift dmcu_shift = {
374 DMCU_MASK_SH_LIST_DCE80(__SHIFT)
375};
376
377static const struct dce_dmcu_mask dmcu_mask = {
378 DMCU_MASK_SH_LIST_DCE80(_MASK)
379};
380static const struct dce_abm_registers abm_regs = {
381 ABM_DCE110_COMMON_REG_LIST()
382};
383
384static const struct dce_abm_shift abm_shift = {
385 ABM_MASK_SH_LIST_DCE110(__SHIFT)
386};
387
388static const struct dce_abm_mask abm_mask = {
389 ABM_MASK_SH_LIST_DCE110(_MASK)
390};
391
367#define CTX ctx 392#define CTX ctx
368#define REG(reg) mm ## reg 393#define REG(reg) mm ## reg
369 394
@@ -643,6 +668,12 @@ static void destruct(struct dce110_resource_pool *pool)
643 } 668 }
644 } 669 }
645 670
671 if (pool->base.abm != NULL)
672 dce_abm_destroy(&pool->base.abm);
673
674 if (pool->base.dmcu != NULL)
675 dce_dmcu_destroy(&pool->base.dmcu);
676
646 if (pool->base.dp_clock_source != NULL) 677 if (pool->base.dp_clock_source != NULL)
647 dce80_clock_source_destroy(&pool->base.dp_clock_source); 678 dce80_clock_source_destroy(&pool->base.dp_clock_source);
648 679
@@ -790,9 +821,11 @@ static bool dce80_construct(
790 *************************************************/ 821 *************************************************/
791 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 822 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
792 pool->base.pipe_count = res_cap.num_timing_generator; 823 pool->base.pipe_count = res_cap.num_timing_generator;
824 pool->base.timing_generator_count = res_cap.num_timing_generator;
793 dc->caps.max_downscale_ratio = 200; 825 dc->caps.max_downscale_ratio = 200;
794 dc->caps.i2c_speed_in_khz = 40; 826 dc->caps.i2c_speed_in_khz = 40;
795 dc->caps.max_cursor_size = 128; 827 dc->caps.max_cursor_size = 128;
828 dc->caps.dual_link_dvi = true;
796 829
797 /************************************************* 830 /*************************************************
798 * Create resources * 831 * Create resources *
@@ -848,7 +881,25 @@ static bool dce80_construct(
848 goto res_create_fail; 881 goto res_create_fail;
849 } 882 }
850 883
884 pool->base.dmcu = dce_dmcu_create(ctx,
885 &dmcu_regs,
886 &dmcu_shift,
887 &dmcu_mask);
888 if (pool->base.dmcu == NULL) {
889 dm_error("DC: failed to create dmcu!\n");
890 BREAK_TO_DEBUGGER();
891 goto res_create_fail;
892 }
851 893
894 pool->base.abm = dce_abm_create(ctx,
895 &abm_regs,
896 &abm_shift,
897 &abm_mask);
898 if (pool->base.abm == NULL) {
899 dm_error("DC: failed to create abm!\n");
900 BREAK_TO_DEBUGGER();
901 goto res_create_fail;
902 }
852 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) 903 if (dm_pp_get_static_clocks(ctx, &static_clk_info))
853 pool->base.display_clock->max_clks_state = 904 pool->base.display_clock->max_clks_state =
854 static_clk_info.max_clocks_state; 905 static_clk_info.max_clocks_state;
@@ -954,6 +1005,7 @@ static bool dce81_construct(
954 *************************************************/ 1005 *************************************************/
955 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1006 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
956 pool->base.pipe_count = res_cap_81.num_timing_generator; 1007 pool->base.pipe_count = res_cap_81.num_timing_generator;
1008 pool->base.timing_generator_count = res_cap_81.num_timing_generator;
957 dc->caps.max_downscale_ratio = 200; 1009 dc->caps.max_downscale_ratio = 200;
958 dc->caps.i2c_speed_in_khz = 40; 1010 dc->caps.i2c_speed_in_khz = 40;
959 dc->caps.max_cursor_size = 128; 1011 dc->caps.max_cursor_size = 128;
@@ -1013,6 +1065,25 @@ static bool dce81_construct(
1013 goto res_create_fail; 1065 goto res_create_fail;
1014 } 1066 }
1015 1067
1068 pool->base.dmcu = dce_dmcu_create(ctx,
1069 &dmcu_regs,
1070 &dmcu_shift,
1071 &dmcu_mask);
1072 if (pool->base.dmcu == NULL) {
1073 dm_error("DC: failed to create dmcu!\n");
1074 BREAK_TO_DEBUGGER();
1075 goto res_create_fail;
1076 }
1077
1078 pool->base.abm = dce_abm_create(ctx,
1079 &abm_regs,
1080 &abm_shift,
1081 &abm_mask);
1082 if (pool->base.abm == NULL) {
1083 dm_error("DC: failed to create abm!\n");
1084 BREAK_TO_DEBUGGER();
1085 goto res_create_fail;
1086 }
1016 1087
1017 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) 1088 if (dm_pp_get_static_clocks(ctx, &static_clk_info))
1018 pool->base.display_clock->max_clks_state = 1089 pool->base.display_clock->max_clks_state =
@@ -1119,6 +1190,7 @@ static bool dce83_construct(
1119 *************************************************/ 1190 *************************************************/
1120 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1191 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
1121 pool->base.pipe_count = res_cap_83.num_timing_generator; 1192 pool->base.pipe_count = res_cap_83.num_timing_generator;
1193 pool->base.timing_generator_count = res_cap_83.num_timing_generator;
1122 dc->caps.max_downscale_ratio = 200; 1194 dc->caps.max_downscale_ratio = 200;
1123 dc->caps.i2c_speed_in_khz = 40; 1195 dc->caps.i2c_speed_in_khz = 40;
1124 dc->caps.max_cursor_size = 128; 1196 dc->caps.max_cursor_size = 128;
@@ -1174,6 +1246,25 @@ static bool dce83_construct(
1174 goto res_create_fail; 1246 goto res_create_fail;
1175 } 1247 }
1176 1248
1249 pool->base.dmcu = dce_dmcu_create(ctx,
1250 &dmcu_regs,
1251 &dmcu_shift,
1252 &dmcu_mask);
1253 if (pool->base.dmcu == NULL) {
1254 dm_error("DC: failed to create dmcu!\n");
1255 BREAK_TO_DEBUGGER();
1256 goto res_create_fail;
1257 }
1258
1259 pool->base.abm = dce_abm_create(ctx,
1260 &abm_regs,
1261 &abm_shift,
1262 &abm_mask);
1263 if (pool->base.abm == NULL) {
1264 dm_error("DC: failed to create abm!\n");
1265 BREAK_TO_DEBUGGER();
1266 goto res_create_fail;
1267 }
1177 1268
1178 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) 1269 if (dm_pp_get_static_clocks(ctx, &static_clk_info))
1179 pool->base.display_clock->max_clks_state = 1270 pool->base.display_clock->max_clks_state =
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 265894851493..3ba4712a35ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -84,7 +84,7 @@ static const struct dce110_timing_generator_offsets reg_offsets[] = {
84#define DCP_REG(reg) (reg + tg110->offsets.dcp) 84#define DCP_REG(reg) (reg + tg110->offsets.dcp)
85#define DMIF_REG(reg) (reg + tg110->offsets.dmif) 85#define DMIF_REG(reg) (reg + tg110->offsets.dmif)
86 86
87void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_khz) 87static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_khz)
88{ 88{
89 uint64_t pix_dur; 89 uint64_t pix_dur;
90 uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1 90 uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1
@@ -115,6 +115,68 @@ static void program_timing(struct timing_generator *tg,
115 dce110_tg_program_timing(tg, timing, use_vbios); 115 dce110_tg_program_timing(tg, timing, use_vbios);
116} 116}
117 117
118static void dce80_timing_generator_enable_advanced_request(
119 struct timing_generator *tg,
120 bool enable,
121 const struct dc_crtc_timing *timing)
122{
123 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
124 uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
125 uint32_t value = dm_read_reg(tg->ctx, addr);
126
127 if (enable) {
128 set_reg_field_value(
129 value,
130 0,
131 CRTC_START_LINE_CONTROL,
132 CRTC_LEGACY_REQUESTOR_EN);
133 } else {
134 set_reg_field_value(
135 value,
136 1,
137 CRTC_START_LINE_CONTROL,
138 CRTC_LEGACY_REQUESTOR_EN);
139 }
140
141 if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
142 set_reg_field_value(
143 value,
144 3,
145 CRTC_START_LINE_CONTROL,
146 CRTC_ADVANCED_START_LINE_POSITION);
147 set_reg_field_value(
148 value,
149 0,
150 CRTC_START_LINE_CONTROL,
151 CRTC_PREFETCH_EN);
152 } else {
153 set_reg_field_value(
154 value,
155 4,
156 CRTC_START_LINE_CONTROL,
157 CRTC_ADVANCED_START_LINE_POSITION);
158 set_reg_field_value(
159 value,
160 1,
161 CRTC_START_LINE_CONTROL,
162 CRTC_PREFETCH_EN);
163 }
164
165 set_reg_field_value(
166 value,
167 1,
168 CRTC_START_LINE_CONTROL,
169 CRTC_PROGRESSIVE_START_LINE_EARLY);
170
171 set_reg_field_value(
172 value,
173 1,
174 CRTC_START_LINE_CONTROL,
175 CRTC_INTERLACE_START_LINE_EARLY);
176
177 dm_write_reg(tg->ctx, addr, value);
178}
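The added dce80_timing_generator_enable_advanced_request() keys its prefetch setup off timing->v_sync_width + timing->v_front_porch: a sum of 3 or fewer programs CRTC_ADVANCED_START_LINE_POSITION = 3 with CRTC_PREFETCH_EN = 0, while anything larger programs position 4 with prefetch enabled. As a worked example (timing values assumed for illustration), a common 1080p mode with v_front_porch = 4 and v_sync_width = 5 gives 4 + 5 = 9 > 3, so it lands in the prefetch-enabled branch; a mode with only two or three lines of front porch plus sync would take the other branch and run with prefetch disabled. Note that this is the same body as the non-static dce80_timing_generator_enable_advanced_request() removed at the end of this file's diff: the function is simply being made static and moved above its only user, dce80_tg_funcs.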
179
118static const struct timing_generator_funcs dce80_tg_funcs = { 180static const struct timing_generator_funcs dce80_tg_funcs = {
119 .validate_timing = dce110_tg_validate_timing, 181 .validate_timing = dce110_tg_validate_timing,
120 .program_timing = program_timing, 182 .program_timing = program_timing,
@@ -150,6 +212,8 @@ static const struct timing_generator_funcs dce80_tg_funcs = {
150 /* DCE8.0 overrides */ 212 /* DCE8.0 overrides */
151 .enable_advanced_request = 213 .enable_advanced_request =
152 dce80_timing_generator_enable_advanced_request, 214 dce80_timing_generator_enable_advanced_request,
215 .configure_crc = dce110_configure_crc,
216 .get_crc = dce110_get_crc,
153}; 217};
154 218
155void dce80_timing_generator_construct( 219void dce80_timing_generator_construct(
@@ -176,64 +240,3 @@ void dce80_timing_generator_construct(
176 tg110->min_h_back_porch = 4; 240 tg110->min_h_back_porch = 4;
177} 241}
178 242
179void dce80_timing_generator_enable_advanced_request(
180 struct timing_generator *tg,
181 bool enable,
182 const struct dc_crtc_timing *timing)
183{
184 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
185 uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
186 uint32_t value = dm_read_reg(tg->ctx, addr);
187
188 if (enable) {
189 set_reg_field_value(
190 value,
191 0,
192 CRTC_START_LINE_CONTROL,
193 CRTC_LEGACY_REQUESTOR_EN);
194 } else {
195 set_reg_field_value(
196 value,
197 1,
198 CRTC_START_LINE_CONTROL,
199 CRTC_LEGACY_REQUESTOR_EN);
200 }
201
202 if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
203 set_reg_field_value(
204 value,
205 3,
206 CRTC_START_LINE_CONTROL,
207 CRTC_ADVANCED_START_LINE_POSITION);
208 set_reg_field_value(
209 value,
210 0,
211 CRTC_START_LINE_CONTROL,
212 CRTC_PREFETCH_EN);
213 } else {
214 set_reg_field_value(
215 value,
216 4,
217 CRTC_START_LINE_CONTROL,
218 CRTC_ADVANCED_START_LINE_POSITION);
219 set_reg_field_value(
220 value,
221 1,
222 CRTC_START_LINE_CONTROL,
223 CRTC_PREFETCH_EN);
224 }
225
226 set_reg_field_value(
227 value,
228 1,
229 CRTC_START_LINE_CONTROL,
230 CRTC_PROGRESSIVE_START_LINE_EARLY);
231
232 set_reg_field_value(
233 value,
234 1,
235 CRTC_START_LINE_CONTROL,
236 CRTC_INTERLACE_START_LINE_EARLY);
237
238 dm_write_reg(tg->ctx, addr, value);
239}
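
The dce80_timing_generator_enable_advanced_request() body moved above (and made static) is a plain read-modify-write of CRTC_START_LINE_CONTROL: it clears the legacy-requestor bit when advanced requests are enabled, then picks the advanced start line position and prefetch enable based on how many lines of vertical blank (v_sync_width + v_front_porch) are available, and finally forces the progressive/interlace early-start bits. A minimal standalone sketch of that decision, using invented bit positions and a hypothetical set_field() helper instead of the driver's dm_read_reg()/set_reg_field_value() macros:

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical field helper; the bit layout below is illustrative only. */
	static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift, uint32_t val)
	{
		return (reg & ~mask) | ((val << shift) & mask);
	}

	#define LEGACY_REQUESTOR_EN_MASK   0x00000001u
	#define LEGACY_REQUESTOR_EN_SHIFT  0
	#define ADV_START_LINE_POS_MASK    0x000000f0u
	#define ADV_START_LINE_POS_SHIFT   4
	#define PREFETCH_EN_MASK           0x00000100u
	#define PREFETCH_EN_SHIFT          8

	static uint32_t enable_advanced_request(uint32_t reg, bool enable,
						uint32_t v_sync_width, uint32_t v_front_porch)
	{
		/* Advanced requests and the legacy requestor are mutually exclusive. */
		reg = set_field(reg, LEGACY_REQUESTOR_EN_MASK, LEGACY_REQUESTOR_EN_SHIFT,
				enable ? 0 : 1);

		if (v_sync_width + v_front_porch <= 3) {
			/* Very short vertical blank: later start line, no prefetch. */
			reg = set_field(reg, ADV_START_LINE_POS_MASK, ADV_START_LINE_POS_SHIFT, 3);
			reg = set_field(reg, PREFETCH_EN_MASK, PREFETCH_EN_SHIFT, 0);
		} else {
			/* Enough blank lines: earlier start line with prefetch. */
			reg = set_field(reg, ADV_START_LINE_POS_MASK, ADV_START_LINE_POS_SHIFT, 4);
			reg = set_field(reg, PREFETCH_EN_MASK, PREFETCH_EN_SHIFT, 1);
		}
		return reg;
	}

	int main(void)
	{
		printf("short vblank: 0x%08x\n", enable_advanced_request(0, true, 2, 1));
		printf("long vblank:  0x%08x\n", enable_advanced_request(0, true, 4, 10));
		return 0;
	}
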
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
index 9cebb24c94c8..8ff1b06bcd8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
@@ -36,10 +36,4 @@ void dce80_timing_generator_construct(
36 uint32_t instance, 36 uint32_t instance,
37 const struct dce110_timing_generator_offsets *offsets); 37 const struct dce110_timing_generator_offsets *offsets);
38 38
39/******** HW programming ************/
40void dce80_timing_generator_enable_advanced_request(
41 struct timing_generator *tg,
42 bool enable,
43 const struct dc_crtc_timing *timing);
44
45#endif /* __DC_TIMING_GENERATOR_DCE80_H__ */ 39#endif /* __DC_TIMING_GENERATOR_DCE80_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 53ba3600ee6a..881a1bff94d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -232,10 +232,11 @@ bool cm_helper_convert_to_custom_float(
232 return true; 232 return true;
233} 233}
234 234
235 235/* driver uses 32 regions or less, but DCN HW has 34, extra 2 are set to 0 */
236#define MAX_REGIONS_NUMBER 34 236#define MAX_REGIONS_NUMBER 34
237#define MAX_LOW_POINT 25 237#define MAX_LOW_POINT 25
238#define NUMBER_SEGMENTS 32 238#define NUMBER_REGIONS 32
239#define NUMBER_SW_SEGMENTS 16
239 240
240bool cm_helper_translate_curve_to_hw_format( 241bool cm_helper_translate_curve_to_hw_format(
241 const struct dc_transfer_func *output_tf, 242 const struct dc_transfer_func *output_tf,
@@ -251,7 +252,7 @@ bool cm_helper_translate_curve_to_hw_format(
251 struct fixed31_32 y1_min; 252 struct fixed31_32 y1_min;
252 struct fixed31_32 y3_max; 253 struct fixed31_32 y3_max;
253 254
254 int32_t segment_start, segment_end; 255 int32_t region_start, region_end;
255 int32_t i; 256 int32_t i;
256 uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points; 257 uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
257 258
@@ -271,11 +272,11 @@ bool cm_helper_translate_curve_to_hw_format(
271 /* 32 segments 272 /* 32 segments
272 * segments are from 2^-25 to 2^7 273 * segments are from 2^-25 to 2^7
273 */ 274 */
274 for (i = 0; i < 32 ; i++) 275 for (i = 0; i < NUMBER_REGIONS ; i++)
275 seg_distr[i] = 3; 276 seg_distr[i] = 3;
276 277
277 segment_start = -25; 278 region_start = -MAX_LOW_POINT;
278 segment_end = 7; 279 region_end = NUMBER_REGIONS - MAX_LOW_POINT;
279 } else { 280 } else {
280 /* 10 segments 281 /* 10 segments
281 * segment is from 2^-10 to 2^0 282 * segment is from 2^-10 to 2^0
@@ -289,14 +290,14 @@ bool cm_helper_translate_curve_to_hw_format(
289 seg_distr[5] = 4; 290 seg_distr[5] = 4;
290 seg_distr[6] = 4; 291 seg_distr[6] = 4;
291 seg_distr[7] = 4; 292 seg_distr[7] = 4;
292 seg_distr[8] = 5; 293 seg_distr[8] = 4;
293 seg_distr[9] = 5; 294 seg_distr[9] = 4;
294 295
295 segment_start = -10; 296 region_start = -10;
296 segment_end = 0; 297 region_end = 0;
297 } 298 }
298 299
299 for (i = segment_end - segment_start; i < MAX_REGIONS_NUMBER ; i++) 300 for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
300 seg_distr[i] = -1; 301 seg_distr[i] = -1;
301 302
302 for (k = 0; k < MAX_REGIONS_NUMBER; k++) { 303 for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
@@ -305,10 +306,12 @@ bool cm_helper_translate_curve_to_hw_format(
305 } 306 }
306 307
307 j = 0; 308 j = 0;
308 for (k = 0; k < (segment_end - segment_start); k++) { 309 for (k = 0; k < (region_end - region_start); k++) {
309 increment = NUMBER_SEGMENTS / (1 << seg_distr[k]); 310 increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
310 start_index = (segment_start + k + MAX_LOW_POINT) * NUMBER_SEGMENTS; 311 start_index = (region_start + k + MAX_LOW_POINT) *
311 for (i = start_index; i < start_index + NUMBER_SEGMENTS; i += increment) { 312 NUMBER_SW_SEGMENTS;
313 for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
314 i += increment) {
312 if (j == hw_points - 1) 315 if (j == hw_points - 1)
313 break; 316 break;
314 rgb_resulted[j].red = output_tf->tf_pts.red[i]; 317 rgb_resulted[j].red = output_tf->tf_pts.red[i];
@@ -319,15 +322,15 @@ bool cm_helper_translate_curve_to_hw_format(
319 } 322 }
320 323
321 /* last point */ 324 /* last point */
322 start_index = (segment_end + MAX_LOW_POINT) * NUMBER_SEGMENTS; 325 start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
323 rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index]; 326 rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
324 rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; 327 rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
325 rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; 328 rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
326 329
327 arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2), 330 arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
328 dal_fixed31_32_from_int(segment_start)); 331 dal_fixed31_32_from_int(region_start));
329 arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2), 332 arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
330 dal_fixed31_32_from_int(segment_end)); 333 dal_fixed31_32_from_int(region_end));
331 334
332 y_r = rgb_resulted[0].red; 335 y_r = rgb_resulted[0].red;
333 y_g = rgb_resulted[0].green; 336 y_g = rgb_resulted[0].green;
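
The renames in the hunks above (segment_* to region_*, NUMBER_SEGMENTS split into NUMBER_REGIONS and NUMBER_SW_SEGMENTS) make the indexing easier to follow: the software transfer-function table carries NUMBER_SW_SEGMENTS (16) points per power-of-two region, regions run from 2^region_start to 2^region_end, and seg_distr[k] is the log2 of how many HW points region k receives, so each region is subsampled with stride NUMBER_SW_SEGMENTS / (1 << seg_distr[k]). A small sketch of just that index math, with hypothetical names and a printf in place of copying tf_pts entries:

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_LOW_POINT      25   /* lowest representable region is 2^-25 */
	#define NUMBER_SW_SEGMENTS 16   /* SW curve points per region */

	/* Print which SW-curve index each HW point samples for a given region layout. */
	static void dump_sampling(int region_start, int region_end,
				  const int *seg_distr, unsigned int hw_points_max)
	{
		unsigned int j = 0;
		int k;

		for (k = 0; k < region_end - region_start; k++) {
			unsigned int increment = NUMBER_SW_SEGMENTS / (1u << seg_distr[k]);
			unsigned int start_index =
				(unsigned int)(region_start + k + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
			unsigned int i;

			for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS; i += increment) {
				if (j == hw_points_max - 1)
					break;
				printf("hw point %3u <- sw index %4u (region 2^%d)\n",
				       j, i, region_start + k);
				j++;
			}
		}
	}

	int main(void)
	{
		/* Example: 10 regions from 2^-10 to 2^0, each getting 2^seg_distr[k] HW points. */
		const int seg_distr[10] = { 3, 3, 4, 4, 4, 4, 4, 4, 4, 4 };
		unsigned int hw_points = 0;
		int k;

		for (k = 0; k < 10; k++)
			hw_points += 1u << seg_distr[k];

		dump_sampling(-10, 0, seg_distr, hw_points);
		return 0;
	}
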
@@ -413,3 +416,156 @@ bool cm_helper_translate_curve_to_hw_format(
413 416
414 return true; 417 return true;
415} 418}
419
420#define NUM_DEGAMMA_REGIONS 12
421
422
423bool cm_helper_translate_curve_to_degamma_hw_format(
424 const struct dc_transfer_func *output_tf,
425 struct pwl_params *lut_params)
426{
427 struct curve_points *arr_points;
428 struct pwl_result_data *rgb_resulted;
429 struct pwl_result_data *rgb;
430 struct pwl_result_data *rgb_plus_1;
431 struct fixed31_32 y_r;
432 struct fixed31_32 y_g;
433 struct fixed31_32 y_b;
434 struct fixed31_32 y1_min;
435 struct fixed31_32 y3_max;
436
437 int32_t region_start, region_end;
438 int32_t i;
439 uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
440
441 if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
442 return false;
443
444 PERF_TRACE();
445
446 arr_points = lut_params->arr_points;
447 rgb_resulted = lut_params->rgb_resulted;
448 hw_points = 0;
449
450 memset(lut_params, 0, sizeof(struct pwl_params));
451 memset(seg_distr, 0, sizeof(seg_distr));
452
453 region_start = -NUM_DEGAMMA_REGIONS;
454 region_end = 0;
455
456
457 for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
458 seg_distr[i] = -1;
459 /* 12 segments
460 * segments are from 2^-12 to 0
461 */
462 for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++)
463 seg_distr[i] = 4;
464
465 for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
466 if (seg_distr[k] != -1)
467 hw_points += (1 << seg_distr[k]);
468 }
469
470 j = 0;
471 for (k = 0; k < (region_end - region_start); k++) {
472 increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
473 start_index = (region_start + k + MAX_LOW_POINT) *
474 NUMBER_SW_SEGMENTS;
475 for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
476 i += increment) {
477 if (j == hw_points - 1)
478 break;
479 rgb_resulted[j].red = output_tf->tf_pts.red[i];
480 rgb_resulted[j].green = output_tf->tf_pts.green[i];
481 rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
482 j++;
483 }
484 }
485
486 /* last point */
487 start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
488 rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
489 rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
490 rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
491
492 arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
493 dal_fixed31_32_from_int(region_start));
494 arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
495 dal_fixed31_32_from_int(region_end));
496
497 y_r = rgb_resulted[0].red;
498 y_g = rgb_resulted[0].green;
499 y_b = rgb_resulted[0].blue;
500
501 y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
502
503 arr_points[0].y = y1_min;
504 arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
505 y_r = rgb_resulted[hw_points - 1].red;
506 y_g = rgb_resulted[hw_points - 1].green;
507 y_b = rgb_resulted[hw_points - 1].blue;
508
509 /* see comment above, m_arrPoints[1].y should be the Y value for the
510 * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
511 */
512 y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
513
514 arr_points[1].y = y3_max;
515
516 arr_points[1].slope = dal_fixed31_32_zero;
517
518 if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
519 /* for PQ, we want to have a straight line from last HW X point,
520 * and the slope to be such that we hit 1.0 at 10000 nits.
521 */
522 const struct fixed31_32 end_value =
523 dal_fixed31_32_from_int(125);
524
525 arr_points[1].slope = dal_fixed31_32_div(
526 dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
527 dal_fixed31_32_sub(end_value, arr_points[1].x));
528 }
529
530 lut_params->hw_points_num = hw_points;
531
532 i = 1;
533 for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
534 if (seg_distr[k] != -1) {
535 lut_params->arr_curve_points[k].segments_num =
536 seg_distr[k];
537 lut_params->arr_curve_points[i].offset =
538 lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
539 }
540 i++;
541 }
542
543 if (seg_distr[k] != -1)
544 lut_params->arr_curve_points[k].segments_num = seg_distr[k];
545
546 rgb = rgb_resulted;
547 rgb_plus_1 = rgb_resulted + 1;
548
549 i = 1;
550 while (i != hw_points + 1) {
551 if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
552 rgb_plus_1->red = rgb->red;
553 if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
554 rgb_plus_1->green = rgb->green;
555 if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
556 rgb_plus_1->blue = rgb->blue;
557
558 rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
559 rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
560 rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
561
562 ++rgb_plus_1;
563 ++rgb;
564 ++i;
565 }
566 cm_helper_convert_to_custom_float(rgb_resulted,
567 lut_params->arr_points,
568 hw_points, false);
569
570 return true;
571}
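
The new cm_helper_translate_curve_to_degamma_hw_format() reuses the same region machinery with a fixed layout (12 regions from 2^-12 to 2^0, 16 HW points each), and for PQ it replaces the flat end slope with a straight line toward the 10000-nit point; end_value = 125 here presumably reflects the curve being normalized so that 1.0 corresponds to 80 nits (10000 / 80 = 125). A hedged sketch of that end-slope computation in plain floating point (the driver does this in dal_fixed31_32 fixed-point math):

	#include <stdio.h>

	/*
	 * Illustrative only: slope of the linear extension past the last HW point of
	 * a PQ degamma curve.  'last_x' is 2^region_end (2^0 = 1.0 for the degamma
	 * layout above), 'last_y' is the max of R/G/B at the last HW point, and
	 * 125.0 is assumed to be the normalized X at which the curve must reach 1.0.
	 */
	static double pq_end_slope(double last_x, double last_y)
	{
		const double end_value = 125.0;

		return (1.0 - last_y) / (end_value - last_x);
	}

	int main(void)
	{
		printf("end slope = %f\n", pq_end_slope(1.0, 0.75)); /* (1 - 0.75) / (125 - 1) */
		return 0;
	}
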
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 64e476b83bcb..7a531b02871f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -106,4 +106,9 @@ bool cm_helper_translate_curve_to_hw_format(
106 const struct dc_transfer_func *output_tf, 106 const struct dc_transfer_func *output_tf,
107 struct pwl_params *lut_params, bool fixpoint); 107 struct pwl_params *lut_params, bool fixpoint);
108 108
109bool cm_helper_translate_curve_to_degamma_hw_format(
110 const struct dc_transfer_func *output_tf,
111 struct pwl_params *lut_params);
112
113
109#endif 114#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index f2a08b156cf0..e305c28c98de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -196,7 +196,7 @@ static void dpp1_cm_set_regamma_pwl(
196 case OPP_REGAMMA_SRGB: 196 case OPP_REGAMMA_SRGB:
197 re_mode = 1; 197 re_mode = 1;
198 break; 198 break;
199 case OPP_REGAMMA_3_6: 199 case OPP_REGAMMA_XVYCC:
200 re_mode = 2; 200 re_mode = 2;
201 break; 201 break;
202 case OPP_REGAMMA_USER: 202 case OPP_REGAMMA_USER:
@@ -424,6 +424,24 @@ void dpp1_set_cursor_position(
424 424
425} 425}
426 426
427void dpp1_dppclk_control(
428 struct dpp *dpp_base,
429 bool dppclk_div,
430 bool enable)
431{
432 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
433
434 if (enable) {
435 if (dpp->tf_mask->DPPCLK_RATE_CONTROL)
436 REG_UPDATE_2(DPP_CONTROL,
437 DPPCLK_RATE_CONTROL, dppclk_div,
438 DPP_CLOCK_ENABLE, 1);
439 else
440 REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 1);
441 } else
442 REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
443}
444
427static const struct dpp_funcs dcn10_dpp_funcs = { 445static const struct dpp_funcs dcn10_dpp_funcs = {
428 .dpp_reset = dpp_reset, 446 .dpp_reset = dpp_reset,
429 .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, 447 .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
@@ -445,6 +463,8 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
445 .dpp_full_bypass = dpp1_full_bypass, 463 .dpp_full_bypass = dpp1_full_bypass,
446 .set_cursor_attributes = dpp1_set_cursor_attributes, 464 .set_cursor_attributes = dpp1_set_cursor_attributes,
447 .set_cursor_position = dpp1_set_cursor_position, 465 .set_cursor_position = dpp1_set_cursor_position,
466 .dpp_dppclk_control = dpp1_dppclk_control,
467 .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
448}; 468};
449 469
450static struct dpp_caps dcn10_dpp_cap = { 470static struct dpp_caps dcn10_dpp_cap = {
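
dpp1_dppclk_control() moves the per-pipe DPP clock gating that dcn10_hw_sequencer.c used to do in enable_dppclk() (removed further down in this series) into the DPP block itself: when enabling, it programs the DPPCLK_RATE_CONTROL divider together with DPP_CLOCK_ENABLE only on ASICs whose mask actually defines that field, otherwise it just sets the clock enable; when disabling, it only clears the enable. A standalone sketch of that decision, with a hypothetical field description in place of REG_UPDATE/REG_UPDATE_2:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical per-ASIC field description: mask == 0 means "field not present". */
	struct dpp_control_fields {
		uint32_t clock_enable_mask;
		uint32_t rate_control_mask;
	};

	static uint32_t dppclk_control(uint32_t dpp_control,
				       const struct dpp_control_fields *f,
				       bool dppclk_div, bool enable)
	{
		if (!enable)
			return dpp_control & ~f->clock_enable_mask;

		dpp_control |= f->clock_enable_mask;

		/* Only touch the divider field on ASICs that actually have it. */
		if (f->rate_control_mask) {
			if (dppclk_div)
				dpp_control |= f->rate_control_mask;
			else
				dpp_control &= ~f->rate_control_mask;
		}
		return dpp_control;
	}

	int main(void)
	{
		const struct dpp_control_fields f = {
			.clock_enable_mask = 0x1,
			.rate_control_mask = 0x2,
		};

		printf("0x%x\n", dppclk_control(0, &f, true, true));   /* enable, divided clock */
		printf("0x%x\n", dppclk_control(3, &f, false, false)); /* gate the clock */
		return 0;
	}
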
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index f56ee4d08d89..17b062a8f88a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -112,7 +112,9 @@
112 SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ 112 SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
113 SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ 113 SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
114 SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ 114 SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
115 SRI(CURSOR0_COLOR1, CNVC_CUR, id) 115 SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
116 SRI(DPP_CONTROL, DPP_TOP, id), \
117 SRI(CM_HDR_MULT_COEF, CM, id)
116 118
117 119
118 120
@@ -306,7 +308,9 @@
306 TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \ 308 TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
307 TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ 309 TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
308 TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ 310 TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
309 TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh) 311 TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
312 TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
313 TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
310 314
311#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\ 315#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\
312 TF_REG_LIST_SH_MASK_DCN(mask_sh),\ 316 TF_REG_LIST_SH_MASK_DCN(mask_sh),\
@@ -410,7 +414,8 @@
410 TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ 414 TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
411 TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ 415 TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
412 TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ 416 TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
413 TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh) 417 TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
418 TF_SF(DPP_TOP0_DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh)
414 419
415#define TF_REG_FIELD_LIST(type) \ 420#define TF_REG_FIELD_LIST(type) \
416 type EXT_OVERSCAN_LEFT; \ 421 type EXT_OVERSCAN_LEFT; \
@@ -1007,7 +1012,10 @@
1007 type CM_BYPASS; \ 1012 type CM_BYPASS; \
1008 type FORMAT_CONTROL__ALPHA_EN; \ 1013 type FORMAT_CONTROL__ALPHA_EN; \
1009 type CUR0_COLOR0; \ 1014 type CUR0_COLOR0; \
1010 type CUR0_COLOR1; 1015 type CUR0_COLOR1; \
1016 type DPPCLK_RATE_CONTROL; \
1017 type DPP_CLOCK_ENABLE; \
1018 type CM_HDR_MULT_COEF;
1011 1019
1012struct dcn_dpp_shift { 1020struct dcn_dpp_shift {
1013 TF_REG_FIELD_LIST(uint8_t) 1021 TF_REG_FIELD_LIST(uint8_t)
@@ -1252,7 +1260,9 @@ struct dcn_dpp_mask {
1252 uint32_t CURSOR_CONTROL; \ 1260 uint32_t CURSOR_CONTROL; \
1253 uint32_t CURSOR0_CONTROL; \ 1261 uint32_t CURSOR0_CONTROL; \
1254 uint32_t CURSOR0_COLOR0; \ 1262 uint32_t CURSOR0_COLOR0; \
1255 uint32_t CURSOR0_COLOR1; 1263 uint32_t CURSOR0_COLOR1; \
1264 uint32_t DPP_CONTROL; \
1265 uint32_t CM_HDR_MULT_COEF;
1256 1266
1257struct dcn_dpp_registers { 1267struct dcn_dpp_registers {
1258 DPP_COMMON_REG_VARIABLE_LIST 1268 DPP_COMMON_REG_VARIABLE_LIST
@@ -1287,6 +1297,12 @@ void dpp1_set_cursor_attributes(
1287 struct dpp *dpp_base, 1297 struct dpp *dpp_base,
1288 enum dc_cursor_color_format color_format); 1298 enum dc_cursor_color_format color_format);
1289 1299
1300void dpp1_set_cursor_position(
1301 struct dpp *dpp_base,
1302 const struct dc_cursor_position *pos,
1303 const struct dc_cursor_mi_param *param,
1304 uint32_t width);
1305
1290bool dpp1_dscl_is_lb_conf_valid( 1306bool dpp1_dscl_is_lb_conf_valid(
1291 int ceil_vratio, 1307 int ceil_vratio,
1292 int num_partitions, 1308 int num_partitions,
@@ -1397,6 +1413,15 @@ void dpp1_cnv_setup (
1397 1413
1398void dpp1_full_bypass(struct dpp *dpp_base); 1414void dpp1_full_bypass(struct dpp *dpp_base);
1399 1415
1416void dpp1_dppclk_control(
1417 struct dpp *dpp_base,
1418 bool dppclk_div,
1419 bool enable);
1420
1421void dpp1_set_hdr_multiplier(
1422 struct dpp *dpp_base,
1423 uint32_t multiplier);
1424
1400void dpp1_construct(struct dcn10_dpp *dpp1, 1425void dpp1_construct(struct dcn10_dpp *dpp1,
1401 struct dc_context *ctx, 1426 struct dc_context *ctx,
1402 uint32_t inst, 1427 uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index a5b099023652..fb32975e4b67 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -193,6 +193,7 @@ void dpp1_cm_set_gamut_remap(
193 const struct dpp_grph_csc_adjustment *adjust) 193 const struct dpp_grph_csc_adjustment *adjust)
194{ 194{
195 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 195 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
196 int i = 0;
196 197
197 if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) 198 if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
198 /* Bypass if type is bypass or hw */ 199 /* Bypass if type is bypass or hw */
@@ -201,20 +202,8 @@ void dpp1_cm_set_gamut_remap(
201 struct fixed31_32 arr_matrix[12]; 202 struct fixed31_32 arr_matrix[12];
202 uint16_t arr_reg_val[12]; 203 uint16_t arr_reg_val[12];
203 204
204 arr_matrix[0] = adjust->temperature_matrix[0]; 205 for (i = 0; i < 12; i++)
205 arr_matrix[1] = adjust->temperature_matrix[1]; 206 arr_matrix[i] = adjust->temperature_matrix[i];
206 arr_matrix[2] = adjust->temperature_matrix[2];
207 arr_matrix[3] = dal_fixed31_32_zero;
208
209 arr_matrix[4] = adjust->temperature_matrix[3];
210 arr_matrix[5] = adjust->temperature_matrix[4];
211 arr_matrix[6] = adjust->temperature_matrix[5];
212 arr_matrix[7] = dal_fixed31_32_zero;
213
214 arr_matrix[8] = adjust->temperature_matrix[6];
215 arr_matrix[9] = adjust->temperature_matrix[7];
216 arr_matrix[10] = adjust->temperature_matrix[8];
217 arr_matrix[11] = dal_fixed31_32_zero;
218 207
219 convert_float_matrix( 208 convert_float_matrix(
220 arr_reg_val, arr_matrix, 12); 209 arr_reg_val, arr_matrix, 12);
@@ -309,6 +298,32 @@ static void dpp1_cm_get_reg_field(
309 reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; 298 reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
310} 299}
311 300
301static void dpp1_cm_get_degamma_reg_field(
302 struct dcn10_dpp *dpp,
303 struct xfer_func_reg *reg)
304{
305 reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
306 reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
307 reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
308 reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
309 reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
310 reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
311 reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
312 reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
313
314 reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B;
315 reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B;
316 reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
317 reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
318 reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
319 reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
320 reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
321 reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
322 reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B;
323 reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B;
324 reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
325 reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
326}
312void dpp1_cm_set_output_csc_adjustment( 327void dpp1_cm_set_output_csc_adjustment(
313 struct dpp *dpp_base, 328 struct dpp *dpp_base,
314 const uint16_t *regval) 329 const uint16_t *regval)
@@ -513,7 +528,7 @@ void dpp1_program_degamma_lutb_settings(
513 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 528 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
514 struct xfer_func_reg gam_regs; 529 struct xfer_func_reg gam_regs;
515 530
516 dpp1_cm_get_reg_field(dpp, &gam_regs); 531 dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);
517 532
518 gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B); 533 gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B);
519 gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G); 534 gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
@@ -542,7 +557,7 @@ void dpp1_program_degamma_luta_settings(
542 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 557 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
543 struct xfer_func_reg gam_regs; 558 struct xfer_func_reg gam_regs;
544 559
545 dpp1_cm_get_reg_field(dpp, &gam_regs); 560 dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);
546 561
547 gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B); 562 gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
548 gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G); 563 gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
@@ -789,3 +804,12 @@ void dpp1_program_input_lut(
789 REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2); 804 REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
790 REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num); 805 REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
791} 806}
807
808void dpp1_set_hdr_multiplier(
809 struct dpp *dpp_base,
810 uint32_t multiplier)
811{
812 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
813
814 REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
815}
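
The gamut-remap change above replaces the hand-unrolled packing of a 3x3 matrix (with the fourth slot of each row pinned to zero) by a straight copy of all 12 coefficients from adjust->temperature_matrix, which suggests the adjustment is now carried as a full row-major 3x4 matrix: a 3x3 remap plus a per-channel offset column. A minimal sketch of applying such a 3x4 matrix to an RGB triplet, in plain doubles rather than the driver's fixed31_32 type:

	#include <stdio.h>

	/*
	 * out[row] = m[row][0]*R + m[row][1]*G + m[row][2]*B + m[row][3],
	 * with m stored row-major as 12 coefficients.  The fourth column is
	 * the offset the old code always set to zero.
	 */
	static void apply_gamut_remap(const double m[12], const double in[3], double out[3])
	{
		int row;

		for (row = 0; row < 3; row++)
			out[row] = m[row * 4 + 0] * in[0] +
				   m[row * 4 + 1] * in[1] +
				   m[row * 4 + 2] * in[2] +
				   m[row * 4 + 3];
	}

	int main(void)
	{
		/* Identity remap with zero offsets. */
		const double identity[12] = {
			1, 0, 0, 0,
			0, 1, 0, 0,
			0, 0, 1, 0,
		};
		const double rgb_in[3] = { 0.25, 0.50, 0.75 };
		double rgb_out[3];

		apply_gamut_remap(identity, rgb_in, rgb_out);
		printf("%.2f %.2f %.2f\n", rgb_out[0], rgb_out[1], rgb_out[2]);
		return 0;
	}
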
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index eb8317187f30..738f67ffd1b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -30,6 +30,8 @@
30 30
31#define CTX \ 31#define CTX \
32 hubbub->ctx 32 hubbub->ctx
33#define DC_LOGGER \
34 hubbub->ctx->logger
33#define REG(reg)\ 35#define REG(reg)\
34 hubbub->regs->reg 36 hubbub->regs->reg
35 37
@@ -100,7 +102,6 @@ bool hubbub1_verify_allow_pstate_change_high(
100 static unsigned int max_sampled_pstate_wait_us; /* data collection */ 102 static unsigned int max_sampled_pstate_wait_us; /* data collection */
101 static bool forced_pstate_allow; /* help with revert wa */ 103 static bool forced_pstate_allow; /* help with revert wa */
102 104
103 unsigned int debug_index = 0x7;
104 unsigned int debug_data; 105 unsigned int debug_data;
105 unsigned int i; 106 unsigned int i;
106 107
@@ -115,7 +116,9 @@ bool hubbub1_verify_allow_pstate_change_high(
115 forced_pstate_allow = false; 116 forced_pstate_allow = false;
116 } 117 }
117 118
118 /* description "3-0: Pipe0 cursor0 QOS 119 /* RV1:
120 * dchubbubdebugind, at: 0x7
121 * description "3-0: Pipe0 cursor0 QOS
119 * 7-4: Pipe1 cursor0 QOS 122 * 7-4: Pipe1 cursor0 QOS
120 * 11-8: Pipe2 cursor0 QOS 123 * 11-8: Pipe2 cursor0 QOS
121 * 15-12: Pipe3 cursor0 QOS 124 * 15-12: Pipe3 cursor0 QOS
@@ -137,7 +140,8 @@ bool hubbub1_verify_allow_pstate_change_high(
137 * 31: SOC pstate change request 140 * 31: SOC pstate change request
138 */ 141 */
139 142
140 REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, debug_index); 143
144 REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate);
141 145
142 for (i = 0; i < pstate_wait_timeout_us; i++) { 146 for (i = 0; i < pstate_wait_timeout_us; i++) {
143 debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); 147 debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
@@ -145,8 +149,7 @@ bool hubbub1_verify_allow_pstate_change_high(
145 if (debug_data & (1 << 30)) { 149 if (debug_data & (1 << 30)) {
146 150
147 if (i > pstate_wait_expected_timeout_us) 151 if (i > pstate_wait_expected_timeout_us)
148 dm_logger_write(hubbub->ctx->logger, LOG_WARNING, 152 DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
149 "pstate took longer than expected ~%dus\n",
150 i); 153 i);
151 154
152 return true; 155 return true;
@@ -165,8 +168,7 @@ bool hubbub1_verify_allow_pstate_change_high(
165 DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1); 168 DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
166 forced_pstate_allow = true; 169 forced_pstate_allow = true;
167 170
168 dm_logger_write(hubbub->ctx->logger, LOG_WARNING, 171 DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
169 "pstate TEST_DEBUG_DATA: 0x%X\n",
170 debug_data); 172 debug_data);
171 173
172 return false; 174 return false;
@@ -209,16 +211,14 @@ void hubbub1_program_watermarks(
209 refclk_mhz, 0x1fffff); 211 refclk_mhz, 0x1fffff);
210 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); 212 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
211 213
212 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 214 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
213 "URGENCY_WATERMARK_A calculated =%d\n"
214 "HW register value = 0x%x\n", 215 "HW register value = 0x%x\n",
215 watermarks->a.urgent_ns, prog_wm_value); 216 watermarks->a.urgent_ns, prog_wm_value);
216 217
217 prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns, 218 prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
218 refclk_mhz, 0x1fffff); 219 refclk_mhz, 0x1fffff);
219 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value); 220 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
220 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 221 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
221 "PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
222 "HW register value = 0x%x\n", 222 "HW register value = 0x%x\n",
223 watermarks->a.pte_meta_urgent_ns, prog_wm_value); 223 watermarks->a.pte_meta_urgent_ns, prog_wm_value);
224 224
@@ -227,8 +227,7 @@ void hubbub1_program_watermarks(
227 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, 227 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
228 refclk_mhz, 0x1fffff); 228 refclk_mhz, 0x1fffff);
229 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); 229 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
230 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 230 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
231 "SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
232 "HW register value = 0x%x\n", 231 "HW register value = 0x%x\n",
233 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); 232 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
234 233
@@ -237,8 +236,7 @@ void hubbub1_program_watermarks(
237 watermarks->a.cstate_pstate.cstate_exit_ns, 236 watermarks->a.cstate_pstate.cstate_exit_ns,
238 refclk_mhz, 0x1fffff); 237 refclk_mhz, 0x1fffff);
239 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); 238 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
240 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 239 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
241 "SR_EXIT_WATERMARK_A calculated =%d\n"
242 "HW register value = 0x%x\n", 240 "HW register value = 0x%x\n",
243 watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); 241 watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
244 } 242 }
@@ -247,8 +245,7 @@ void hubbub1_program_watermarks(
247 watermarks->a.cstate_pstate.pstate_change_ns, 245 watermarks->a.cstate_pstate.pstate_change_ns,
248 refclk_mhz, 0x1fffff); 246 refclk_mhz, 0x1fffff);
249 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); 247 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
250 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 248 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
251 "DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
252 "HW register value = 0x%x\n\n", 249 "HW register value = 0x%x\n\n",
253 watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); 250 watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
254 251
@@ -257,8 +254,7 @@ void hubbub1_program_watermarks(
257 prog_wm_value = convert_and_clamp( 254 prog_wm_value = convert_and_clamp(
258 watermarks->b.urgent_ns, refclk_mhz, 0x1fffff); 255 watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
259 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); 256 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
260 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 257 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
261 "URGENCY_WATERMARK_B calculated =%d\n"
262 "HW register value = 0x%x\n", 258 "HW register value = 0x%x\n",
263 watermarks->b.urgent_ns, prog_wm_value); 259 watermarks->b.urgent_ns, prog_wm_value);
264 260
@@ -267,8 +263,7 @@ void hubbub1_program_watermarks(
267 watermarks->b.pte_meta_urgent_ns, 263 watermarks->b.pte_meta_urgent_ns,
268 refclk_mhz, 0x1fffff); 264 refclk_mhz, 0x1fffff);
269 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value); 265 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
270 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 266 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
271 "PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
272 "HW register value = 0x%x\n", 267 "HW register value = 0x%x\n",
273 watermarks->b.pte_meta_urgent_ns, prog_wm_value); 268 watermarks->b.pte_meta_urgent_ns, prog_wm_value);
274 269
@@ -278,8 +273,7 @@ void hubbub1_program_watermarks(
278 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, 273 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
279 refclk_mhz, 0x1fffff); 274 refclk_mhz, 0x1fffff);
280 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); 275 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
281 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 276 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_B calculated =%d\n"
282 "SR_ENTER_WATERMARK_B calculated =%d\n"
283 "HW register value = 0x%x\n", 277 "HW register value = 0x%x\n",
284 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); 278 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
285 279
@@ -288,8 +282,7 @@ void hubbub1_program_watermarks(
288 watermarks->b.cstate_pstate.cstate_exit_ns, 282 watermarks->b.cstate_pstate.cstate_exit_ns,
289 refclk_mhz, 0x1fffff); 283 refclk_mhz, 0x1fffff);
290 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); 284 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
291 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 285 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
292 "SR_EXIT_WATERMARK_B calculated =%d\n"
293 "HW register value = 0x%x\n", 286 "HW register value = 0x%x\n",
294 watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); 287 watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
295 } 288 }
@@ -298,8 +291,7 @@ void hubbub1_program_watermarks(
298 watermarks->b.cstate_pstate.pstate_change_ns, 291 watermarks->b.cstate_pstate.pstate_change_ns,
299 refclk_mhz, 0x1fffff); 292 refclk_mhz, 0x1fffff);
300 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); 293 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
301 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 294 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
302 "DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
303 "HW register value = 0x%x\n", 295 "HW register value = 0x%x\n",
304 watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); 296 watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
305 297
@@ -307,8 +299,7 @@ void hubbub1_program_watermarks(
307 prog_wm_value = convert_and_clamp( 299 prog_wm_value = convert_and_clamp(
308 watermarks->c.urgent_ns, refclk_mhz, 0x1fffff); 300 watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
309 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); 301 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
310 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 302 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
311 "URGENCY_WATERMARK_C calculated =%d\n"
312 "HW register value = 0x%x\n", 303 "HW register value = 0x%x\n",
313 watermarks->c.urgent_ns, prog_wm_value); 304 watermarks->c.urgent_ns, prog_wm_value);
314 305
@@ -317,8 +308,7 @@ void hubbub1_program_watermarks(
317 watermarks->c.pte_meta_urgent_ns, 308 watermarks->c.pte_meta_urgent_ns,
318 refclk_mhz, 0x1fffff); 309 refclk_mhz, 0x1fffff);
319 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value); 310 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
320 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 311 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
321 "PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
322 "HW register value = 0x%x\n", 312 "HW register value = 0x%x\n",
323 watermarks->c.pte_meta_urgent_ns, prog_wm_value); 313 watermarks->c.pte_meta_urgent_ns, prog_wm_value);
324 314
@@ -328,8 +318,7 @@ void hubbub1_program_watermarks(
328 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, 318 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
329 refclk_mhz, 0x1fffff); 319 refclk_mhz, 0x1fffff);
330 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); 320 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
331 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 321 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_C calculated =%d\n"
332 "SR_ENTER_WATERMARK_C calculated =%d\n"
333 "HW register value = 0x%x\n", 322 "HW register value = 0x%x\n",
334 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); 323 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
335 324
@@ -338,8 +327,7 @@ void hubbub1_program_watermarks(
338 watermarks->c.cstate_pstate.cstate_exit_ns, 327 watermarks->c.cstate_pstate.cstate_exit_ns,
339 refclk_mhz, 0x1fffff); 328 refclk_mhz, 0x1fffff);
340 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); 329 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
341 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 330 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
342 "SR_EXIT_WATERMARK_C calculated =%d\n"
343 "HW register value = 0x%x\n", 331 "HW register value = 0x%x\n",
344 watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); 332 watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
345 } 333 }
@@ -348,8 +336,7 @@ void hubbub1_program_watermarks(
348 watermarks->c.cstate_pstate.pstate_change_ns, 336 watermarks->c.cstate_pstate.pstate_change_ns,
349 refclk_mhz, 0x1fffff); 337 refclk_mhz, 0x1fffff);
350 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); 338 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
351 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 339 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
352 "DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
353 "HW register value = 0x%x\n", 340 "HW register value = 0x%x\n",
354 watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); 341 watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
355 342
@@ -357,8 +344,7 @@ void hubbub1_program_watermarks(
357 prog_wm_value = convert_and_clamp( 344 prog_wm_value = convert_and_clamp(
358 watermarks->d.urgent_ns, refclk_mhz, 0x1fffff); 345 watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
359 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); 346 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
360 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 347 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
361 "URGENCY_WATERMARK_D calculated =%d\n"
362 "HW register value = 0x%x\n", 348 "HW register value = 0x%x\n",
363 watermarks->d.urgent_ns, prog_wm_value); 349 watermarks->d.urgent_ns, prog_wm_value);
364 350
@@ -366,8 +352,7 @@ void hubbub1_program_watermarks(
366 watermarks->d.pte_meta_urgent_ns, 352 watermarks->d.pte_meta_urgent_ns,
367 refclk_mhz, 0x1fffff); 353 refclk_mhz, 0x1fffff);
368 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value); 354 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
369 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 355 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
370 "PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
371 "HW register value = 0x%x\n", 356 "HW register value = 0x%x\n",
372 watermarks->d.pte_meta_urgent_ns, prog_wm_value); 357 watermarks->d.pte_meta_urgent_ns, prog_wm_value);
373 358
@@ -377,8 +362,7 @@ void hubbub1_program_watermarks(
377 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, 362 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
378 refclk_mhz, 0x1fffff); 363 refclk_mhz, 0x1fffff);
379 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); 364 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
380 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 365 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_D calculated =%d\n"
381 "SR_ENTER_WATERMARK_D calculated =%d\n"
382 "HW register value = 0x%x\n", 366 "HW register value = 0x%x\n",
383 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); 367 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
384 368
@@ -387,8 +371,7 @@ void hubbub1_program_watermarks(
387 watermarks->d.cstate_pstate.cstate_exit_ns, 371 watermarks->d.cstate_pstate.cstate_exit_ns,
388 refclk_mhz, 0x1fffff); 372 refclk_mhz, 0x1fffff);
389 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); 373 REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
390 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 374 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
391 "SR_EXIT_WATERMARK_D calculated =%d\n"
392 "HW register value = 0x%x\n", 375 "HW register value = 0x%x\n",
393 watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); 376 watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
394 } 377 }
@@ -398,8 +381,7 @@ void hubbub1_program_watermarks(
398 watermarks->d.cstate_pstate.pstate_change_ns, 381 watermarks->d.cstate_pstate.pstate_change_ns,
399 refclk_mhz, 0x1fffff); 382 refclk_mhz, 0x1fffff);
400 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); 383 REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
401 dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS, 384 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
402 "DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
403 "HW register value = 0x%x\n\n", 385 "HW register value = 0x%x\n\n",
404 watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); 386 watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
405 387
@@ -512,5 +494,6 @@ void hubbub1_construct(struct hubbub *hubbub,
512 hubbub->shifts = hubbub_shift; 494 hubbub->shifts = hubbub_shift;
513 hubbub->masks = hubbub_mask; 495 hubbub->masks = hubbub_mask;
514 496
497 hubbub->debug_test_index_pstate = 0x7;
515} 498}
516 499
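
hubbub1_verify_allow_pstate_change_high() now takes the debug bus index from hubbub->debug_test_index_pstate (set to 0x7 in hubbub1_construct()) instead of a hard-coded local, and the dm_logger_write() calls collapse into DC_LOG_* macros bound through the new DC_LOGGER define. The underlying mechanism is an index/data debug register pair: write the index, then poll the data register until bit 30 (which the function treats as "p-state change allowed") is set or a timeout expires. A rough standalone sketch of that poll loop, with hypothetical accessors in place of REG_WRITE/REG_READ and udelay:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PSTATE_ALLOW_BIT (1u << 30)

	/* Hypothetical MMIO accessors and canned debug-bus data, for illustration. */
	static uint32_t fake_debug_data = PSTATE_ALLOW_BIT;

	static void debug_index_write(uint32_t index) { (void)index; }
	static uint32_t debug_data_read(void) { return fake_debug_data; }
	static void udelay_1us(void) { }

	/* Mirrors the structure of hubbub1_verify_allow_pstate_change_high(). */
	static bool wait_pstate_change_allowed(uint32_t debug_index,
					       unsigned int timeout_us,
					       unsigned int expected_us)
	{
		unsigned int i;

		debug_index_write(debug_index);

		for (i = 0; i < timeout_us; i++) {
			if (debug_data_read() & PSTATE_ALLOW_BIT) {
				if (i > expected_us)
					printf("pstate took longer than expected ~%uus\n", i);
				return true;
			}
			udelay_1us();
		}

		/* Timed out: the real driver force-allows the p-state change and warns. */
		printf("pstate change not allowed after %uus\n", timeout_us);
		return false;
	}

	int main(void)
	{
		return wait_pstate_change_allowed(0x7, 1000, 40) ? 0 : 1;
	}
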
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index d5c97844312f..a16e908821a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -185,6 +185,7 @@ struct hubbub {
185 const struct dcn_hubbub_registers *regs; 185 const struct dcn_hubbub_registers *regs;
186 const struct dcn_hubbub_shift *shifts; 186 const struct dcn_hubbub_shift *shifts;
187 const struct dcn_hubbub_mask *masks; 187 const struct dcn_hubbub_mask *masks;
188 unsigned int debug_test_index_pstate;
188}; 189};
189 190
190void hubbub1_update_dchub( 191void hubbub1_update_dchub(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 585b33384002..39b72f696ae9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -73,6 +73,9 @@ static void hubp1_disconnect(struct hubp *hubp)
73 73
74 REG_UPDATE(DCHUBP_CNTL, 74 REG_UPDATE(DCHUBP_CNTL,
75 HUBP_TTU_DISABLE, 1); 75 HUBP_TTU_DISABLE, 1);
76
77 REG_UPDATE(CURSOR_CONTROL,
78 CURSOR_ENABLE, 0);
76} 79}
77 80
78static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank) 81static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
@@ -296,8 +299,9 @@ bool hubp1_program_surface_flip_and_addr(
296 if (address->grph.addr.quad_part == 0) 299 if (address->grph.addr.quad_part == 0)
297 break; 300 break;
298 301
299 REG_UPDATE(DCSURF_SURFACE_CONTROL, 302 REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
300 PRIMARY_SURFACE_TMZ, address->tmz_surface); 303 PRIMARY_SURFACE_TMZ, address->tmz_surface,
304 PRIMARY_META_SURFACE_TMZ, address->tmz_surface);
301 305
302 if (address->grph.meta_addr.quad_part != 0) { 306 if (address->grph.meta_addr.quad_part != 0) {
303 REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0, 307 REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
@@ -322,8 +326,11 @@ bool hubp1_program_surface_flip_and_addr(
322 || address->video_progressive.chroma_addr.quad_part == 0) 326 || address->video_progressive.chroma_addr.quad_part == 0)
323 break; 327 break;
324 328
325 REG_UPDATE(DCSURF_SURFACE_CONTROL, 329 REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
326 PRIMARY_SURFACE_TMZ, address->tmz_surface); 330 PRIMARY_SURFACE_TMZ, address->tmz_surface,
331 PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
332 PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
333 PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
327 334
328 if (address->video_progressive.luma_meta_addr.quad_part != 0) { 335 if (address->video_progressive.luma_meta_addr.quad_part != 0) {
329 REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0, 336 REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
@@ -365,8 +372,11 @@ bool hubp1_program_surface_flip_and_addr(
365 if (address->grph_stereo.right_addr.quad_part == 0) 372 if (address->grph_stereo.right_addr.quad_part == 0)
366 break; 373 break;
367 374
368 REG_UPDATE(DCSURF_SURFACE_CONTROL, 375 REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
369 PRIMARY_SURFACE_TMZ, address->tmz_surface); 376 PRIMARY_SURFACE_TMZ, address->tmz_surface,
377 PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
378 PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
379 PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
370 380
371 if (address->grph_stereo.right_meta_addr.quad_part != 0) { 381 if (address->grph_stereo.right_meta_addr.quad_part != 0) {
372 382
@@ -909,6 +919,21 @@ void hubp1_cursor_set_position(
909 /* TODO Handle surface pixel formats other than 4:4:4 */ 919 /* TODO Handle surface pixel formats other than 4:4:4 */
910} 920}
911 921
922void hubp1_clk_cntl(struct hubp *hubp, bool enable)
923{
924 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
925 uint32_t clk_enable = enable ? 1 : 0;
926
927 REG_UPDATE(HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, clk_enable);
928}
929
930void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
931{
932 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
933
934 REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
935}
936
912static struct hubp_funcs dcn10_hubp_funcs = { 937static struct hubp_funcs dcn10_hubp_funcs = {
913 .hubp_program_surface_flip_and_addr = 938 .hubp_program_surface_flip_and_addr =
914 hubp1_program_surface_flip_and_addr, 939 hubp1_program_surface_flip_and_addr,
@@ -925,6 +950,8 @@ static struct hubp_funcs dcn10_hubp_funcs = {
925 .set_cursor_attributes = hubp1_cursor_set_attributes, 950 .set_cursor_attributes = hubp1_cursor_set_attributes,
926 .set_cursor_position = hubp1_cursor_set_position, 951 .set_cursor_position = hubp1_cursor_set_position,
927 .hubp_disconnect = hubp1_disconnect, 952 .hubp_disconnect = hubp1_disconnect,
953 .hubp_clk_cntl = hubp1_clk_cntl,
954 .hubp_vtg_sel = hubp1_vtg_sel,
928}; 955};
929 956
930/*****************************************/ 957/*****************************************/
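
In hubp1_program_surface_flip_and_addr() the single PRIMARY_SURFACE_TMZ update becomes REG_UPDATE_2/REG_UPDATE_4 calls so that, for video and stereo surfaces, the chroma and DCC-metadata planes inherit the same TMZ (trusted memory zone) setting as the luma surface. A hedged sketch of that fan-out, with an invented bitfield layout standing in for DCSURF_SURFACE_CONTROL:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Invented bit positions, for illustration only. */
	#define SURFACE_TMZ        (1u << 0)
	#define SURFACE_TMZ_C      (1u << 1)
	#define META_SURFACE_TMZ   (1u << 2)
	#define META_SURFACE_TMZ_C (1u << 3)

	enum surface_kind { SURFACE_GRAPHICS, SURFACE_VIDEO, SURFACE_STEREO };

	/* Propagate one tmz flag to every plane the surface type actually has. */
	static uint32_t program_tmz(uint32_t surface_control, enum surface_kind kind, bool tmz)
	{
		uint32_t bits = SURFACE_TMZ | META_SURFACE_TMZ;

		if (kind != SURFACE_GRAPHICS)	/* video / stereo also carry chroma planes */
			bits |= SURFACE_TMZ_C | META_SURFACE_TMZ_C;

		return tmz ? (surface_control | bits) : (surface_control & ~bits);
	}

	int main(void)
	{
		printf("graphics: 0x%x\n", program_tmz(0, SURFACE_GRAPHICS, true));
		printf("video:    0x%x\n", program_tmz(0, SURFACE_VIDEO, true));
		return 0;
	}
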
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 33e91d9c010f..4a3703e12ea1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -96,7 +96,8 @@
96 SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\ 96 SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\
97 SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\ 97 SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\
98 SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\ 98 SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
99 SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id) 99 SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id),\
100 SRI(HUBP_CLK_CNTL, HUBP, id)
100 101
101#define HUBP_REG_LIST_DCN10(id)\ 102#define HUBP_REG_LIST_DCN10(id)\
102 HUBP_REG_LIST_DCN(id),\ 103 HUBP_REG_LIST_DCN(id),\
@@ -230,7 +231,8 @@
230 uint32_t CURSOR_CONTROL; \ 231 uint32_t CURSOR_CONTROL; \
231 uint32_t CURSOR_POSITION; \ 232 uint32_t CURSOR_POSITION; \
232 uint32_t CURSOR_HOT_SPOT; \ 233 uint32_t CURSOR_HOT_SPOT; \
233 uint32_t CURSOR_DST_OFFSET 234 uint32_t CURSOR_DST_OFFSET; \
235 uint32_t HUBP_CLK_CNTL
234 236
235#define HUBP_SF(reg_name, field_name, post_fix)\ 237#define HUBP_SF(reg_name, field_name, post_fix)\
236 .field_name = reg_name ## __ ## field_name ## post_fix 238 .field_name = reg_name ## __ ## field_name ## post_fix
@@ -240,6 +242,7 @@
240 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ 242 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
241 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\ 243 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
242 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\ 244 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
245 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
243 HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\ 246 HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
244 HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\ 247 HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
245 HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\ 248 HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
@@ -293,6 +296,9 @@
293 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\ 296 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\
294 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\ 297 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\
295 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\ 298 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\
299 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ_C, mask_sh),\
300 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ, mask_sh),\
301 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\
296 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\ 302 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
297 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\ 303 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
298 HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\ 304 HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
@@ -352,7 +358,8 @@
352 HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\ 358 HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
353 HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\ 359 HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
354 HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\ 360 HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
355 HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh) 361 HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh),\
362 HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
356 363
357#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\ 364#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
358 HUBP_MASK_SH_LIST_DCN(mask_sh),\ 365 HUBP_MASK_SH_LIST_DCN(mask_sh),\
@@ -398,6 +405,7 @@
398 type HUBP_BLANK_EN;\ 405 type HUBP_BLANK_EN;\
399 type HUBP_TTU_DISABLE;\ 406 type HUBP_TTU_DISABLE;\
400 type HUBP_NO_OUTSTANDING_REQ;\ 407 type HUBP_NO_OUTSTANDING_REQ;\
408 type HUBP_VTG_SEL;\
401 type HUBP_UNDERFLOW_STATUS;\ 409 type HUBP_UNDERFLOW_STATUS;\
402 type NUM_PIPES;\ 410 type NUM_PIPES;\
403 type NUM_BANKS;\ 411 type NUM_BANKS;\
@@ -452,6 +460,13 @@
452 type SURFACE_EARLIEST_INUSE_ADDRESS_C;\ 460 type SURFACE_EARLIEST_INUSE_ADDRESS_C;\
453 type SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C;\ 461 type SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C;\
454 type PRIMARY_SURFACE_TMZ;\ 462 type PRIMARY_SURFACE_TMZ;\
463 type PRIMARY_SURFACE_TMZ_C;\
464 type SECONDARY_SURFACE_TMZ;\
465 type SECONDARY_SURFACE_TMZ_C;\
466 type PRIMARY_META_SURFACE_TMZ;\
467 type PRIMARY_META_SURFACE_TMZ_C;\
468 type SECONDARY_META_SURFACE_TMZ;\
469 type SECONDARY_META_SURFACE_TMZ_C;\
455 type PRIMARY_SURFACE_DCC_EN;\ 470 type PRIMARY_SURFACE_DCC_EN;\
456 type PRIMARY_SURFACE_DCC_IND_64B_BLK;\ 471 type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
457 type DET_BUF_PLANE1_BASE_ADDRESS;\ 472 type DET_BUF_PLANE1_BASE_ADDRESS;\
@@ -524,6 +539,7 @@
524 type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;\ 539 type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;\
525 type ENABLE_L1_TLB;\ 540 type ENABLE_L1_TLB;\
526 type SYSTEM_ACCESS_MODE;\ 541 type SYSTEM_ACCESS_MODE;\
542 type HUBP_CLOCK_ENABLE;\
527 type MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\ 543 type MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
528 type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\ 544 type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
529 type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\ 545 type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
@@ -653,6 +669,9 @@ void min_set_viewport(struct hubp *hubp,
653 const struct rect *viewport, 669 const struct rect *viewport,
654 const struct rect *viewport_c); 670 const struct rect *viewport_c);
655 671
672void hubp1_clk_cntl(struct hubp *hubp, bool enable);
673void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst);
674
656void dcn10_hubp_construct( 675void dcn10_hubp_construct(
657 struct dcn10_hubp *hubp1, 676 struct dcn10_hubp *hubp1,
658 struct dc_context *ctx, 677 struct dc_context *ctx,
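
The header changes above mostly extend the HUBP_SF() register-field lists: each field gets a shift and a mask entry generated by token-pasting reg_name ## __ ## field_name ## post_fix, and the REG_UPDATE-style helpers then read those per-ASIC tables to do read-modify-write updates without hard-coding bit positions. A compact standalone illustration of the same pattern, where the register and field constants are invented and only the macro shape matches the driver's:

	#include <stdint.h>
	#include <stdio.h>

	/* Per-ASIC shift/mask tables, analogous to dcn_hubp_shift/dcn_hubp_mask. */
	struct hubp_shift { uint8_t  HUBP_CLOCK_ENABLE; uint8_t  HUBP_VTG_SEL; };
	struct hubp_mask  { uint32_t HUBP_CLOCK_ENABLE; uint32_t HUBP_VTG_SEL; };

	/* Invented reg__field__{SHIFT,MASK} constants in the usual naming scheme. */
	#define DCHUBP_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0
	#define DCHUBP_CNTL__HUBP_CLOCK_ENABLE_MASK   0x00000001u
	#define DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT      4
	#define DCHUBP_CNTL__HUBP_VTG_SEL_MASK        0x000000f0u

	#define HUBP_SF(reg_name, field_name, post_fix) \
		.field_name = reg_name ## __ ## field_name ## post_fix

	static const struct hubp_shift hubp_shift = {
		HUBP_SF(DCHUBP_CNTL, HUBP_CLOCK_ENABLE, __SHIFT),
		HUBP_SF(DCHUBP_CNTL, HUBP_VTG_SEL, __SHIFT),
	};

	static const struct hubp_mask hubp_mask = {
		HUBP_SF(DCHUBP_CNTL, HUBP_CLOCK_ENABLE, _MASK),
		HUBP_SF(DCHUBP_CNTL, HUBP_VTG_SEL, _MASK),
	};

	/* Generic REG_UPDATE-style helper driven by the tables above. */
	static uint32_t reg_update(uint32_t reg, uint32_t mask, uint8_t shift, uint32_t value)
	{
		return (reg & ~mask) | ((value << shift) & mask);
	}

	int main(void)
	{
		uint32_t dchubp_cntl = 0;

		dchubp_cntl = reg_update(dchubp_cntl, hubp_mask.HUBP_CLOCK_ENABLE,
					 hubp_shift.HUBP_CLOCK_ENABLE, 1);
		dchubp_cntl = reg_update(dchubp_cntl, hubp_mask.HUBP_VTG_SEL,
					 hubp_shift.HUBP_VTG_SEL, 2);
		printf("DCHUBP_CNTL = 0x%08x\n", dchubp_cntl);
		return 0;
	}
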
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 82572863acab..8b0f6b8a5627 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,6 +45,8 @@
45#include "dcn10_hubbub.h" 45#include "dcn10_hubbub.h"
46#include "dcn10_cm_common.h" 46#include "dcn10_cm_common.h"
47 47
48#define DC_LOGGER \
49 ctx->logger
48#define CTX \ 50#define CTX \
49 hws->ctx 51 hws->ctx
50#define REG(reg)\ 52#define REG(reg)\
@@ -133,7 +135,7 @@ void dcn10_log_hw_state(struct dc *dc)
133 DTN_INFO("[%d]:\t %xh \t %xh \t %d \t %d \t " 135 DTN_INFO("[%d]:\t %xh \t %xh \t %d \t %d \t "
134 "%xh \t %xh \t %xh \t " 136 "%xh \t %xh \t %xh \t "
135 "%d \t %d \t %d \t %xh \t", 137 "%d \t %d \t %d \t %xh \t",
136 i, 138 hubp->inst,
137 s.pixel_format, 139 s.pixel_format,
138 s.inuse_addr_hi, 140 s.inuse_addr_hi,
139 s.viewport_width, 141 s.viewport_width,
@@ -155,7 +157,7 @@ void dcn10_log_hw_state(struct dc *dc)
155 DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t " 157 DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t "
156 "h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n"); 158 "h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n");
157 159
158 for (i = 0; i < pool->res_cap->num_timing_generator; i++) { 160 for (i = 0; i < pool->timing_generator_count; i++) {
159 struct timing_generator *tg = pool->timing_generators[i]; 161 struct timing_generator *tg = pool->timing_generators[i];
160 struct dcn_otg_state s = {0}; 162 struct dcn_otg_state s = {0};
161 163
@@ -168,7 +170,7 @@ void dcn10_log_hw_state(struct dc *dc)
168 DTN_INFO("[%d]:\t %d \t %d \t %d \t %d \t " 170 DTN_INFO("[%d]:\t %d \t %d \t %d \t %d \t "
169 "%d \t %d \t %d \t %d \t %d \t %d \t " 171 "%d \t %d \t %d \t %d \t %d \t %d \t "
170 "%d \t %d \t %d \t %d \t %d \t ", 172 "%d \t %d \t %d \t %d \t %d \t ",
171 i, 173 tg->inst,
172 s.v_blank_start, 174 s.v_blank_start,
173 s.v_blank_end, 175 s.v_blank_end,
174 s.v_sync_a_start, 176 s.v_sync_a_start,
@@ -193,26 +195,6 @@ void dcn10_log_hw_state(struct dc *dc)
193 DTN_INFO_END(); 195 DTN_INFO_END();
194} 196}
195 197
196static void enable_dppclk(
197 struct dce_hwseq *hws,
198 uint8_t plane_id,
199 uint32_t requested_pix_clk,
200 bool dppclk_div)
201{
202 dm_logger_write(hws->ctx->logger, LOG_SURFACE,
203 "dppclk_rate_control for pipe %d programed to %d\n",
204 plane_id,
205 dppclk_div);
206
207 if (hws->shifts->DPPCLK_RATE_CONTROL)
208 REG_UPDATE_2(DPP_CONTROL[plane_id],
209 DPPCLK_RATE_CONTROL, dppclk_div,
210 DPP_CLOCK_ENABLE, 1);
211 else
212 REG_UPDATE(DPP_CONTROL[plane_id],
213 DPP_CLOCK_ENABLE, 1);
214}
215
216static void enable_power_gating_plane( 198static void enable_power_gating_plane(
217 struct dce_hwseq *hws, 199 struct dce_hwseq *hws,
218 bool enable) 200 bool enable)
@@ -238,10 +220,34 @@ static void enable_power_gating_plane(
238static void disable_vga( 220static void disable_vga(
239 struct dce_hwseq *hws) 221 struct dce_hwseq *hws)
240{ 222{
223 unsigned int in_vga1_mode = 0;
224 unsigned int in_vga2_mode = 0;
225 unsigned int in_vga3_mode = 0;
226 unsigned int in_vga4_mode = 0;
227
228 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
229 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
230 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
231 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
232
233 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
234 in_vga3_mode == 0 && in_vga4_mode == 0)
235 return;
236
241 REG_WRITE(D1VGA_CONTROL, 0); 237 REG_WRITE(D1VGA_CONTROL, 0);
242 REG_WRITE(D2VGA_CONTROL, 0); 238 REG_WRITE(D2VGA_CONTROL, 0);
243 REG_WRITE(D3VGA_CONTROL, 0); 239 REG_WRITE(D3VGA_CONTROL, 0);
244 REG_WRITE(D4VGA_CONTROL, 0); 240 REG_WRITE(D4VGA_CONTROL, 0);
241
242 /* HW Engineer's Notes:
243 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
244 * then hit the VGA_TEST_RENDER_START, the DCHUBP timing gets updated correctly.
245 *
246 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
247 * VGA_TEST_ENABLE, to leave it in the same state as before.
248 */
249 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
250 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
245} 251}
246 252
247static void dpp_pg_control( 253static void dpp_pg_control(
@@ -348,6 +354,7 @@ static void power_on_plane(
348 struct dce_hwseq *hws, 354 struct dce_hwseq *hws,
349 int plane_id) 355 int plane_id)
350{ 356{
357 struct dc_context *ctx = hws->ctx;
351 if (REG(DC_IP_REQUEST_CNTL)) { 358 if (REG(DC_IP_REQUEST_CNTL)) {
352 REG_SET(DC_IP_REQUEST_CNTL, 0, 359 REG_SET(DC_IP_REQUEST_CNTL, 0,
353 IP_REQUEST_EN, 1); 360 IP_REQUEST_EN, 1);
@@ -355,7 +362,7 @@ static void power_on_plane(
355 hubp_pg_control(hws, plane_id, true); 362 hubp_pg_control(hws, plane_id, true);
356 REG_SET(DC_IP_REQUEST_CNTL, 0, 363 REG_SET(DC_IP_REQUEST_CNTL, 0,
357 IP_REQUEST_EN, 0); 364 IP_REQUEST_EN, 0);
358 dm_logger_write(hws->ctx->logger, LOG_DEBUG, 365 DC_LOG_DEBUG(
359 "Un-gated front end for pipe %d\n", plane_id); 366 "Un-gated front end for pipe %d\n", plane_id);
360 } 367 }
361} 368}
@@ -546,7 +553,7 @@ static void reset_back_end_for_pipe(
546 struct dc_state *context) 553 struct dc_state *context)
547{ 554{
548 int i; 555 int i;
549 556 struct dc_context *ctx = dc->ctx;
550 if (pipe_ctx->stream_res.stream_enc == NULL) { 557 if (pipe_ctx->stream_res.stream_enc == NULL) {
551 pipe_ctx->stream = NULL; 558 pipe_ctx->stream = NULL;
552 return; 559 return;
@@ -556,6 +563,22 @@ static void reset_back_end_for_pipe(
556 /* DPMS may already disable */ 563 /* DPMS may already disable */
557 if (!pipe_ctx->stream->dpms_off) 564 if (!pipe_ctx->stream->dpms_off)
558 core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE); 565 core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
566 else if (pipe_ctx->stream_res.audio) {
567 /*
568 * if the stream was already disabled outside of the commit streams path,
569 * the audio disable was skipped, so do it here
570 */
571 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
572
573 if (dc->caps.dynamic_audio == true) {
574 /* we have to dynamically arbitrate the audio endpoints */
575 pipe_ctx->stream_res.audio = NULL;
576 /* we free the resource, so is_audio_acquired needs to be reset */
577 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
578 }
579
580 }
581
559 } 582 }
560 583
561 /* by upper caller loop, parent pipe: pipe0, will be reset last. 584 /* by upper caller loop, parent pipe: pipe0, will be reset last.
@@ -576,8 +599,7 @@ static void reset_back_end_for_pipe(
576 return; 599 return;
577 600
578 pipe_ctx->stream = NULL; 601 pipe_ctx->stream = NULL;
579 dm_logger_write(dc->ctx->logger, LOG_DEBUG, 602 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
580 "Reset back end for pipe %d, tg:%d\n",
581 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); 603 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
582} 604}
583 605
@@ -597,29 +619,22 @@ static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
597/* trigger HW to start disconnect plane from stream on the next vsync */ 619/* trigger HW to start disconnect plane from stream on the next vsync */
598static void plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) 620static void plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
599{ 621{
600 int fe_idx = pipe_ctx->pipe_idx; 622 struct hubp *hubp = pipe_ctx->plane_res.hubp;
601 struct hubp *hubp = dc->res_pool->hubps[fe_idx]; 623 int dpp_id = pipe_ctx->plane_res.dpp->inst;
602 struct mpc *mpc = dc->res_pool->mpc; 624 struct mpc *mpc = dc->res_pool->mpc;
603 int opp_id;
604 struct mpc_tree *mpc_tree_params; 625 struct mpc_tree *mpc_tree_params;
605 struct mpcc *mpcc_to_remove = NULL; 626 struct mpcc *mpcc_to_remove = NULL;
627 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
606 628
607 /* look at tree rather than mi here to know if we already reset */ 629 mpc_tree_params = &(opp->mpc_tree_params);
608 for (opp_id = 0; opp_id < dc->res_pool->pipe_count; opp_id++) { 630 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
609 struct output_pixel_processor *opp = dc->res_pool->opps[opp_id];
610
611 mpc_tree_params = &(opp->mpc_tree_params);
612 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, fe_idx);
613 if (mpcc_to_remove != NULL)
614 break;
615 }
616 631
617 /*Already reset*/ 632 /*Already reset*/
618 if (opp_id == dc->res_pool->pipe_count) 633 if (mpcc_to_remove == NULL)
619 return; 634 return;
620 635
621 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove); 636 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
622 dc->res_pool->opps[opp_id]->mpcc_disconnect_pending[fe_idx] = true; 637 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
623 638
624 dc->optimized_required = true; 639 dc->optimized_required = true;
625 640
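
A note on the rewritten plane_atomic_disconnect() above: instead of scanning every OPP for an MPCC owned by this front end, it asks the pipe's own OPP tree for the blender driven by its DPP instance, and a NULL result now plays the role of the old "opp_id == pipe_count" early return. Below is a minimal, self-contained sketch of that lookup pattern; mpcc_node, dpp_id and find_mpcc_for_dpp are invented stand-ins for illustration only, not DC's struct mpcc or get_mpcc_for_dpp.

#include <stddef.h>

/* invented stand-in for one blender node in an MPC tree */
struct mpcc_node {
	int dpp_id;               /* front end (DPP) feeding this blender */
	struct mpcc_node *next;   /* next blender layered on the same OPP */
};

/*
 * Walk a single OPP's tree and return the blender driven by the given
 * DPP, or NULL if that plane is no longer blended anywhere.
 */
static struct mpcc_node *find_mpcc_for_dpp(struct mpcc_node *tree, int dpp_id)
{
	struct mpcc_node *node;

	for (node = tree; node != NULL; node = node->next)
		if (node->dpp_id == dpp_id)
			return node;

	return NULL;   /* already disconnected: caller returns early */
}

int main(void)
{
	struct mpcc_node bottom = { .dpp_id = 2, .next = NULL };
	struct mpcc_node top = { .dpp_id = 0, .next = &bottom };

	/* DPP 2 is still blended on this OPP; DPP 3 was already removed */
	return (find_mpcc_for_dpp(&top, 2) != NULL &&
		find_mpcc_for_dpp(&top, 3) == NULL) ? 0 : 1;
}
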
@@ -630,21 +645,22 @@ static void plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
630 dcn10_verify_allow_pstate_change_high(dc); 645 dcn10_verify_allow_pstate_change_high(dc);
631} 646}
632 647
633static void plane_atomic_power_down(struct dc *dc, int fe_idx) 648static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
634{ 649{
635 struct dce_hwseq *hws = dc->hwseq; 650 struct dce_hwseq *hws = dc->hwseq;
636 struct dpp *dpp = dc->res_pool->dpps[fe_idx]; 651 struct dpp *dpp = pipe_ctx->plane_res.dpp;
652 struct dc_context *ctx = dc->ctx;
637 653
638 if (REG(DC_IP_REQUEST_CNTL)) { 654 if (REG(DC_IP_REQUEST_CNTL)) {
639 REG_SET(DC_IP_REQUEST_CNTL, 0, 655 REG_SET(DC_IP_REQUEST_CNTL, 0,
640 IP_REQUEST_EN, 1); 656 IP_REQUEST_EN, 1);
641 dpp_pg_control(hws, fe_idx, false); 657 dpp_pg_control(hws, dpp->inst, false);
642 hubp_pg_control(hws, fe_idx, false); 658 hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, false);
643 dpp->funcs->dpp_reset(dpp); 659 dpp->funcs->dpp_reset(dpp);
644 REG_SET(DC_IP_REQUEST_CNTL, 0, 660 REG_SET(DC_IP_REQUEST_CNTL, 0,
645 IP_REQUEST_EN, 0); 661 IP_REQUEST_EN, 0);
646 dm_logger_write(dc->ctx->logger, LOG_DEBUG, 662 DC_LOG_DEBUG(
647 "Power gated front end %d\n", fe_idx); 663 "Power gated front end %d\n", pipe_ctx->pipe_idx);
648 } 664 }
649} 665}
650 666
@@ -653,26 +669,25 @@ static void plane_atomic_power_down(struct dc *dc, int fe_idx)
653 */ 669 */
654static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) 670static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
655{ 671{
656 int fe_idx = pipe_ctx->pipe_idx; 672 struct hubp *hubp = pipe_ctx->plane_res.hubp;
657 struct dce_hwseq *hws = dc->hwseq; 673 struct dpp *dpp = pipe_ctx->plane_res.dpp;
658 struct hubp *hubp = dc->res_pool->hubps[fe_idx];
659 int opp_id = hubp->opp_id; 674 int opp_id = hubp->opp_id;
660 675
661 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx); 676 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
662 677
663 REG_UPDATE(HUBP_CLK_CNTL[fe_idx], 678 hubp->funcs->hubp_clk_cntl(hubp, false);
664 HUBP_CLOCK_ENABLE, 0); 679
665 REG_UPDATE(DPP_CONTROL[fe_idx], 680 dpp->funcs->dpp_dppclk_control(dpp, false, false);
666 DPP_CLOCK_ENABLE, 0);
667 681
668 if (opp_id != 0xf && dc->res_pool->opps[opp_id]->mpc_tree_params.opp_list == NULL) 682 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
669 REG_UPDATE(OPP_PIPE_CONTROL[opp_id], 683 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
670 OPP_PIPE_CLOCK_EN, 0); 684 pipe_ctx->stream_res.opp,
685 false);
671 686
672 hubp->power_gated = true; 687 hubp->power_gated = true;
673 dc->optimized_required = false; /* We're powering off, no need to optimize */ 688 dc->optimized_required = false; /* We're powering off, no need to optimize */
674 689
675 plane_atomic_power_down(dc, fe_idx); 690 plane_atomic_power_down(dc, pipe_ctx);
676 691
677 pipe_ctx->stream = NULL; 692 pipe_ctx->stream = NULL;
678 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res)); 693 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
@@ -684,15 +699,16 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
684 699
685static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) 700static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
686{ 701{
687 if (dc->res_pool->hubps[pipe_ctx->pipe_idx]->power_gated) 702 struct dc_context *ctx = dc->ctx;
703
704 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
688 return; 705 return;
689 706
690 plane_atomic_disable(dc, pipe_ctx); 707 plane_atomic_disable(dc, pipe_ctx);
691 708
692 apply_DEGVIDCN10_253_wa(dc); 709 apply_DEGVIDCN10_253_wa(dc);
693 710
694 dm_logger_write(dc->ctx->logger, LOG_DC, 711 DC_LOG_DC("Power down front end %d\n",
695 "Power down front end %d\n",
696 pipe_ctx->pipe_idx); 712 pipe_ctx->pipe_idx);
697} 713}
698 714
@@ -720,26 +736,25 @@ static void dcn10_init_hw(struct dc *dc)
720 } 736 }
721 737
722 enable_power_gating_plane(dc->hwseq, true); 738 enable_power_gating_plane(dc->hwseq, true);
723 return; 739 } else {
724 }
725 /* end of FPGA. Below if real ASIC */
726 740
727 if (!dcb->funcs->is_accelerated_mode(dcb)) { 741 if (!dcb->funcs->is_accelerated_mode(dcb)) {
728 bios_golden_init(dc); 742 bios_golden_init(dc);
729 disable_vga(dc->hwseq); 743 disable_vga(dc->hwseq);
730 } 744 }
731 745
732 for (i = 0; i < dc->link_count; i++) { 746 for (i = 0; i < dc->link_count; i++) {
733 /* Power up AND update implementation according to the 747 /* Power up AND update implementation according to the
734 * required signal (which may be different from the 748 * required signal (which may be different from the
735 * default signal on connector). 749 * default signal on connector).
736 */ 750 */
737 struct dc_link *link = dc->links[i]; 751 struct dc_link *link = dc->links[i];
738 752
739 if (link->link_enc->connector.id == CONNECTOR_ID_EDP) 753 if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
740 dc->hwss.edp_power_control(link, true); 754 dc->hwss.edp_power_control(link, true);
741 755
742 link->link_enc->funcs->hw_init(link->link_enc); 756 link->link_enc->funcs->hw_init(link->link_enc);
757 }
743 } 758 }
744 759
745 for (i = 0; i < dc->res_pool->pipe_count; i++) { 760 for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -768,18 +783,21 @@ static void dcn10_init_hw(struct dc *dc)
768 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 783 struct timing_generator *tg = dc->res_pool->timing_generators[i];
769 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 784 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
770 struct hubp *hubp = dc->res_pool->hubps[i]; 785 struct hubp *hubp = dc->res_pool->hubps[i];
786 struct dpp *dpp = dc->res_pool->dpps[i];
771 787
772 pipe_ctx->stream_res.tg = tg; 788 pipe_ctx->stream_res.tg = tg;
773 pipe_ctx->pipe_idx = i; 789 pipe_ctx->pipe_idx = i;
774 790
775 pipe_ctx->plane_res.hubp = hubp; 791 pipe_ctx->plane_res.hubp = hubp;
776 hubp->mpcc_id = i; 792 pipe_ctx->plane_res.dpp = dpp;
793 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
794 hubp->mpcc_id = dpp->inst;
777 hubp->opp_id = 0xf; 795 hubp->opp_id = 0xf;
778 hubp->power_gated = false; 796 hubp->power_gated = false;
779 797
780 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; 798 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
781 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; 799 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
782 dc->res_pool->opps[i]->mpcc_disconnect_pending[i] = true; 800 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
783 pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; 801 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
784 802
785 plane_atomic_disconnect(dc, pipe_ctx); 803 plane_atomic_disconnect(dc, pipe_ctx);
@@ -804,6 +822,10 @@ static void dcn10_init_hw(struct dc *dc)
804 tg->funcs->tg_init(tg); 822 tg->funcs->tg_init(tg);
805 } 823 }
806 824
825 /* end of FPGA. Below if real ASIC */
826 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
827 return;
828
807 for (i = 0; i < dc->res_pool->audio_count; i++) { 829 for (i = 0; i < dc->res_pool->audio_count; i++) {
808 struct audio *audio = dc->res_pool->audios[i]; 830 struct audio *audio = dc->res_pool->audios[i];
809 831
@@ -922,7 +944,10 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
922 if (plane_state->in_transfer_func) 944 if (plane_state->in_transfer_func)
923 tf = plane_state->in_transfer_func; 945 tf = plane_state->in_transfer_func;
924 946
925 if (plane_state->gamma_correction && dce_use_lut(plane_state)) 947 if (plane_state->gamma_correction &&
948 plane_state->gamma_correction->is_identity)
949 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
950 else if (plane_state->gamma_correction && dce_use_lut(plane_state->format))
926 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction); 951 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
927 952
928 if (tf == NULL) 953 if (tf == NULL)
@@ -993,8 +1018,6 @@ static void dcn10_pipe_control_lock(
993 struct pipe_ctx *pipe, 1018 struct pipe_ctx *pipe,
994 bool lock) 1019 bool lock)
995{ 1020{
996 struct hubp *hubp = NULL;
997 hubp = dc->res_pool->hubps[pipe->pipe_idx];
998 /* use TG master update lock to lock everything on the TG 1021 /* use TG master update lock to lock everything on the TG
999 * therefore only top pipe need to lock 1022 * therefore only top pipe need to lock
1000 */ 1023 */
@@ -1097,7 +1120,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
1097 1120
1098 DC_SYNC_INFO("Waiting for trigger\n"); 1121 DC_SYNC_INFO("Waiting for trigger\n");
1099 1122
1100 for (i = 1; i < group_size; i++) 1123 for (i = 0; i < group_size; i++)
1101 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg); 1124 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1102 1125
1103 DC_SYNC_INFO("Multi-display sync is complete\n"); 1126 DC_SYNC_INFO("Multi-display sync is complete\n");
@@ -1107,7 +1130,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
1107 struct dc *core_dc, 1130 struct dc *core_dc,
1108 struct pipe_ctx *pipe_ctx) 1131 struct pipe_ctx *pipe_ctx)
1109{ 1132{
1110 dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS, 1133 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1111 "\n============== DML TTU Output parameters [%d] ==============\n" 1134 "\n============== DML TTU Output parameters [%d] ==============\n"
1112 "qos_level_low_wm: %d, \n" 1135 "qos_level_low_wm: %d, \n"
1113 "qos_level_high_wm: %d, \n" 1136 "qos_level_high_wm: %d, \n"
@@ -1137,7 +1160,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
1137 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c 1160 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1138 ); 1161 );
1139 1162
1140 dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS, 1163 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1141 "\n============== DML DLG Output parameters [%d] ==============\n" 1164 "\n============== DML DLG Output parameters [%d] ==============\n"
1142 "refcyc_h_blank_end: %d, \n" 1165 "refcyc_h_blank_end: %d, \n"
1143 "dlg_vblank_end: %d, \n" 1166 "dlg_vblank_end: %d, \n"
@@ -1172,7 +1195,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
1172 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l 1195 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1173 ); 1196 );
1174 1197
1175 dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS, 1198 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1176 "\ndst_y_per_meta_row_nom_l: %d, \n" 1199 "\ndst_y_per_meta_row_nom_l: %d, \n"
1177 "refcyc_per_meta_chunk_nom_l: %d, \n" 1200 "refcyc_per_meta_chunk_nom_l: %d, \n"
1178 "refcyc_per_line_delivery_pre_l: %d, \n" 1201 "refcyc_per_line_delivery_pre_l: %d, \n"
@@ -1202,7 +1225,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
1202 pipe_ctx->dlg_regs.refcyc_per_line_delivery_c 1225 pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
1203 ); 1226 );
1204 1227
1205 dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS, 1228 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1206 "\n============== DML RQ Output parameters [%d] ==============\n" 1229 "\n============== DML RQ Output parameters [%d] ==============\n"
1207 "chunk_size: %d \n" 1230 "chunk_size: %d \n"
1208 "min_chunk_size: %d \n" 1231 "min_chunk_size: %d \n"
@@ -1323,19 +1346,19 @@ static void dcn10_enable_plane(
1323 undo_DEGVIDCN10_253_wa(dc); 1346 undo_DEGVIDCN10_253_wa(dc);
1324 1347
1325 power_on_plane(dc->hwseq, 1348 power_on_plane(dc->hwseq,
1326 pipe_ctx->pipe_idx); 1349 pipe_ctx->plane_res.hubp->inst);
1327 1350
1328 /* enable DCFCLK current DCHUB */ 1351 /* enable DCFCLK current DCHUB */
1329 REG_UPDATE(HUBP_CLK_CNTL[pipe_ctx->pipe_idx], 1352 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
1330 HUBP_CLOCK_ENABLE, 1);
1331 1353
1332 /* make sure OPP_PIPE_CLOCK_EN = 1 */ 1354 /* make sure OPP_PIPE_CLOCK_EN = 1 */
1333 REG_UPDATE(OPP_PIPE_CONTROL[pipe_ctx->stream_res.tg->inst], 1355 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1334 OPP_PIPE_CLOCK_EN, 1); 1356 pipe_ctx->stream_res.opp,
1357 true);
1335 1358
1336/* TODO: enable/disable in dm as per update type. 1359/* TODO: enable/disable in dm as per update type.
1337 if (plane_state) { 1360 if (plane_state) {
1338 dm_logger_write(dc->ctx->logger, LOG_DC, 1361 DC_LOG_DC(dc->ctx->logger,
1339 "Pipe:%d 0x%x: addr hi:0x%x, " 1362 "Pipe:%d 0x%x: addr hi:0x%x, "
1340 "addr low:0x%x, " 1363 "addr low:0x%x, "
1341 "src: %d, %d, %d," 1364 "src: %d, %d, %d,"
@@ -1353,7 +1376,7 @@ static void dcn10_enable_plane(
1353 plane_state->dst_rect.width, 1376 plane_state->dst_rect.width,
1354 plane_state->dst_rect.height); 1377 plane_state->dst_rect.height);
1355 1378
1356 dm_logger_write(dc->ctx->logger, LOG_DC, 1379 DC_LOG_DC(dc->ctx->logger,
1357 "Pipe %d: width, height, x, y format:%d\n" 1380 "Pipe %d: width, height, x, y format:%d\n"
1358 "viewport:%d, %d, %d, %d\n" 1381 "viewport:%d, %d, %d, %d\n"
1359 "recout: %d, %d, %d, %d\n", 1382 "recout: %d, %d, %d, %d\n",
@@ -1380,6 +1403,7 @@ static void dcn10_enable_plane(
1380 1403
1381static void program_gamut_remap(struct pipe_ctx *pipe_ctx) 1404static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
1382{ 1405{
1406 int i = 0;
1383 struct dpp_grph_csc_adjustment adjust; 1407 struct dpp_grph_csc_adjustment adjust;
1384 memset(&adjust, 0, sizeof(adjust)); 1408 memset(&adjust, 0, sizeof(adjust));
1385 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 1409 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
@@ -1387,33 +1411,9 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
1387 1411
1388 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) { 1412 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
1389 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; 1413 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
1390 adjust.temperature_matrix[0] = 1414 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
1391 pipe_ctx->stream-> 1415 adjust.temperature_matrix[i] =
1392 gamut_remap_matrix.matrix[0]; 1416 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
1393 adjust.temperature_matrix[1] =
1394 pipe_ctx->stream->
1395 gamut_remap_matrix.matrix[1];
1396 adjust.temperature_matrix[2] =
1397 pipe_ctx->stream->
1398 gamut_remap_matrix.matrix[2];
1399 adjust.temperature_matrix[3] =
1400 pipe_ctx->stream->
1401 gamut_remap_matrix.matrix[4];
1402 adjust.temperature_matrix[4] =
1403 pipe_ctx->stream->
1404 gamut_remap_matrix.matrix[5];
1405 adjust.temperature_matrix[5] =
1406 pipe_ctx->stream->
1407 gamut_remap_matrix.matrix[6];
1408 adjust.temperature_matrix[6] =
1409 pipe_ctx->stream->
1410 gamut_remap_matrix.matrix[8];
1411 adjust.temperature_matrix[7] =
1412 pipe_ctx->stream->
1413 gamut_remap_matrix.matrix[9];
1414 adjust.temperature_matrix[8] =
1415 pipe_ctx->stream->
1416 gamut_remap_matrix.matrix[10];
1417 } 1417 }
1418 1418
1419 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust); 1419 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
@@ -1474,7 +1474,7 @@ static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
1474 return false; 1474 return false;
1475} 1475}
1476 1476
1477static bool is_rgb_cspace(enum dc_color_space output_color_space) 1477bool is_rgb_cspace(enum dc_color_space output_color_space)
1478{ 1478{
1479 switch (output_color_space) { 1479 switch (output_color_space) {
1480 case COLOR_SPACE_SRGB: 1480 case COLOR_SPACE_SRGB:
@@ -1612,6 +1612,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
1612 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); 1612 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
1613} 1613}
1614 1614
1615
1615static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) 1616static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
1616{ 1617{
1617 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1618 struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -1702,22 +1703,28 @@ static void update_dchubp_dpp(
1702 struct pipe_ctx *pipe_ctx, 1703 struct pipe_ctx *pipe_ctx,
1703 struct dc_state *context) 1704 struct dc_state *context)
1704{ 1705{
1705 struct dce_hwseq *hws = dc->hwseq;
1706 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1706 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1707 struct dpp *dpp = pipe_ctx->plane_res.dpp; 1707 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1708 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 1708 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1709 union plane_size size = plane_state->plane_size; 1709 union plane_size size = plane_state->plane_size;
1710 1710
1711 /* depends on DML calculation, DPP clock value may change dynamically */ 1711 /* depends on DML calculation, DPP clock value may change dynamically */
1712 /* If the requested max dpp clk is lower than the current dispclk, there is no
1713 * need to divide by 2
1714 */
1712 if (plane_state->update_flags.bits.full_update) { 1715 if (plane_state->update_flags.bits.full_update) {
1713 enable_dppclk( 1716 bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
1714 dc->hwseq, 1717 context->bw.dcn.cur_clk.dispclk_khz / 2;
1715 pipe_ctx->pipe_idx, 1718
1716 pipe_ctx->stream_res.pix_clk_params.requested_pix_clk, 1719 dpp->funcs->dpp_dppclk_control(
1717 context->bw.dcn.calc_clk.dppclk_div); 1720 dpp,
1718 dc->current_state->bw.dcn.cur_clk.dppclk_div = 1721 should_divided_by_2,
1719 context->bw.dcn.calc_clk.dppclk_div; 1722 true);
1720 context->bw.dcn.cur_clk.dppclk_div = context->bw.dcn.calc_clk.dppclk_div; 1723
1724 dc->current_state->bw.dcn.cur_clk.dppclk_khz =
1725 should_divided_by_2 ?
1726 context->bw.dcn.cur_clk.dispclk_khz / 2 :
1727 context->bw.dcn.cur_clk.dispclk_khz;
1721 } 1728 }
1722 1729
1723 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG 1730 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -1725,7 +1732,7 @@ static void update_dchubp_dpp(
1725 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG 1732 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
1726 */ 1733 */
1727 if (plane_state->update_flags.bits.full_update) { 1734 if (plane_state->update_flags.bits.full_update) {
1728 REG_UPDATE(DCHUBP_CNTL[pipe_ctx->pipe_idx], HUBP_VTG_SEL, pipe_ctx->stream_res.tg->inst); 1735 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
1729 1736
1730 hubp->funcs->hubp_setup( 1737 hubp->funcs->hubp_setup(
1731 hubp, 1738 hubp,
@@ -1761,6 +1768,11 @@ static void update_dchubp_dpp(
1761 &pipe_ctx->plane_res.scl_data.viewport_c); 1768 &pipe_ctx->plane_res.scl_data.viewport_c);
1762 } 1769 }
1763 1770
1771 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
1772 dc->hwss.set_cursor_position(pipe_ctx);
1773 dc->hwss.set_cursor_attribute(pipe_ctx);
1774 }
1775
1764 if (plane_state->update_flags.bits.full_update) { 1776 if (plane_state->update_flags.bits.full_update) {
1765 /*gamut remap*/ 1777 /*gamut remap*/
1766 program_gamut_remap(pipe_ctx); 1778 program_gamut_remap(pipe_ctx);
@@ -1773,6 +1785,7 @@ static void update_dchubp_dpp(
1773 } 1785 }
1774 1786
1775 if (plane_state->update_flags.bits.full_update || 1787 if (plane_state->update_flags.bits.full_update ||
1788 plane_state->update_flags.bits.pixel_format_change ||
1776 plane_state->update_flags.bits.horizontal_mirror_change || 1789 plane_state->update_flags.bits.horizontal_mirror_change ||
1777 plane_state->update_flags.bits.rotation_change || 1790 plane_state->update_flags.bits.rotation_change ||
1778 plane_state->update_flags.bits.swizzle_change || 1791 plane_state->update_flags.bits.swizzle_change ||
@@ -1797,14 +1810,62 @@ static void update_dchubp_dpp(
1797 hubp->funcs->set_blank(hubp, false); 1810 hubp->funcs->set_blank(hubp, false);
1798} 1811}
1799 1812
1813static void dcn10_otg_blank(
1814 struct dc *dc,
1815 struct stream_resource stream_res,
1816 struct dc_stream_state *stream,
1817 bool blank)
1818{
1819 enum dc_color_space color_space;
1820 struct tg_color black_color = {0};
1821
1822 /* program otg blank color */
1823 color_space = stream->output_color_space;
1824 color_space_to_black_color(dc, color_space, &black_color);
1825
1826 if (stream_res.tg->funcs->set_blank_color)
1827 stream_res.tg->funcs->set_blank_color(
1828 stream_res.tg,
1829 &black_color);
1830
1831 if (!blank) {
1832 if (stream_res.tg->funcs->set_blank)
1833 stream_res.tg->funcs->set_blank(stream_res.tg, blank);
1834 if (stream_res.abm)
1835 stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level);
1836 } else if (blank) {
1837 if (stream_res.abm)
1838 stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm);
1839 if (stream_res.tg->funcs->set_blank)
1840 stream_res.tg->funcs->set_blank(stream_res.tg, blank);
1841 }
1842}
1843
1844static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
1845{
1846 struct fixed31_32 multiplier = dal_fixed31_32_from_fraction(
1847 pipe_ctx->plane_state->sdr_white_level, 80);
1848 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
1849 struct custom_float_format fmt;
1850
1851 fmt.exponenta_bits = 6;
1852 fmt.mantissa_bits = 12;
1853 fmt.sign = true;
1854
1855 if (pipe_ctx->plane_state->sdr_white_level > 80)
1856 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
1857
1858 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
1859 pipe_ctx->plane_res.dpp, hw_mult);
1860}
1800 1861
1801static void program_all_pipe_in_tree( 1862static void program_all_pipe_in_tree(
1802 struct dc *dc, 1863 struct dc *dc,
1803 struct pipe_ctx *pipe_ctx, 1864 struct pipe_ctx *pipe_ctx,
1804 struct dc_state *context) 1865 struct dc_state *context)
1805{ 1866{
1806
1807 if (pipe_ctx->top_pipe == NULL) { 1867 if (pipe_ctx->top_pipe == NULL) {
1868 bool blank = !is_pipe_tree_visible(pipe_ctx);
1808 1869
1809 pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset; 1870 pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
1810 pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start; 1871 pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
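
The set_hdr_multiplier() helper added in the hunk above scales SDR content by sdr_white_level / 80 and hands the DPP the result packed in a custom float format (sign bit, 6-bit exponent, 12-bit mantissa), with 0x1f000 as the 1.0 default. The standalone sketch below shows how such a packing produces those values, assuming an exponent bias of 31; pack_custom_float_1_6_12 is a hypothetical helper written for this illustration, not the driver's convert_to_custom_float_format, and it ignores zero, denormals and rounding corner cases.

#include <stdint.h>
#include <stdio.h>

/*
 * Pack a positive value into a 1.6.12 custom float: 1 sign bit,
 * 6 exponent bits with an assumed bias of 31, 12 mantissa bits.
 */
static uint32_t pack_custom_float_1_6_12(double value)
{
	const int mant_bits = 12;
	const int bias = 31;		/* 2^(6-1) - 1 */
	int exp = 0;
	uint32_t mantissa;

	while (value >= 2.0) {		/* normalize into [1.0, 2.0) */
		value /= 2.0;
		exp++;
	}
	while (value < 1.0) {
		value *= 2.0;
		exp--;
	}

	mantissa = (uint32_t)((value - 1.0) * (double)(1 << mant_bits));
	return ((uint32_t)(exp + bias) << mant_bits) | mantissa;
}

int main(void)
{
	/* 80 nits is the SDR reference level, i.e. a 1.0 multiplier */
	printf("sdr_white_level  80 -> 0x%05x\n", pack_custom_float_1_6_12(80.0 / 80.0));
	/* 160 nits doubles the multiplier: the exponent field goes 31 -> 32 */
	printf("sdr_white_level 160 -> 0x%05x\n", pack_custom_float_1_6_12(160.0 / 80.0));
	return 0;
}

With a bias of 31, a 1.0 multiplier has an all-zero mantissa and an exponent field of 0x1f in bits [17:12], which is exactly the 0x1f000 default used whenever sdr_white_level is at or below 80.
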
@@ -1815,22 +1876,21 @@ static void program_all_pipe_in_tree(
1815 pipe_ctx->stream_res.tg->funcs->program_global_sync( 1876 pipe_ctx->stream_res.tg->funcs->program_global_sync(
1816 pipe_ctx->stream_res.tg); 1877 pipe_ctx->stream_res.tg);
1817 1878
1818 if (pipe_ctx->stream_res.tg->funcs->set_blank) 1879 dcn10_otg_blank(dc, pipe_ctx->stream_res,
1819 pipe_ctx->stream_res.tg->funcs->set_blank( 1880 pipe_ctx->stream, blank);
1820 pipe_ctx->stream_res.tg,
1821 !is_pipe_tree_visible(pipe_ctx));
1822 } 1881 }
1823 1882
1824 if (pipe_ctx->plane_state != NULL) { 1883 if (pipe_ctx->plane_state != NULL) {
1825 struct pipe_ctx *cur_pipe_ctx =
1826 &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
1827
1828 if (pipe_ctx->plane_state->update_flags.bits.full_update) 1884 if (pipe_ctx->plane_state->update_flags.bits.full_update)
1829 dcn10_enable_plane(dc, pipe_ctx, context); 1885 dcn10_enable_plane(dc, pipe_ctx, context);
1830 1886
1831 update_dchubp_dpp(dc, pipe_ctx, context); 1887 update_dchubp_dpp(dc, pipe_ctx, context);
1832 1888
1833 if (cur_pipe_ctx->plane_state != pipe_ctx->plane_state) 1889 set_hdr_multiplier(pipe_ctx);
1890
1891 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
1892 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
1893 pipe_ctx->plane_state->update_flags.bits.gamma_change)
1834 dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); 1894 dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
1835 1895
1836 /* dcn10_translate_regamma_to_hw_format takes 750us to finish 1896 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
@@ -1843,8 +1903,9 @@ static void program_all_pipe_in_tree(
1843 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); 1903 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
1844 } 1904 }
1845 1905
1846 if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) 1906 if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) {
1847 program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); 1907 program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
1908 }
1848} 1909}
1849 1910
1850static void dcn10_pplib_apply_display_requirements( 1911static void dcn10_pplib_apply_display_requirements(
@@ -1853,16 +1914,10 @@ static void dcn10_pplib_apply_display_requirements(
1853{ 1914{
1854 struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; 1915 struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
1855 1916
1856 pp_display_cfg->all_displays_in_sync = false;/*todo*/
1857 pp_display_cfg->nb_pstate_switch_disable = false;
1858 pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz; 1917 pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
1859 pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz; 1918 pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
1860 pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz; 1919 pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
1861 pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz; 1920 pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
1862 pp_display_cfg->avail_mclk_switch_time_us =
1863 context->bw.dcn.cur_clk.dram_ccm_us > 0 ? context->bw.dcn.cur_clk.dram_ccm_us : 0;
1864 pp_display_cfg->avail_mclk_switch_time_in_disp_active_us =
1865 context->bw.dcn.cur_clk.min_active_dram_ccm_us > 0 ? context->bw.dcn.cur_clk.min_active_dram_ccm_us : 0;
1866 pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz; 1921 pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
1867 pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz; 1922 pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
1868 dce110_fill_display_configs(context, pp_display_cfg); 1923 dce110_fill_display_configs(context, pp_display_cfg);
@@ -1925,28 +1980,23 @@ static void dcn10_apply_ctx_for_surface(
1925{ 1980{
1926 int i; 1981 int i;
1927 struct timing_generator *tg; 1982 struct timing_generator *tg;
1928 struct output_pixel_processor *opp;
1929 bool removed_pipe[4] = { false }; 1983 bool removed_pipe[4] = { false };
1930 unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000; 1984 unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
1931 bool program_water_mark = false; 1985 bool program_water_mark = false;
1932 1986 struct dc_context *ctx = dc->ctx;
1933 struct pipe_ctx *top_pipe_to_program = 1987 struct pipe_ctx *top_pipe_to_program =
1934 find_top_pipe_for_stream(dc, context, stream); 1988 find_top_pipe_for_stream(dc, context, stream);
1935 1989
1936 if (!top_pipe_to_program) 1990 if (!top_pipe_to_program)
1937 return; 1991 return;
1938 1992
1939 opp = top_pipe_to_program->stream_res.opp;
1940
1941 tg = top_pipe_to_program->stream_res.tg; 1993 tg = top_pipe_to_program->stream_res.tg;
1942 1994
1943 tg->funcs->lock(tg); 1995 dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
1944 1996
1945 if (num_planes == 0) { 1997 if (num_planes == 0) {
1946
1947 /* OTG blank before remove all front end */ 1998 /* OTG blank before remove all front end */
1948 if (tg->funcs->set_blank) 1999 dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
1949 tg->funcs->set_blank(tg, true);
1950 } 2000 }
1951 2001
1952 /* Disconnect unused mpcc */ 2002 /* Disconnect unused mpcc */
@@ -1964,7 +2014,7 @@ static void dcn10_apply_ctx_for_surface(
1964 if (old_pipe_ctx->stream_res.tg == tg && 2014 if (old_pipe_ctx->stream_res.tg == tg &&
1965 old_pipe_ctx->plane_res.hubp && 2015 old_pipe_ctx->plane_res.hubp &&
1966 old_pipe_ctx->plane_res.hubp->opp_id != 0xf) { 2016 old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
1967 dcn10_disable_plane(dc, pipe_ctx); 2017 dcn10_disable_plane(dc, old_pipe_ctx);
1968 /* 2018 /*
1969 * power down fe will unlock when calling reset, need 2019 * power down fe will unlock when calling reset, need
1970 * to lock it back here. Messy, need rework. 2020 * to lock it back here. Messy, need rework.
@@ -1980,7 +2030,7 @@ static void dcn10_apply_ctx_for_surface(
1980 plane_atomic_disconnect(dc, old_pipe_ctx); 2030 plane_atomic_disconnect(dc, old_pipe_ctx);
1981 removed_pipe[i] = true; 2031 removed_pipe[i] = true;
1982 2032
1983 dm_logger_write(dc->ctx->logger, LOG_DC, 2033 DC_LOG_DC(
1984 "Reset mpcc for pipe %d\n", 2034 "Reset mpcc for pipe %d\n",
1985 old_pipe_ctx->pipe_idx); 2035 old_pipe_ctx->pipe_idx);
1986 } 2036 }
@@ -1989,7 +2039,7 @@ static void dcn10_apply_ctx_for_surface(
1989 if (num_planes > 0) 2039 if (num_planes > 0)
1990 program_all_pipe_in_tree(dc, top_pipe_to_program, context); 2040 program_all_pipe_in_tree(dc, top_pipe_to_program, context);
1991 2041
1992 tg->funcs->unlock(tg); 2042 dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
1993 2043
1994 if (num_planes == 0) 2044 if (num_planes == 0)
1995 false_optc_underflow_wa(dc, stream, tg); 2045 false_optc_underflow_wa(dc, stream, tg);
@@ -2023,7 +2073,7 @@ static void dcn10_apply_ctx_for_surface(
2023 dcn10_verify_allow_pstate_change_high(dc); 2073 dcn10_verify_allow_pstate_change_high(dc);
2024 } 2074 }
2025 } 2075 }
2026/* dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS, 2076/* DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2027 "\n============== Watermark parameters ==============\n" 2077 "\n============== Watermark parameters ==============\n"
2028 "a.urgent_ns: %d \n" 2078 "a.urgent_ns: %d \n"
2029 "a.cstate_enter_plus_exit: %d \n" 2079 "a.cstate_enter_plus_exit: %d \n"
@@ -2046,7 +2096,7 @@ static void dcn10_apply_ctx_for_surface(
2046 context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns, 2096 context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
2047 context->bw.dcn.watermarks.b.pte_meta_urgent_ns 2097 context->bw.dcn.watermarks.b.pte_meta_urgent_ns
2048 ); 2098 );
2049 dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS, 2099 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2050 "\nc.urgent_ns: %d \n" 2100 "\nc.urgent_ns: %d \n"
2051 "c.cstate_enter_plus_exit: %d \n" 2101 "c.cstate_enter_plus_exit: %d \n"
2052 "c.cstate_exit: %d \n" 2102 "c.cstate_exit: %d \n"
@@ -2072,6 +2122,101 @@ static void dcn10_apply_ctx_for_surface(
2072*/ 2122*/
2073} 2123}
2074 2124
2125static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
2126{
2127 return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
2128}
2129
2130static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
2131{
2132 bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
2133 context->bw.dcn.calc_clk.dppclk_khz;
2134 bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
2135 context->bw.dcn.cur_clk.dispclk_khz;
2136 int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
2137 bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
2138 context->bw.dcn.cur_clk.dppclk_khz;
2139
2140 /* increasing clock: current div is 0, requested div is 1 */
2141 if (dispclk_increase) {
2142 /* already divided by 2, no need to reach the target clk in 2 steps */
2143 if (cur_dpp_div)
2144 return context->bw.dcn.calc_clk.dispclk_khz;
2145
2146 /* requested disp clk is lower than the maximum supported dpp clk,
2147 * no need to reach the target clk in two steps.
2148 */
2149 if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
2150 return context->bw.dcn.calc_clk.dispclk_khz;
2151
2152 /* target dpp clk is not requested to be divided by 2, still within threshold */
2153 if (!request_dpp_div)
2154 return context->bw.dcn.calc_clk.dispclk_khz;
2155
2156 } else {
2157 /* decreasing clock: current dppclk is divided by 2,
2158 * requested dppclk is not divided by 2.
2159 */
2160
2161 /* current dpp clk is not divided by 2, no need to ramp */
2162 if (!cur_dpp_div)
2163 return context->bw.dcn.calc_clk.dispclk_khz;
2164
2165 /* current disp clk is lower than current maximum dpp clk,
2166 * no need to ramp
2167 */
2168 if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
2169 return context->bw.dcn.calc_clk.dispclk_khz;
2170
2171 /* requested dpp clk needs to be divided by 2 */
2172 if (request_dpp_div)
2173 return context->bw.dcn.calc_clk.dispclk_khz;
2174 }
2175
2176 return disp_clk_threshold;
2177}
2178
2179static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
2180{
2181 int i;
2182 bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
2183 context->bw.dcn.calc_clk.dppclk_khz;
2184
2185 int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
2186
2187 /* set disp clk to dpp clk threshold */
2188 dc->res_pool->display_clock->funcs->set_clock(
2189 dc->res_pool->display_clock,
2190 dispclk_to_dpp_threshold);
2191
2192 /* update request dpp clk division option */
2193 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2194 struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
2195
2196 if (!pipe_ctx->plane_state)
2197 continue;
2198
2199 pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
2200 pipe_ctx->plane_res.dpp,
2201 request_dpp_div,
2202 true);
2203 }
2204
2205 /* If target clk not same as dppclk threshold, set to target clock */
2206 if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
2207 dc->res_pool->display_clock->funcs->set_clock(
2208 dc->res_pool->display_clock,
2209 context->bw.dcn.calc_clk.dispclk_khz);
2210 }
2211
2212 context->bw.dcn.cur_clk.dispclk_khz =
2213 context->bw.dcn.calc_clk.dispclk_khz;
2214 context->bw.dcn.cur_clk.dppclk_khz =
2215 context->bw.dcn.calc_clk.dppclk_khz;
2216 context->bw.dcn.cur_clk.max_supported_dppclk_khz =
2217 context->bw.dcn.calc_clk.max_supported_dppclk_khz;
2218}
2219
2075static void dcn10_set_bandwidth( 2220static void dcn10_set_bandwidth(
2076 struct dc *dc, 2221 struct dc *dc,
2077 struct dc_state *context, 2222 struct dc_state *context,
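
determine_dppclk_threshold() and ramp_up_dispclk_with_dpp() in the hunk above split a dispclk change into two steps whenever the new dispclk overshoots the maximum clock the DPPs can take undivided and the new configuration wants the /2 divider. The standalone walk-through below uses invented clock values (the real driver reads them from context->bw.dcn.cur_clk and calc_clk) and only mirrors the clock-increase branch of the decision.

#include <stdio.h>

int main(void)
{
	int cur_dispclk  = 400000;   /* kHz, DPP currently undivided       */
	int cur_dppclk   = 400000;
	int new_dispclk  = 1000000;  /* target dispclk                     */
	int new_dppclk   = 500000;   /* target dppclk -> wants the /2 path */
	int max_dppclk   = 600000;   /* max_supported_dppclk_khz           */

	int request_dpp_div = new_dispclk > new_dppclk;   /* 1 */
	int cur_dpp_div     = cur_dispclk > cur_dppclk;   /* 0 */
	int threshold;

	/* increase path of determine_dppclk_threshold() */
	if (cur_dpp_div || new_dispclk <= max_dppclk || !request_dpp_div)
		threshold = new_dispclk;   /* single set_clock() call */
	else
		threshold = max_dppclk;    /* two-step ramp            */

	printf("step 1: dispclk -> %d kHz (DPP still undivided)\n", threshold);
	printf("step 2: DPP divider -> %s (dppclk = %d kHz)\n",
	       request_dpp_div ? "/2" : "/1",
	       request_dpp_div ? threshold / 2 : threshold);
	if (threshold != new_dispclk)
		printf("step 3: dispclk -> %d kHz (dppclk = %d kHz)\n",
		       new_dispclk, new_dispclk / 2);
	return 0;
}

Raising dispclk only as far as max_supported_dppclk_khz before engaging the divider keeps the DPP clock within its supported range at every intermediate point of the ramp.
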
@@ -2089,31 +2234,33 @@ static void dcn10_set_bandwidth(
2089 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) 2234 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
2090 return; 2235 return;
2091 2236
2092 if (decrease_allowed || context->bw.dcn.calc_clk.dispclk_khz 2237 if (should_set_clock(
2093 > dc->current_state->bw.dcn.cur_clk.dispclk_khz) { 2238 decrease_allowed,
2094 dc->res_pool->display_clock->funcs->set_clock( 2239 context->bw.dcn.calc_clk.dcfclk_khz,
2095 dc->res_pool->display_clock, 2240 dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
2096 context->bw.dcn.calc_clk.dispclk_khz); 2241 context->bw.dcn.cur_clk.dcfclk_khz =
2097 dc->current_state->bw.dcn.cur_clk.dispclk_khz = 2242 context->bw.dcn.calc_clk.dcfclk_khz;
2098 context->bw.dcn.calc_clk.dispclk_khz;
2099 }
2100 if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_khz
2101 > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
2102 smu_req.hard_min_dcefclk_khz = 2243 smu_req.hard_min_dcefclk_khz =
2103 context->bw.dcn.calc_clk.dcfclk_khz; 2244 context->bw.dcn.calc_clk.dcfclk_khz;
2104 } 2245 }
2105 if (decrease_allowed || context->bw.dcn.calc_clk.fclk_khz 2246
2106 > dc->current_state->bw.dcn.cur_clk.fclk_khz) { 2247 if (should_set_clock(
2107 smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz; 2248 decrease_allowed,
2108 } 2249 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
2109 if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz 2250 dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
2110 > dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz) {
2111 dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz =
2112 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
2113 context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz = 2251 context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
2114 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz; 2252 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
2115 } 2253 }
2116 2254
2255 if (should_set_clock(
2256 decrease_allowed,
2257 context->bw.dcn.calc_clk.fclk_khz,
2258 dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
2259 context->bw.dcn.cur_clk.fclk_khz =
2260 context->bw.dcn.calc_clk.fclk_khz;
2261 smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
2262 }
2263
2117 smu_req.display_count = context->stream_count; 2264 smu_req.display_count = context->stream_count;
2118 2265
2119 if (pp_smu->set_display_requirement) 2266 if (pp_smu->set_display_requirement)
@@ -2121,21 +2268,17 @@ static void dcn10_set_bandwidth(
2121 2268
2122 *smu_req_cur = smu_req; 2269 *smu_req_cur = smu_req;
2123 2270
2124 /* Decrease in freq is increase in period so opposite comparison for dram_ccm */ 2271 /* make sure dcf clk is updated before dpp clk to
2125 if (decrease_allowed || context->bw.dcn.calc_clk.dram_ccm_us 2272 * make sure we have enough voltage to run the dpp clk
2126 < dc->current_state->bw.dcn.cur_clk.dram_ccm_us) { 2273 */
2127 dc->current_state->bw.dcn.calc_clk.dram_ccm_us = 2274 if (should_set_clock(
2128 context->bw.dcn.calc_clk.dram_ccm_us; 2275 decrease_allowed,
2129 context->bw.dcn.cur_clk.dram_ccm_us = 2276 context->bw.dcn.calc_clk.dispclk_khz,
2130 context->bw.dcn.calc_clk.dram_ccm_us; 2277 dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
2131 } 2278
2132 if (decrease_allowed || context->bw.dcn.calc_clk.min_active_dram_ccm_us 2279 ramp_up_dispclk_with_dpp(dc, context);
2133 < dc->current_state->bw.dcn.cur_clk.min_active_dram_ccm_us) {
2134 dc->current_state->bw.dcn.calc_clk.min_active_dram_ccm_us =
2135 context->bw.dcn.calc_clk.min_active_dram_ccm_us;
2136 context->bw.dcn.cur_clk.min_active_dram_ccm_us =
2137 context->bw.dcn.calc_clk.min_active_dram_ccm_us;
2138 } 2280 }
2281
2139 dcn10_pplib_apply_display_requirements(dc, context); 2282 dcn10_pplib_apply_display_requirements(dc, context);
2140 2283
2141 if (dc->debug.sanity_checks) { 2284 if (dc->debug.sanity_checks) {
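
The rewritten dcn10_set_bandwidth() above funnels every clock comparison through should_set_clock(), so increases are always programmed while decreases only happen when the caller passes decrease_allowed. A minimal standalone illustration with made-up values follows; the helper is re-declared here only so the snippet compiles on its own.

#include <stdbool.h>
#include <stdio.h>

/* same predicate as the static should_set_clock() helper added above */
static bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
{
	return (decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk;
}

int main(void)
{
	/* increases are always programmed */
	printf("%d\n", should_set_clock(false, 500000, 400000)); /* 1 */
	/* decreases only when the caller allows them */
	printf("%d\n", should_set_clock(false, 300000, 400000)); /* 0 */
	printf("%d\n", should_set_clock(true,  300000, 400000)); /* 1 */
	/* equal clocks are never reprogrammed */
	printf("%d\n", should_set_clock(true,  400000, 400000)); /* 0 */
	return 0;
}
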
@@ -2184,6 +2327,8 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
2184 value |= 0x80; 2327 value |= 0x80;
2185 if (events->cursor_update) 2328 if (events->cursor_update)
2186 value |= 0x2; 2329 value |= 0x2;
2330 if (events->force_trigger)
2331 value |= 0x1;
2187 2332
2188 for (i = 0; i < num_pipes; i++) 2333 for (i = 0; i < num_pipes; i++)
2189 pipe_ctx[i]->stream_res.tg->funcs-> 2334 pipe_ctx[i]->stream_res.tg->funcs->
@@ -2256,12 +2401,24 @@ static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
2256 return; 2401 return;
2257} 2402}
2258 2403
2404static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
2405{
2406 int i;
2407
2408 for (i = 0; i < res_pool->pipe_count; i++) {
2409 if (res_pool->hubps[i]->inst == mpcc_inst)
2410 return res_pool->hubps[i];
2411 }
2412 ASSERT(false);
2413 return NULL;
2414}
2415
2259static void dcn10_wait_for_mpcc_disconnect( 2416static void dcn10_wait_for_mpcc_disconnect(
2260 struct dc *dc, 2417 struct dc *dc,
2261 struct resource_pool *res_pool, 2418 struct resource_pool *res_pool,
2262 struct pipe_ctx *pipe_ctx) 2419 struct pipe_ctx *pipe_ctx)
2263{ 2420{
2264 int i; 2421 int mpcc_inst;
2265 2422
2266 if (dc->debug.sanity_checks) { 2423 if (dc->debug.sanity_checks) {
2267 dcn10_verify_allow_pstate_change_high(dc); 2424 dcn10_verify_allow_pstate_change_high(dc);
@@ -2270,12 +2427,14 @@ static void dcn10_wait_for_mpcc_disconnect(
2270 if (!pipe_ctx->stream_res.opp) 2427 if (!pipe_ctx->stream_res.opp)
2271 return; 2428 return;
2272 2429
2273 for (i = 0; i < MAX_PIPES; i++) { 2430 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
2274 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[i]) { 2431 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
2275 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, i); 2432 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
2276 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[i] = false; 2433
2277 res_pool->hubps[i]->funcs->set_blank(res_pool->hubps[i], true); 2434 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
2278 /*dm_logger_write(dc->ctx->logger, LOG_ERROR, 2435 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
2436 hubp->funcs->set_blank(hubp, true);
2437 /*DC_LOG_ERROR(dc->ctx->logger,
2279 "[debug_mpo: wait_for_mpcc finished waiting on mpcc %d]\n", 2438 "[debug_mpo: wait_for_mpcc finished waiting on mpcc %d]\n",
2280 i);*/ 2439 i);*/
2281 } 2440 }
@@ -2296,7 +2455,7 @@ static bool dcn10_dummy_display_power_gating(
2296 return true; 2455 return true;
2297} 2456}
2298 2457
2299void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) 2458static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2300{ 2459{
2301 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2460 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2302 struct timing_generator *tg = pipe_ctx->stream_res.tg; 2461 struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -2316,12 +2475,46 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2316 } 2475 }
2317} 2476}
2318 2477
2319void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) 2478static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
2320{ 2479{
2321 if (hws->ctx->dc->res_pool->hubbub != NULL) 2480 if (hws->ctx->dc->res_pool->hubbub != NULL)
2322 hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data); 2481 hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
2323} 2482}
2324 2483
2484static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2485{
2486 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2487 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2488 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2489 struct dc_cursor_mi_param param = {
2490 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
2491 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
2492 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
2493 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
2494 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
2495 };
2496
2497 if (pipe_ctx->plane_state->address.type
2498 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2499 pos_cpy.enable = false;
2500
2501 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
2502 pos_cpy.enable = false;
2503
2504 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
2505 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
2506}
2507
2508static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
2509{
2510 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
2511
2512 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
2513 pipe_ctx->plane_res.hubp, attributes);
2514 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
2515 pipe_ctx->plane_res.dpp, attributes->color_format);
2516}
2517
2325static const struct hw_sequencer_funcs dcn10_funcs = { 2518static const struct hw_sequencer_funcs dcn10_funcs = {
2326 .program_gamut_remap = program_gamut_remap, 2519 .program_gamut_remap = program_gamut_remap,
2327 .program_csc_matrix = program_csc_matrix, 2520 .program_csc_matrix = program_csc_matrix,
@@ -2342,6 +2535,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2342 .enable_stream = dce110_enable_stream, 2535 .enable_stream = dce110_enable_stream,
2343 .disable_stream = dce110_disable_stream, 2536 .disable_stream = dce110_disable_stream,
2344 .unblank_stream = dce110_unblank_stream, 2537 .unblank_stream = dce110_unblank_stream,
2538 .blank_stream = dce110_blank_stream,
2345 .enable_display_power_gating = dcn10_dummy_display_power_gating, 2539 .enable_display_power_gating = dcn10_dummy_display_power_gating,
2346 .disable_plane = dcn10_disable_plane, 2540 .disable_plane = dcn10_disable_plane,
2347 .pipe_control_lock = dcn10_pipe_control_lock, 2541 .pipe_control_lock = dcn10_pipe_control_lock,
@@ -2362,6 +2556,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2362 .edp_backlight_control = hwss_edp_backlight_control, 2556 .edp_backlight_control = hwss_edp_backlight_control,
2363 .edp_power_control = hwss_edp_power_control, 2557 .edp_power_control = hwss_edp_power_control,
2364 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, 2558 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
2559 .set_cursor_position = dcn10_set_cursor_position,
2560 .set_cursor_attribute = dcn10_set_cursor_attribute
2365}; 2561};
2366 2562
2367 2563
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index b9d326082717..6c526b5095d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -35,5 +35,6 @@ extern void fill_display_configs(
35 const struct dc_state *context, 35 const struct dc_state *context,
36 struct dm_pp_display_configuration *pp_display_cfg); 36 struct dm_pp_display_configuration *pp_display_cfg);
37 37
38bool is_rgb_cspace(enum dc_color_space output_color_space);
38 39
39#endif /* __DC_HWSS_DCN10_H__ */ 40#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
index d7b5bd20352a..819b749c6e31 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
@@ -33,7 +33,6 @@
33 33
34#define IPP_REG_LIST_DCN(id) \ 34#define IPP_REG_LIST_DCN(id) \
35 SRI(FORMAT_CONTROL, CNVC_CFG, id), \ 35 SRI(FORMAT_CONTROL, CNVC_CFG, id), \
36 SRI(DPP_CONTROL, DPP_TOP, id), \
37 SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ 36 SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
38 SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ 37 SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
39 SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ 38 SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
@@ -130,7 +129,6 @@ struct dcn10_ipp_mask {
130}; 129};
131 130
132struct dcn10_ipp_registers { 131struct dcn10_ipp_registers {
133 uint32_t DPP_CONTROL;
134 uint32_t CURSOR_SETTINS; 132 uint32_t CURSOR_SETTINS;
135 uint32_t CURSOR_SETTINGS; 133 uint32_t CURSOR_SETTINGS;
136 uint32_t CNVC_SURFACE_PIXEL_FORMAT; 134 uint32_t CNVC_SURFACE_PIXEL_FORMAT;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index f6ba0eef4489..77a1a9d541a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -367,6 +367,14 @@ void opp1_program_oppbuf(
367 367
368} 368}
369 369
370void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
371{
372 struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
373 uint32_t regval = enable ? 1 : 0;
374
375 REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval);
376}
377
370/*****************************************/ 378/*****************************************/
371/* Constructor, Destructor */ 379/* Constructor, Destructor */
372/*****************************************/ 380/*****************************************/
@@ -382,6 +390,7 @@ static struct opp_funcs dcn10_opp_funcs = {
382 .opp_program_fmt = opp1_program_fmt, 390 .opp_program_fmt = opp1_program_fmt,
383 .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction, 391 .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
384 .opp_program_stereo = opp1_program_stereo, 392 .opp_program_stereo = opp1_program_stereo,
393 .opp_pipe_clock_control = opp1_pipe_clock_control,
385 .opp_destroy = opp1_destroy 394 .opp_destroy = opp1_destroy
386}; 395};
387 396
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index bc5058af6266..0f10adea000c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -44,7 +44,8 @@
44 SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id), \ 44 SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id), \
45 SRI(OPPBUF_CONTROL, OPPBUF, id),\ 45 SRI(OPPBUF_CONTROL, OPPBUF, id),\
46 SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \ 46 SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \
47 SRI(OPPBUF_3D_PARAMETERS_1, OPPBUF, id) 47 SRI(OPPBUF_3D_PARAMETERS_1, OPPBUF, id), \
48 SRI(OPP_PIPE_CONTROL, OPP_PIPE, id)
48 49
49#define OPP_REG_LIST_DCN10(id) \ 50#define OPP_REG_LIST_DCN10(id) \
50 OPP_REG_LIST_DCN(id) 51 OPP_REG_LIST_DCN(id)
@@ -61,7 +62,8 @@
61 uint32_t OPPBUF_CONTROL; \ 62 uint32_t OPPBUF_CONTROL; \
62 uint32_t OPPBUF_CONTROL1; \ 63 uint32_t OPPBUF_CONTROL1; \
63 uint32_t OPPBUF_3D_PARAMETERS_0; \ 64 uint32_t OPPBUF_3D_PARAMETERS_0; \
64 uint32_t OPPBUF_3D_PARAMETERS_1 65 uint32_t OPPBUF_3D_PARAMETERS_1; \
66 uint32_t OPP_PIPE_CONTROL
65 67
66#define OPP_MASK_SH_LIST_DCN(mask_sh) \ 68#define OPP_MASK_SH_LIST_DCN(mask_sh) \
67 OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \ 69 OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
@@ -89,7 +91,8 @@
89 OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\ 91 OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\
90 OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, mask_sh),\ 92 OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, mask_sh),\
91 OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh), \ 93 OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh), \
92 OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, mask_sh) 94 OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, mask_sh), \
95 OPP_SF(OPP_PIPE0_OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh)
93 96
94#define OPP_MASK_SH_LIST_DCN10(mask_sh) \ 97#define OPP_MASK_SH_LIST_DCN10(mask_sh) \
95 OPP_MASK_SH_LIST_DCN(mask_sh), \ 98 OPP_MASK_SH_LIST_DCN(mask_sh), \
@@ -125,7 +128,8 @@
125 type OPPBUF_OVERLAP_PIXEL_NUM;\ 128 type OPPBUF_OVERLAP_PIXEL_NUM;\
126 type OPPBUF_NUM_SEGMENT_PADDED_PIXELS; \ 129 type OPPBUF_NUM_SEGMENT_PADDED_PIXELS; \
127 type OPPBUF_3D_VACT_SPACE1_SIZE; \ 130 type OPPBUF_3D_VACT_SPACE1_SIZE; \
128 type OPPBUF_3D_VACT_SPACE2_SIZE 131 type OPPBUF_3D_VACT_SPACE2_SIZE; \
132 type OPP_PIPE_CLOCK_EN
129 133
130struct dcn10_opp_registers { 134struct dcn10_opp_registers {
131 OPP_COMMON_REG_VARIABLE_LIST; 135 OPP_COMMON_REG_VARIABLE_LIST;
@@ -176,6 +180,8 @@ void opp1_program_stereo(
176 bool enable, 180 bool enable,
177 const struct dc_crtc_timing *timing); 181 const struct dc_crtc_timing *timing);
178 182
183void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable);
184
179void opp1_destroy(struct output_pixel_processor **opp); 185void opp1_destroy(struct output_pixel_processor **opp);
180 186
181#endif 187#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index a3c7c2012f05..d25e7bf0d0d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -83,6 +83,8 @@
83 83
84 84
85struct dcn_optc_registers { 85struct dcn_optc_registers {
86 uint32_t OTG_GLOBAL_CONTROL1;
87 uint32_t OTG_GLOBAL_CONTROL2;
86 uint32_t OTG_VERT_SYNC_CONTROL; 88 uint32_t OTG_VERT_SYNC_CONTROL;
87 uint32_t OTG_MASTER_UPDATE_MODE; 89 uint32_t OTG_MASTER_UPDATE_MODE;
88 uint32_t OTG_GSL_CONTROL; 90 uint32_t OTG_GSL_CONTROL;
@@ -126,12 +128,12 @@ struct dcn_optc_registers {
126 uint32_t OTG_VERTICAL_INTERRUPT2_POSITION; 128 uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
127 uint32_t OPTC_INPUT_CLOCK_CONTROL; 129 uint32_t OPTC_INPUT_CLOCK_CONTROL;
128 uint32_t OPTC_DATA_SOURCE_SELECT; 130 uint32_t OPTC_DATA_SOURCE_SELECT;
131 uint32_t OPTC_MEMORY_CONFIG;
129 uint32_t OPTC_INPUT_GLOBAL_CONTROL; 132 uint32_t OPTC_INPUT_GLOBAL_CONTROL;
130 uint32_t CONTROL; 133 uint32_t CONTROL;
131 uint32_t OTG_GSL_WINDOW_X; 134 uint32_t OTG_GSL_WINDOW_X;
132 uint32_t OTG_GSL_WINDOW_Y; 135 uint32_t OTG_GSL_WINDOW_Y;
133 uint32_t OTG_VUPDATE_KEEPOUT; 136 uint32_t OTG_VUPDATE_KEEPOUT;
134 uint32_t OTG_DSC_START_POSITION;
135}; 137};
136 138
137#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ 139#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -241,7 +243,7 @@ struct dcn_optc_registers {
241 SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_DATA, mask_sh),\ 243 SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_DATA, mask_sh),\
242 SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh) 244 SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh)
243 245
244#define TG_REG_FIELD_LIST(type) \ 246#define TG_REG_FIELD_LIST_DCN1_0(type) \
245 type VSTARTUP_START;\ 247 type VSTARTUP_START;\
246 type VUPDATE_OFFSET;\ 248 type VUPDATE_OFFSET;\
247 type VUPDATE_WIDTH;\ 249 type VUPDATE_WIDTH;\
@@ -326,10 +328,9 @@ struct dcn_optc_registers {
326 type OPTC_INPUT_CLK_EN;\ 328 type OPTC_INPUT_CLK_EN;\
327 type OPTC_INPUT_CLK_ON;\ 329 type OPTC_INPUT_CLK_ON;\
328 type OPTC_INPUT_CLK_GATE_DIS;\ 330 type OPTC_INPUT_CLK_GATE_DIS;\
329 type OPTC_SRC_SEL;\
330 type OPTC_SEG0_SRC_SEL;\
331 type OPTC_UNDERFLOW_OCCURRED_STATUS;\ 331 type OPTC_UNDERFLOW_OCCURRED_STATUS;\
332 type OPTC_UNDERFLOW_CLEAR;\ 332 type OPTC_UNDERFLOW_CLEAR;\
333 type OPTC_SRC_SEL;\
333 type VTG0_ENABLE;\ 334 type VTG0_ENABLE;\
334 type VTG0_FP2;\ 335 type VTG0_FP2;\
335 type VTG0_VCOUNT_INIT;\ 336 type VTG0_VCOUNT_INIT;\
@@ -352,10 +353,11 @@ struct dcn_optc_registers {
352 type OTG_MASTER_UPDATE_LOCK_GSL_EN;\ 353 type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
353 type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\ 354 type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
354 type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\ 355 type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
355 type OTG_DSC_START_POSITION_X;\
356 type OTG_DSC_START_POSITION_LINE_NUM;\
357 type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN; 356 type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
358 357
358#define TG_REG_FIELD_LIST(type) \
359 TG_REG_FIELD_LIST_DCN1_0(type)
360
359 361
360struct dcn_optc_shift { 362struct dcn_optc_shift {
361 TG_REG_FIELD_LIST(uint8_t) 363 TG_REG_FIELD_LIST(uint8_t)
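The TG_REG_FIELD_LIST split is pure indirection: the DCN 1.0 fields keep their own macro and the generic one forwards to it, so a later ASIC could append fields without touching the DCN 1.0 list. A hedged illustration (hypothetical macro and field name, not in this patch):

/* Sketch: a future variant extending the DCN 1.0 field list. */
#define TG_REG_FIELD_LIST_DCN_NEXT(type) \
	TG_REG_FIELD_LIST_DCN1_0(type) \
	type SOME_NEW_OTG_FIELD;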
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 44825e2c9ebb..02bd664aed3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -50,7 +50,8 @@
50#include "dcn10_hubp.h" 50#include "dcn10_hubp.h"
51#include "dcn10_hubbub.h" 51#include "dcn10_hubbub.h"
52 52
53#include "soc15ip.h" 53#include "soc15_hw_ip.h"
54#include "vega10_ip_offset.h"
54 55
55#include "dcn/dcn_1_0_offset.h" 56#include "dcn/dcn_1_0_offset.h"
56#include "dcn/dcn_1_0_sh_mask.h" 57#include "dcn/dcn_1_0_sh_mask.h"
@@ -365,6 +366,7 @@ static const struct dcn_optc_mask tg_mask = {
365 366
366 367
367static const struct bios_registers bios_regs = { 368static const struct bios_registers bios_regs = {
369 NBIO_SR(BIOS_SCRATCH_3),
368 NBIO_SR(BIOS_SCRATCH_6) 370 NBIO_SR(BIOS_SCRATCH_6)
369}; 371};
370 372
@@ -438,7 +440,11 @@ static const struct dc_debug debug_defaults_drv = {
438 .timing_trace = false, 440 .timing_trace = false,
439 .clock_trace = true, 441 .clock_trace = true,
440 442
441 .min_disp_clk_khz = 300000, 443 /* raven smu doesn't allow 0 disp clk,
444 * the smu min disp clk limit is 50MHz,
445 * so keep min disp clk at 100MHz to avoid an smu hang
446 */
447 .min_disp_clk_khz = 100000,
442 448
443 .disable_pplib_clock_request = true, 449 .disable_pplib_clock_request = true,
444 .disable_pplib_wm_range = false, 450 .disable_pplib_wm_range = false,
@@ -450,6 +456,7 @@ static const struct dc_debug debug_defaults_drv = {
450 .disable_stereo_support = true, 456 .disable_stereo_support = true,
451 .vsr_support = true, 457 .vsr_support = true,
452 .performance_trace = false, 458 .performance_trace = false,
459 .az_endpoint_mute_only = true,
453}; 460};
454 461
455static const struct dc_debug debug_defaults_diags = { 462static const struct dc_debug debug_defaults_diags = {
@@ -818,7 +825,7 @@ static void get_pixel_clock_parameters(
818 pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz; 825 pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
819 pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id; 826 pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
820 pixel_clk_params->signal_type = pipe_ctx->stream->signal; 827 pixel_clk_params->signal_type = pipe_ctx->stream->signal;
821 pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1; 828 pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
822 /* TODO: un-hardcode*/ 829 /* TODO: un-hardcode*/
823 pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * 830 pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
824 LINK_RATE_REF_FREQ_IN_KHZ; 831 LINK_RATE_REF_FREQ_IN_KHZ;
@@ -960,11 +967,13 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
960 967
961 idle_pipe->stream = head_pipe->stream; 968 idle_pipe->stream = head_pipe->stream;
962 idle_pipe->stream_res.tg = head_pipe->stream_res.tg; 969 idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
970 idle_pipe->stream_res.abm = head_pipe->stream_res.abm;
963 idle_pipe->stream_res.opp = head_pipe->stream_res.opp; 971 idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
964 972
965 idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; 973 idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
966 idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; 974 idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
967 idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; 975 idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
976 idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
968 977
969 return idle_pipe; 978 return idle_pipe;
970} 979}
@@ -1316,13 +1325,11 @@ static bool construct(
1316 } 1325 }
1317 } 1326 }
1318 1327
1319 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { 1328 pool->base.display_clock = dce120_disp_clk_create(ctx);
1320 pool->base.display_clock = dce120_disp_clk_create(ctx); 1329 if (pool->base.display_clock == NULL) {
1321 if (pool->base.display_clock == NULL) { 1330 dm_error("DC: failed to create display clock!\n");
1322 dm_error("DC: failed to create display clock!\n"); 1331 BREAK_TO_DEBUGGER();
1323 BREAK_TO_DEBUGGER(); 1332 goto fail;
1324 goto fail;
1325 }
1326 } 1333 }
1327 1334
1328 pool->base.dmcu = dcn10_dmcu_create(ctx, 1335 pool->base.dmcu = dcn10_dmcu_create(ctx,
@@ -1445,6 +1452,7 @@ static bool construct(
1445 1452
1446 /* valid pipe num */ 1453 /* valid pipe num */
1447 pool->base.pipe_count = j; 1454 pool->base.pipe_count = j;
1455 pool->base.timing_generator_count = j;
1448 1456
1449 /* within dml lib, it is hard code to 4. If ASIC pipe is fused, 1457 /* within dml lib, it is hard code to 4. If ASIC pipe is fused,
1450 * the value may be changed 1458 * the value may be changed
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index ab88f07772a3..034369fbb9e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -50,6 +50,13 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
50 bool enable); 50 bool enable);
51 51
52/* 52/*
53 * Clear the payload allocation table before enabling the MST DP link.
54 */
55void dm_helpers_dp_mst_clear_payload_allocation_table(
56 struct dc_context *ctx,
57 const struct dc_link *link);
58
59/*
53 * Polls for ACT (allocation change trigger) handled and 60 * Polls for ACT (allocation change trigger) handled and
54 */ 61 */
55bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( 62bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
@@ -101,5 +108,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
101 struct dc_link *link, 108 struct dc_link *link,
102 struct dc_sink *sink); 109 struct dc_sink *sink);
103 110
111void dm_set_dcn_clocks(
112 struct dc_context *ctx,
113 struct dc_clocks *clks);
104 114
105#endif /* __DM_HELPERS__ */ 115#endif /* __DM_HELPERS__ */
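A hedged sketch of how the new clear-payload helper is meant to be ordered (hypothetical caller; only the types and the helper declared above are used): wipe stale allocations before the MST link is enabled, then let the normal payload table writes follow.

/* Sketch: clear leftover MST payload allocations before link enable. */
static void example_mst_link_bringup(struct dc_context *ctx,
		const struct dc_link *link)
{
	dm_helpers_dp_mst_clear_payload_allocation_table(ctx, link);

	/* ...normal payload allocation table writes would follow here. */
}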
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index bbfa83252fc1..eac4bfe12257 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -91,7 +91,8 @@ struct pp_smu_funcs_rv {
91 /* which SMU message? are reader and writer WM separate SMU msg? */ 91 /* which SMU message? are reader and writer WM separate SMU msg? */
92 void (*set_wm_ranges)(struct pp_smu *pp, 92 void (*set_wm_ranges)(struct pp_smu *pp,
93 struct pp_smu_wm_range_sets *ranges); 93 struct pp_smu_wm_range_sets *ranges);
94 94 /* PME w/a */
95 void (*set_pme_wa_enable)(struct pp_smu *pp);
95}; 96};
96 97
97#if 0 98#if 0
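Because set_pme_wa_enable may be left unimplemented by a platform, a caller would normally NULL-check it first; a minimal sketch (hypothetical helper, not part of the patch):

/* Sketch: invoke the PME workaround hook only when it is provided. */
static void example_apply_pme_wa(struct pp_smu_funcs_rv *funcs, struct pp_smu *pp)
{
	if (funcs && funcs->set_pme_wa_enable)
		funcs->set_pme_wa_enable(pp);
}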
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 225b7bfb09a9..22e7ee7dcd26 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -192,37 +192,6 @@ unsigned int generic_reg_wait(const struct dc_context *ctx,
192 * Power Play (PP) interfaces 192 * Power Play (PP) interfaces
193 **************************************/ 193 **************************************/
194 194
195/* DAL calls this function to notify PP about clocks it needs for the Mode Set.
196 * This is done *before* it changes DCE clock.
197 *
198 * If required clock is higher than current, then PP will increase the voltage.
199 *
200 * If required clock is lower than current, then PP will defer reduction of
201 * voltage until the call to dc_service_pp_post_dce_clock_change().
202 *
203 * \input - Contains clocks needed for Mode Set.
204 *
205 * \output - Contains clocks adjusted by PP which DAL should use for Mode Set.
206 * Valid only if function returns zero.
207 *
208 * \returns true - call is successful
209 * false - call failed
210 */
211bool dm_pp_pre_dce_clock_change(
212 struct dc_context *ctx,
213 struct dm_pp_gpu_clock_range *requested_state,
214 struct dm_pp_gpu_clock_range *actual_state);
215
216/* The returned clocks range are 'static' system clocks which will be used for
217 * mode validation purposes.
218 *
219 * \returns true - call is successful
220 * false - call failed
221 */
222bool dc_service_get_system_clocks_range(
223 const struct dc_context *ctx,
224 struct dm_pp_gpu_clock_range *sys_clks);
225
226/* Gets valid clocks levels from pplib 195/* Gets valid clocks levels from pplib
227 * 196 *
228 * input: clk_type - display clk / sclk / mem clk 197 * input: clk_type - display clk / sclk / mem clk
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index fa26cf488b3c..ab8c77d4e6df 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -29,7 +29,7 @@
29#include "os_types.h" 29#include "os_types.h"
30#include "dc_types.h" 30#include "dc_types.h"
31 31
32#include "dm_pp_smu.h" 32struct pp_smu_funcs_rv;
33 33
34struct dm_pp_clock_range { 34struct dm_pp_clock_range {
35 int min_khz; 35 int min_khz;
@@ -239,25 +239,8 @@ enum dm_acpi_display_type {
239 AcpiDisplayType_DFP6 = 12 239 AcpiDisplayType_DFP6 = 12
240}; 240};
241 241
242enum dm_pp_power_level {
243 DM_PP_POWER_LEVEL_INVALID,
244 DM_PP_POWER_LEVEL_ULTRA_LOW,
245 DM_PP_POWER_LEVEL_LOW,
246 DM_PP_POWER_LEVEL_NOMINAL,
247 DM_PP_POWER_LEVEL_PERFORMANCE,
248
249 DM_PP_POWER_LEVEL_0 = DM_PP_POWER_LEVEL_ULTRA_LOW,
250 DM_PP_POWER_LEVEL_1 = DM_PP_POWER_LEVEL_LOW,
251 DM_PP_POWER_LEVEL_2 = DM_PP_POWER_LEVEL_NOMINAL,
252 DM_PP_POWER_LEVEL_3 = DM_PP_POWER_LEVEL_PERFORMANCE,
253 DM_PP_POWER_LEVEL_4 = DM_PP_CLOCKS_DPM_STATE_LEVEL_3 + 1,
254 DM_PP_POWER_LEVEL_5 = DM_PP_CLOCKS_DPM_STATE_LEVEL_4 + 1,
255 DM_PP_POWER_LEVEL_6 = DM_PP_CLOCKS_DPM_STATE_LEVEL_5 + 1,
256 DM_PP_POWER_LEVEL_7 = DM_PP_CLOCKS_DPM_STATE_LEVEL_6 + 1,
257};
258
259struct dm_pp_power_level_change_request { 242struct dm_pp_power_level_change_request {
260 enum dm_pp_power_level power_level; 243 enum dm_pp_clocks_state power_level;
261}; 244};
262 245
263struct dm_pp_clock_for_voltage_req { 246struct dm_pp_clock_for_voltage_req {
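With dm_pp_power_level removed, the request struct now carries a dm_pp_clocks_state directly. A hedged example of filling it (DM_PP_CLOCKS_STATE_NOMINAL is assumed from the existing dm_pp_clocks_state definition; the helper is hypothetical):

/* Sketch: build a nominal-level change request. */
static struct dm_pp_power_level_change_request example_nominal_level(void)
{
	struct dm_pp_power_level_change_request req = {
		.power_level = DM_PP_CLOCKS_STATE_NOMINAL, /* assumed enumerator */
	};

	return req;
}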
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 3488af2b5786..f83a608f93e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -24,19 +24,23 @@
24# It provides the general basic services required by other DAL 24# It provides the general basic services required by other DAL
25# subcomponents. 25# subcomponents.
26 26
27CFLAGS_display_mode_vba.o := -mhard-float -msse -mpreferred-stack-boundary=4 27ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
28CFLAGS_display_mode_lib.o := -mhard-float -msse -mpreferred-stack-boundary=4 28 cc_stack_align := -mpreferred-stack-boundary=4
29CFLAGS_display_pipe_clocks.o := -mhard-float -msse -mpreferred-stack-boundary=4 29else ifneq ($(call cc-option, -mstack-alignment=16),)
30CFLAGS_display_rq_dlg_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4 30 cc_stack_align := -mstack-alignment=16
31CFLAGS_dml1_display_rq_dlg_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4 31endif
32CFLAGS_display_rq_dlg_helpers.o := -mhard-float -msse -mpreferred-stack-boundary=4
33CFLAGS_soc_bounding_box.o := -mhard-float -msse -mpreferred-stack-boundary=4
34CFLAGS_dml_common_defs.o := -mhard-float -msse -mpreferred-stack-boundary=4
35 32
33dml_ccflags := -mhard-float -msse $(cc_stack_align)
36 34
37DML = display_mode_lib.o display_rq_dlg_calc.o \ 35CFLAGS_display_mode_lib.o := $(dml_ccflags)
38 display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ 36CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
39 soc_bounding_box.o dml_common_defs.o display_mode_vba.o 37CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
38CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags)
39CFLAGS_soc_bounding_box.o := $(dml_ccflags)
40CFLAGS_dml_common_defs.o := $(dml_ccflags)
41
42DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
43 soc_bounding_box.o dml_common_defs.o
40 44
41AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML)) 45AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
42 46
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 26f4f2a3d90d..3c2abcb8a1b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -28,8 +28,6 @@
28 28
29#include "dml_common_defs.h" 29#include "dml_common_defs.h"
30#include "soc_bounding_box.h" 30#include "soc_bounding_box.h"
31#include "display_mode_vba.h"
32#include "display_rq_dlg_calc.h"
33#include "dml1_display_rq_dlg_calc.h" 31#include "dml1_display_rq_dlg_calc.h"
34 32
35enum dml_project { 33enum dml_project {
@@ -41,7 +39,6 @@ struct display_mode_lib {
41 struct _vcs_dpi_ip_params_st ip; 39 struct _vcs_dpi_ip_params_st ip;
42 struct _vcs_dpi_soc_bounding_box_st soc; 40 struct _vcs_dpi_soc_bounding_box_st soc;
43 enum dml_project project; 41 enum dml_project project;
44 struct vba_vars_st vba;
45 struct dal_logger *logger; 42 struct dal_logger *logger;
46}; 43};
47 44
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index aeebd8bee628..09affa16cc43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -140,7 +140,6 @@ struct _vcs_dpi_ip_params_st {
140 unsigned int max_hscl_taps; 140 unsigned int max_hscl_taps;
141 unsigned int max_vscl_taps; 141 unsigned int max_vscl_taps;
142 unsigned int xfc_supported; 142 unsigned int xfc_supported;
143 unsigned int ptoi_supported;
144 unsigned int xfc_fill_constant_bytes; 143 unsigned int xfc_fill_constant_bytes;
145 double dispclk_ramp_margin_percent; 144 double dispclk_ramp_margin_percent;
146 double xfc_fill_bw_overhead_percent; 145 double xfc_fill_bw_overhead_percent;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
deleted file mode 100644
index 260e113fcc02..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ /dev/null
@@ -1,6085 +0,0 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "display_mode_lib.h"
27#include "display_mode_vba.h"
28
29#include "dml_inline_defs.h"
30
31/*
32 * NOTE:
33 * This file is gcc-parseable HW gospel, coming straight from HW engineers.
34 *
35 * It doesn't adhere to Linux kernel style and sometimes will do things in odd
36 * ways. Unless there is something clearly wrong with it, the code should
37 * remain as-is, since it provides us with a guarantee from HW that it is correct.
38 */
39
40#define BPP_INVALID 0
41#define BPP_BLENDED_PIPE 0xffffffff
42static const unsigned int NumberOfStates = DC__VOLTAGE_STATES;
43
44static void fetch_socbb_params(struct display_mode_lib *mode_lib);
45static void fetch_ip_params(struct display_mode_lib *mode_lib);
46static void fetch_pipe_params(struct display_mode_lib *mode_lib);
47static void recalculate_params(
48 struct display_mode_lib *mode_lib,
49 const display_e2e_pipe_params_st *pipes,
50 unsigned int num_pipes);
51static void recalculate(struct display_mode_lib *mode_lib);
52static double adjust_ReturnBW(
53 struct display_mode_lib *mode_lib,
54 double ReturnBW,
55 bool DCCEnabledAnyPlane,
56 double ReturnBandwidthToDCN);
57static void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib);
58static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
59static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
60 struct display_mode_lib *mode_lib);
61static unsigned int dscceComputeDelay(
62 unsigned int bpc,
63 double bpp,
64 unsigned int sliceWidth,
65 unsigned int numSlices,
66 enum output_format_class pixelFormat);
67static unsigned int dscComputeDelay(enum output_format_class pixelFormat);
68// Super monster function with some 45 arguments
69static bool CalculatePrefetchSchedule(
70 struct display_mode_lib *mode_lib,
71 double DPPCLK,
72 double DISPCLK,
73 double PixelClock,
74 double DCFClkDeepSleep,
75 unsigned int DSCDelay,
76 unsigned int DPPPerPlane,
77 bool ScalerEnabled,
78 unsigned int NumberOfCursors,
79 double DPPCLKDelaySubtotal,
80 double DPPCLKDelaySCL,
81 double DPPCLKDelaySCLLBOnly,
82 double DPPCLKDelayCNVCFormater,
83 double DPPCLKDelayCNVCCursor,
84 double DISPCLKDelaySubtotal,
85 unsigned int ScalerRecoutWidth,
86 enum output_format_class OutputFormat,
87 unsigned int VBlank,
88 unsigned int HTotal,
89 unsigned int MaxInterDCNTileRepeaters,
90 unsigned int VStartup,
91 unsigned int PageTableLevels,
92 bool VirtualMemoryEnable,
93 bool DynamicMetadataEnable,
94 unsigned int DynamicMetadataLinesBeforeActiveRequired,
95 unsigned int DynamicMetadataTransmittedBytes,
96 bool DCCEnable,
97 double UrgentLatency,
98 double UrgentExtraLatency,
99 double TCalc,
100 unsigned int PDEAndMetaPTEBytesFrame,
101 unsigned int MetaRowByte,
102 unsigned int PixelPTEBytesPerRow,
103 double PrefetchSourceLinesY,
104 unsigned int SwathWidthY,
105 double BytePerPixelDETY,
106 double VInitPreFillY,
107 unsigned int MaxNumSwathY,
108 double PrefetchSourceLinesC,
109 double BytePerPixelDETC,
110 double VInitPreFillC,
111 unsigned int MaxNumSwathC,
112 unsigned int SwathHeightY,
113 unsigned int SwathHeightC,
114 double TWait,
115 bool XFCEnabled,
116 double XFCRemoteSurfaceFlipDelay,
117 bool InterlaceEnable,
118 bool ProgressiveToInterlaceUnitInOPP,
119 double *DSTXAfterScaler,
120 double *DSTYAfterScaler,
121 double *DestinationLinesForPrefetch,
122 double *PrefetchBandwidth,
123 double *DestinationLinesToRequestVMInVBlank,
124 double *DestinationLinesToRequestRowInVBlank,
125 double *VRatioPrefetchY,
126 double *VRatioPrefetchC,
127 double *RequiredPrefetchPixDataBW,
128 unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
129 double *Tno_bw,
130 unsigned int *VUpdateOffsetPix,
131 unsigned int *VUpdateWidthPix,
132 unsigned int *VReadyOffsetPix);
133static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
134static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
135static double CalculatePrefetchSourceLines(
136 struct display_mode_lib *mode_lib,
137 double VRatio,
138 double vtaps,
139 bool Interlace,
140 bool ProgressiveToInterlaceUnitInOPP,
141 unsigned int SwathHeight,
142 unsigned int ViewportYStart,
143 double *VInitPreFill,
144 unsigned int *MaxNumSwath);
145static unsigned int CalculateVMAndRowBytes(
146 struct display_mode_lib *mode_lib,
147 bool DCCEnable,
148 unsigned int BlockHeight256Bytes,
149 unsigned int BlockWidth256Bytes,
150 enum source_format_class SourcePixelFormat,
151 unsigned int SurfaceTiling,
152 unsigned int BytePerPixel,
153 enum scan_direction_class ScanDirection,
154 unsigned int ViewportWidth,
155 unsigned int ViewportHeight,
156 unsigned int SwathWidthY,
157 bool VirtualMemoryEnable,
158 unsigned int VMMPageSize,
159 unsigned int PTEBufferSizeInRequests,
160 unsigned int PDEProcessingBufIn64KBReqs,
161 unsigned int Pitch,
162 unsigned int DCCMetaPitch,
163 unsigned int *MacroTileWidth,
164 unsigned int *MetaRowByte,
165 unsigned int *PixelPTEBytesPerRow,
166 bool *PTEBufferSizeNotExceeded,
167 unsigned int *dpte_row_height,
168 unsigned int *meta_row_height);
169static double CalculateTWait(
170 unsigned int PrefetchMode,
171 double DRAMClockChangeLatency,
172 double UrgentLatency,
173 double SREnterPlusExitTime);
174static double CalculateRemoteSurfaceFlipDelay(
175 struct display_mode_lib *mode_lib,
176 double VRatio,
177 double SwathWidth,
178 double Bpp,
179 double LineTime,
180 double XFCTSlvVupdateOffset,
181 double XFCTSlvVupdateWidth,
182 double XFCTSlvVreadyOffset,
183 double XFCXBUFLatencyTolerance,
184 double XFCFillBWOverhead,
185 double XFCSlvChunkSize,
186 double XFCBusTransportTime,
187 double TCalc,
188 double TWait,
189 double *SrcActiveDrainRate,
190 double *TInitXFill,
191 double *TslvChk);
192static double CalculateWriteBackDISPCLK(
193 enum source_format_class WritebackPixelFormat,
194 double PixelClock,
195 double WritebackHRatio,
196 double WritebackVRatio,
197 unsigned int WritebackLumaHTaps,
198 unsigned int WritebackLumaVTaps,
199 unsigned int WritebackChromaHTaps,
200 unsigned int WritebackChromaVTaps,
201 double WritebackDestinationWidth,
202 unsigned int HTotal,
203 unsigned int WritebackChromaLineBufferWidth);
204static void CalculateActiveRowBandwidth(
205 bool VirtualMemoryEnable,
206 enum source_format_class SourcePixelFormat,
207 double VRatio,
208 bool DCCEnable,
209 double LineTime,
210 unsigned int MetaRowByteLuma,
211 unsigned int MetaRowByteChroma,
212 unsigned int meta_row_height_luma,
213 unsigned int meta_row_height_chroma,
214 unsigned int PixelPTEBytesPerRowLuma,
215 unsigned int PixelPTEBytesPerRowChroma,
216 unsigned int dpte_row_height_luma,
217 unsigned int dpte_row_height_chroma,
218 double *meta_row_bw,
219 double *dpte_row_bw,
220 double *qual_row_bw);
221static void CalculateFlipSchedule(
222 struct display_mode_lib *mode_lib,
223 double UrgentExtraLatency,
224 double UrgentLatency,
225 unsigned int MaxPageTableLevels,
226 bool VirtualMemoryEnable,
227 double BandwidthAvailableForImmediateFlip,
228 unsigned int TotImmediateFlipBytes,
229 enum source_format_class SourcePixelFormat,
230 unsigned int ImmediateFlipBytes,
231 double LineTime,
232 double Tno_bw,
233 double VRatio,
234 double PDEAndMetaPTEBytesFrame,
235 unsigned int MetaRowByte,
236 unsigned int PixelPTEBytesPerRow,
237 bool DCCEnable,
238 unsigned int dpte_row_height,
239 unsigned int meta_row_height,
240 double qual_row_bw,
241 double *DestinationLinesToRequestVMInImmediateFlip,
242 double *DestinationLinesToRequestRowInImmediateFlip,
243 double *final_flip_bw,
244 bool *ImmediateFlipSupportedForPipe);
245static double CalculateWriteBackDelay(
246 enum source_format_class WritebackPixelFormat,
247 double WritebackHRatio,
248 double WritebackVRatio,
249 unsigned int WritebackLumaHTaps,
250 unsigned int WritebackLumaVTaps,
251 unsigned int WritebackChromaHTaps,
252 unsigned int WritebackChromaVTaps,
253 unsigned int WritebackDestinationWidth);
254static void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib);
255static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp);
256static void ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
257
258void set_prefetch_mode(
259 struct display_mode_lib *mode_lib,
260 bool cstate_en,
261 bool pstate_en,
262 bool ignore_viewport_pos,
263 bool immediate_flip_support)
264{
265 unsigned int prefetch_mode;
266
267 if (cstate_en && pstate_en)
268 prefetch_mode = 0;
269 else if (cstate_en)
270 prefetch_mode = 1;
271 else
272 prefetch_mode = 2;
273 if (prefetch_mode != mode_lib->vba.PrefetchMode
274 || ignore_viewport_pos != mode_lib->vba.IgnoreViewportPositioning
275 || immediate_flip_support != mode_lib->vba.ImmediateFlipSupport) {
276 DTRACE(
277 " Prefetch mode has changed from %i to %i. Recalculating.",
278 prefetch_mode,
279 mode_lib->vba.PrefetchMode);
280 mode_lib->vba.PrefetchMode = prefetch_mode;
281 mode_lib->vba.IgnoreViewportPositioning = ignore_viewport_pos;
282 mode_lib->vba.ImmediateFlipSupport = immediate_flip_support;
283 recalculate(mode_lib);
284 }
285}
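For reference, the selection above yields prefetch mode 0 when both C-state and P-state are allowed, 1 when only C-state is allowed, and 2 otherwise. A minimal caller sketch (hypothetical helper) requesting the most permissive mode:

/* Sketch: request the least restrictive prefetch mode. */
static void example_default_prefetch(struct display_mode_lib *mode_lib)
{
	set_prefetch_mode(mode_lib, true, true, false, false);
}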
286
287unsigned int dml_get_voltage_level(
288 struct display_mode_lib *mode_lib,
289 const display_e2e_pipe_params_st *pipes,
290 unsigned int num_pipes)
291{
292 bool need_recalculate = memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
293 || memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
294 || num_pipes != mode_lib->vba.cache_num_pipes
295 || memcmp(pipes, mode_lib->vba.cache_pipes,
296 sizeof(display_e2e_pipe_params_st) * num_pipes) != 0;
297
298 mode_lib->vba.soc = mode_lib->soc;
299 mode_lib->vba.ip = mode_lib->ip;
300 memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
301 mode_lib->vba.cache_num_pipes = num_pipes;
302
303 if (need_recalculate && pipes[0].clks_cfg.dppclk_mhz != 0)
304 recalculate(mode_lib);
305 else {
306 fetch_socbb_params(mode_lib);
307 fetch_ip_params(mode_lib);
308 fetch_pipe_params(mode_lib);
309 }
310 ModeSupportAndSystemConfigurationFull(mode_lib);
311
312 return mode_lib->vba.VoltageLevel;
313}
314
315#define dml_get_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes) \
316{ \
317 recalculate_params(mode_lib, pipes, num_pipes); \
318 return var; \
319}
320
321dml_get_attr_func(clk_dcf_deepsleep, mode_lib->vba.DCFClkDeepSleep);
322dml_get_attr_func(wm_urgent, mode_lib->vba.UrgentWatermark);
323dml_get_attr_func(wm_memory_trip, mode_lib->vba.MemoryTripWatermark);
324dml_get_attr_func(wm_writeback_urgent, mode_lib->vba.WritebackUrgentWatermark);
325dml_get_attr_func(wm_stutter_exit, mode_lib->vba.StutterExitWatermark);
326dml_get_attr_func(wm_stutter_enter_exit, mode_lib->vba.StutterEnterPlusExitWatermark);
327dml_get_attr_func(wm_dram_clock_change, mode_lib->vba.DRAMClockChangeWatermark);
328dml_get_attr_func(wm_writeback_dram_clock_change, mode_lib->vba.WritebackDRAMClockChangeWatermark);
329dml_get_attr_func(wm_xfc_underflow, mode_lib->vba.UrgentWatermark); // xfc_underflow maps to urgent
330dml_get_attr_func(stutter_efficiency, mode_lib->vba.StutterEfficiency);
331dml_get_attr_func(stutter_efficiency_no_vblank, mode_lib->vba.StutterEfficiencyNotIncludingVBlank);
332dml_get_attr_func(urgent_latency, mode_lib->vba.MinUrgentLatencySupportUs);
333dml_get_attr_func(urgent_extra_latency, mode_lib->vba.UrgentExtraLatency);
334dml_get_attr_func(nonurgent_latency, mode_lib->vba.NonUrgentLatencyTolerance);
335dml_get_attr_func(
336 dram_clock_change_latency,
337 mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
338dml_get_attr_func(dispclk_calculated, mode_lib->vba.DISPCLK_calculated);
339dml_get_attr_func(total_data_read_bw, mode_lib->vba.TotalDataReadBandwidth);
340dml_get_attr_func(return_bw, mode_lib->vba.ReturnBW);
341dml_get_attr_func(tcalc, mode_lib->vba.TCalc);
342
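Each dml_get_attr_func() line above stamps out one getter. Written out for illustration (editor's expansion, not present in the file), the wm_urgent case becomes roughly:

/* Approximate expansion of dml_get_attr_func(wm_urgent, mode_lib->vba.UrgentWatermark): */
double get_wm_urgent(struct display_mode_lib *mode_lib,
		const display_e2e_pipe_params_st *pipes,
		unsigned int num_pipes)
{
	recalculate_params(mode_lib, pipes, num_pipes);
	return mode_lib->vba.UrgentWatermark;
}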
343#define dml_get_pipe_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe) \
344{\
345 unsigned int which_plane; \
346 recalculate_params(mode_lib, pipes, num_pipes); \
347 which_plane = mode_lib->vba.pipe_plane[which_pipe]; \
348 return var[which_plane]; \
349}
350
351dml_get_pipe_attr_func(dsc_delay, mode_lib->vba.DSCDelay);
352dml_get_pipe_attr_func(dppclk_calculated, mode_lib->vba.DPPCLK_calculated);
353dml_get_pipe_attr_func(dscclk_calculated, mode_lib->vba.DSCCLK_calculated);
354dml_get_pipe_attr_func(min_ttu_vblank, mode_lib->vba.MinTTUVBlank);
355dml_get_pipe_attr_func(vratio_prefetch_l, mode_lib->vba.VRatioPrefetchY);
356dml_get_pipe_attr_func(vratio_prefetch_c, mode_lib->vba.VRatioPrefetchC);
357dml_get_pipe_attr_func(dst_x_after_scaler, mode_lib->vba.DSTXAfterScaler);
358dml_get_pipe_attr_func(dst_y_after_scaler, mode_lib->vba.DSTYAfterScaler);
359dml_get_pipe_attr_func(dst_y_per_vm_vblank, mode_lib->vba.DestinationLinesToRequestVMInVBlank);
360dml_get_pipe_attr_func(dst_y_per_row_vblank, mode_lib->vba.DestinationLinesToRequestRowInVBlank);
361dml_get_pipe_attr_func(dst_y_prefetch, mode_lib->vba.DestinationLinesForPrefetch);
362dml_get_pipe_attr_func(dst_y_per_vm_flip, mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip);
363dml_get_pipe_attr_func(
364 dst_y_per_row_flip,
365 mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip);
366
367dml_get_pipe_attr_func(xfc_transfer_delay, mode_lib->vba.XFCTransferDelay);
368dml_get_pipe_attr_func(xfc_precharge_delay, mode_lib->vba.XFCPrechargeDelay);
369dml_get_pipe_attr_func(xfc_remote_surface_flip_latency, mode_lib->vba.XFCRemoteSurfaceFlipLatency);
370dml_get_pipe_attr_func(xfc_prefetch_margin, mode_lib->vba.XFCPrefetchMargin);
371
372unsigned int get_vstartup_calculated(
373 struct display_mode_lib *mode_lib,
374 const display_e2e_pipe_params_st *pipes,
375 unsigned int num_pipes,
376 unsigned int which_pipe)
377{
378 unsigned int which_plane;
379
380 recalculate_params(mode_lib, pipes, num_pipes);
381 which_plane = mode_lib->vba.pipe_plane[which_pipe];
382 return mode_lib->vba.VStartup[which_plane];
383}
384
385double get_total_immediate_flip_bytes(
386 struct display_mode_lib *mode_lib,
387 const display_e2e_pipe_params_st *pipes,
388 unsigned int num_pipes)
389{
390 recalculate_params(mode_lib, pipes, num_pipes);
391 return mode_lib->vba.TotImmediateFlipBytes;
392}
393
394double get_total_immediate_flip_bw(
395 struct display_mode_lib *mode_lib,
396 const display_e2e_pipe_params_st *pipes,
397 unsigned int num_pipes)
398{
399 recalculate_params(mode_lib, pipes, num_pipes);
400 return mode_lib->vba.ImmediateFlipBW;
401}
402
403double get_total_prefetch_bw(
404 struct display_mode_lib *mode_lib,
405 const display_e2e_pipe_params_st *pipes,
406 unsigned int num_pipes)
407{
408 unsigned int k;
409 double total_prefetch_bw = 0.0;
410
411 recalculate_params(mode_lib, pipes, num_pipes);
412 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
413 total_prefetch_bw += mode_lib->vba.PrefetchBandwidth[k];
414 return total_prefetch_bw;
415}
416
417static void fetch_socbb_params(struct display_mode_lib *mode_lib)
418{
419 soc_bounding_box_st *soc = &mode_lib->vba.soc;
420 unsigned int i;
421
422 // SOC Bounding Box Parameters
423 mode_lib->vba.ReturnBusWidth = soc->return_bus_width_bytes;
424 mode_lib->vba.NumberOfChannels = soc->num_chans;
425 mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency =
426 soc->ideal_dram_bw_after_urgent_percent; // there's always that one bastard variable that's so long it throws everything out of alignment!
427 mode_lib->vba.UrgentLatency = soc->urgent_latency_us;
428 mode_lib->vba.RoundTripPingLatencyCycles = soc->round_trip_ping_latency_dcfclk_cycles;
429 mode_lib->vba.UrgentOutOfOrderReturnPerChannel =
430 soc->urgent_out_of_order_return_per_channel_bytes;
431 mode_lib->vba.WritebackLatency = soc->writeback_latency_us;
432 mode_lib->vba.SRExitTime = soc->sr_exit_time_us;
433 mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;
434 mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us;
435 mode_lib->vba.Downspreading = soc->downspread_percent;
436 mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
437 mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new!
438 mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new
439 mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new
440 mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes;
441 // Set the voltage scaling clocks as the defaults. Most of these will
442 // be set to different values by the test
443 for (i = 0; i < DC__VOLTAGE_STATES; i++)
444 if (soc->clock_limits[i].state == mode_lib->vba.VoltageLevel)
445 break;
446
447 mode_lib->vba.DCFCLK = soc->clock_limits[i].dcfclk_mhz;
448 mode_lib->vba.SOCCLK = soc->clock_limits[i].socclk_mhz;
449 mode_lib->vba.DRAMSpeed = soc->clock_limits[i].dram_speed_mhz;
450 mode_lib->vba.FabricClock = soc->clock_limits[i].fabricclk_mhz;
451
452 mode_lib->vba.XFCBusTransportTime = soc->xfc_bus_transport_time_us;
453 mode_lib->vba.XFCXBUFLatencyTolerance = soc->xfc_xbuf_latency_tolerance_us;
454
455 mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp = false;
456 mode_lib->vba.MaxHSCLRatio = 4;
457 mode_lib->vba.MaxVSCLRatio = 4;
458 mode_lib->vba.MaxNumWriteback = 0; /*TODO*/
459 mode_lib->vba.WritebackLumaAndChromaScalingSupported = true;
460 mode_lib->vba.Cursor64BppSupport = true;
461 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
462 mode_lib->vba.DCFCLKPerState[i] = soc->clock_limits[i].dcfclk_mhz;
463 mode_lib->vba.FabricClockPerState[i] = soc->clock_limits[i].fabricclk_mhz;
464 mode_lib->vba.SOCCLKPerState[i] = soc->clock_limits[i].socclk_mhz;
465 mode_lib->vba.PHYCLKPerState[i] = soc->clock_limits[i].phyclk_mhz;
466 mode_lib->vba.MaxDppclk[i] = soc->clock_limits[i].dppclk_mhz;
467 mode_lib->vba.MaxDSCCLK[i] = soc->clock_limits[i].dscclk_mhz;
468 mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;
469 mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
470 }
471}
472
473static void fetch_ip_params(struct display_mode_lib *mode_lib)
474{
475 ip_params_st *ip = &mode_lib->vba.ip;
476
477 // IP Parameters
478 mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
479 mode_lib->vba.MaxNumOTG = ip->max_num_otg;
480 mode_lib->vba.CursorChunkSize = ip->cursor_chunk_size;
481 mode_lib->vba.CursorBufferSize = ip->cursor_buffer_size;
482
483 mode_lib->vba.MaxDCHUBToPSCLThroughput = ip->max_dchub_pscl_bw_pix_per_clk;
484 mode_lib->vba.MaxPSCLToLBThroughput = ip->max_pscl_lb_bw_pix_per_clk;
485 mode_lib->vba.ROBBufferSizeInKByte = ip->rob_buffer_size_kbytes;
486 mode_lib->vba.DETBufferSizeInKByte = ip->det_buffer_size_kbytes;
487 mode_lib->vba.PixelChunkSizeInKByte = ip->pixel_chunk_size_kbytes;
488 mode_lib->vba.MetaChunkSize = ip->meta_chunk_size_kbytes;
489 mode_lib->vba.PTEChunkSize = ip->pte_chunk_size_kbytes;
490 mode_lib->vba.WritebackChunkSize = ip->writeback_chunk_size_kbytes;
491 mode_lib->vba.LineBufferSize = ip->line_buffer_size_bits;
492 mode_lib->vba.MaxLineBufferLines = ip->max_line_buffer_lines;
493 mode_lib->vba.PTEBufferSizeInRequests = ip->dpte_buffer_size_in_pte_reqs;
494 mode_lib->vba.DPPOutputBufferPixels = ip->dpp_output_buffer_pixels;
495 mode_lib->vba.OPPOutputBufferLines = ip->opp_output_buffer_lines;
496 mode_lib->vba.WritebackInterfaceLumaBufferSize = ip->writeback_luma_buffer_size_kbytes;
497 mode_lib->vba.WritebackInterfaceChromaBufferSize = ip->writeback_chroma_buffer_size_kbytes;
498 mode_lib->vba.WritebackChromaLineBufferWidth =
499 ip->writeback_chroma_line_buffer_width_pixels;
500 mode_lib->vba.MaxPageTableLevels = ip->max_page_table_levels;
501 mode_lib->vba.MaxInterDCNTileRepeaters = ip->max_inter_dcn_tile_repeaters;
502 mode_lib->vba.NumberOfDSC = ip->num_dsc;
503 mode_lib->vba.ODMCapability = ip->odm_capable;
504 mode_lib->vba.DISPCLKRampingMargin = ip->dispclk_ramp_margin_percent;
505
506 mode_lib->vba.XFCSupported = ip->xfc_supported;
507 mode_lib->vba.XFCFillBWOverhead = ip->xfc_fill_bw_overhead_percent;
508 mode_lib->vba.XFCFillConstant = ip->xfc_fill_constant_bytes;
509 mode_lib->vba.DPPCLKDelaySubtotal = ip->dppclk_delay_subtotal;
510 mode_lib->vba.DPPCLKDelaySCL = ip->dppclk_delay_scl;
511 mode_lib->vba.DPPCLKDelaySCLLBOnly = ip->dppclk_delay_scl_lb_only;
512 mode_lib->vba.DPPCLKDelayCNVCFormater = ip->dppclk_delay_cnvc_formatter;
513 mode_lib->vba.DPPCLKDelayCNVCCursor = ip->dppclk_delay_cnvc_cursor;
514 mode_lib->vba.DISPCLKDelaySubtotal = ip->dispclk_delay_subtotal;
515
516 mode_lib->vba.ProgressiveToInterlaceUnitInOPP = ip->ptoi_supported;
517
518 mode_lib->vba.PDEProcessingBufIn64KBReqs = ip->pde_proc_buffer_size_64k_reqs;
519}
520
521static void fetch_pipe_params(struct display_mode_lib *mode_lib)
522{
523 display_e2e_pipe_params_st *pipes = mode_lib->vba.cache_pipes;
524 ip_params_st *ip = &mode_lib->vba.ip;
525
526 unsigned int OTGInstPlane[DC__NUM_DPP__MAX];
527 unsigned int j, k;
528 bool PlaneVisited[DC__NUM_DPP__MAX];
529 bool visited[DC__NUM_DPP__MAX];
530
531 // Convert Pipes to Planes
532 for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k)
533 visited[k] = false;
534
535 mode_lib->vba.NumberOfActivePlanes = 0;
536 for (j = 0; j < mode_lib->vba.cache_num_pipes; ++j) {
537 display_pipe_source_params_st *src = &pipes[j].pipe.src;
538 display_pipe_dest_params_st *dst = &pipes[j].pipe.dest;
539 scaler_ratio_depth_st *scl = &pipes[j].pipe.scale_ratio_depth;
540 scaler_taps_st *taps = &pipes[j].pipe.scale_taps;
541 display_output_params_st *dout = &pipes[j].dout;
542 display_clocks_and_cfg_st *clks = &pipes[j].clks_cfg;
543
544 if (visited[j])
545 continue;
546 visited[j] = true;
547
548 mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
549
550 mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
551 mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
552 (enum scan_direction_class) (src->source_scan);
553 mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] =
554 src->viewport_width;
555 mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] =
556 src->viewport_height;
557 mode_lib->vba.ViewportYStartY[mode_lib->vba.NumberOfActivePlanes] =
558 src->viewport_y_y;
559 mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
560 src->viewport_y_c;
561 mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
562 mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
563 mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
564 mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
565 mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] = scl->vscl_ratio;
566 mode_lib->vba.ScalerEnabled[mode_lib->vba.NumberOfActivePlanes] = scl->scl_enable;
567 mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes] = dst->interlaced;
568 if (mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes])
569 mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] *= 2.0;
570 mode_lib->vba.htaps[mode_lib->vba.NumberOfActivePlanes] = taps->htaps;
571 mode_lib->vba.vtaps[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps;
572 mode_lib->vba.HTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->htaps_c;
573 mode_lib->vba.VTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps_c;
574 mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
575 mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
576 mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =
577 src->dcc_use_global ?
578 ip->dcc_supported : src->dcc && ip->dcc_supported;
579 mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
580 mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =
581 (enum source_format_class) (src->source_format);
582 mode_lib->vba.HActive[mode_lib->vba.NumberOfActivePlanes] = dst->hactive;
583 mode_lib->vba.VActive[mode_lib->vba.NumberOfActivePlanes] = dst->vactive;
584 mode_lib->vba.SurfaceTiling[mode_lib->vba.NumberOfActivePlanes] =
585 (enum dm_swizzle_mode) (src->sw_mode);
586 mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] =
587 dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
588 mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
589 dst->odm_combine;
590 mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
591 (enum output_format_class) (dout->output_format);
592 mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
593 (enum output_encoder_class) (dout->output_type);
594 mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp;
595 mode_lib->vba.OutputLinkDPLanes[mode_lib->vba.NumberOfActivePlanes] =
596 dout->dp_lanes;
597 mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
598 mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
599 dout->dsc_slices;
600 mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
601 dout->opp_input_bpc == 0 ? 12 : dout->opp_input_bpc;
602 mode_lib->vba.WritebackEnable[mode_lib->vba.NumberOfActivePlanes] = dout->wb_enable;
603 mode_lib->vba.WritebackSourceHeight[mode_lib->vba.NumberOfActivePlanes] =
604 dout->wb.wb_src_height;
605 mode_lib->vba.WritebackDestinationWidth[mode_lib->vba.NumberOfActivePlanes] =
606 dout->wb.wb_dst_width;
607 mode_lib->vba.WritebackDestinationHeight[mode_lib->vba.NumberOfActivePlanes] =
608 dout->wb.wb_dst_height;
609 mode_lib->vba.WritebackPixelFormat[mode_lib->vba.NumberOfActivePlanes] =
610 (enum source_format_class) (dout->wb.wb_pixel_format);
611 mode_lib->vba.WritebackLumaHTaps[mode_lib->vba.NumberOfActivePlanes] =
612 dout->wb.wb_htaps_luma;
613 mode_lib->vba.WritebackLumaVTaps[mode_lib->vba.NumberOfActivePlanes] =
614 dout->wb.wb_vtaps_luma;
615 mode_lib->vba.WritebackChromaHTaps[mode_lib->vba.NumberOfActivePlanes] =
616 dout->wb.wb_htaps_chroma;
617 mode_lib->vba.WritebackChromaVTaps[mode_lib->vba.NumberOfActivePlanes] =
618 dout->wb.wb_vtaps_chroma;
619 mode_lib->vba.WritebackHRatio[mode_lib->vba.NumberOfActivePlanes] =
620 dout->wb.wb_hratio;
621 mode_lib->vba.WritebackVRatio[mode_lib->vba.NumberOfActivePlanes] =
622 dout->wb.wb_vratio;
623
624 mode_lib->vba.DynamicMetadataEnable[mode_lib->vba.NumberOfActivePlanes] =
625 src->dynamic_metadata_enable;
626 mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[mode_lib->vba.NumberOfActivePlanes] =
627 src->dynamic_metadata_lines_before_active;
628 mode_lib->vba.DynamicMetadataTransmittedBytes[mode_lib->vba.NumberOfActivePlanes] =
629 src->dynamic_metadata_xmit_bytes;
630
631 mode_lib->vba.XFCEnabled[mode_lib->vba.NumberOfActivePlanes] = src->xfc_enable
632 && ip->xfc_supported;
633 mode_lib->vba.XFCSlvChunkSize = src->xfc_params.xfc_slv_chunk_size_bytes;
634 mode_lib->vba.XFCTSlvVupdateOffset = src->xfc_params.xfc_tslv_vupdate_offset_us;
635 mode_lib->vba.XFCTSlvVupdateWidth = src->xfc_params.xfc_tslv_vupdate_width_us;
636 mode_lib->vba.XFCTSlvVreadyOffset = src->xfc_params.xfc_tslv_vready_offset_us;
637 mode_lib->vba.PixelClock[mode_lib->vba.NumberOfActivePlanes] = dst->pixel_rate_mhz;
638 mode_lib->vba.DPPCLK[mode_lib->vba.NumberOfActivePlanes] = clks->dppclk_mhz;
639 if (ip->is_line_buffer_bpp_fixed)
640 mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] =
641 ip->line_buffer_fixed_bpp;
642 else {
643 unsigned int lb_depth;
644
645 switch (scl->lb_depth) {
646 case dm_lb_6:
647 lb_depth = 18;
648 break;
649 case dm_lb_8:
650 lb_depth = 24;
651 break;
652 case dm_lb_10:
653 lb_depth = 30;
654 break;
655 case dm_lb_12:
656 lb_depth = 36;
657 break;
658 case dm_lb_16:
659 lb_depth = 48;
660 break;
661 default:
662 lb_depth = 36;
663 }
664 mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] = lb_depth;
665 }
666 mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes] = 0;
667 // The DML spreadsheet assumes that the two cursors utilize the same amount of bandwidth. We'll
668 // calculate things a little more accurately
669 for (k = 0; k < DC__NUM_CURSOR__MAX; ++k) {
670 switch (k) {
671 case 0:
672 mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][0] =
673 CursorBppEnumToBits(
674 (enum cursor_bpp) (src->cur0_bpp));
675 mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][0] =
676 src->cur0_src_width;
677 if (src->cur0_src_width > 0)
678 mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
679 break;
680 case 1:
681 mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][1] =
682 CursorBppEnumToBits(
683 (enum cursor_bpp) (src->cur1_bpp));
684 mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][1] =
685 src->cur1_src_width;
686 if (src->cur1_src_width > 0)
687 mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
688 break;
689 default:
690 dml_print(
691 "ERROR: Number of cursors specified exceeds supported maximum\n")
692 ;
693 }
694 }
695
696 OTGInstPlane[mode_lib->vba.NumberOfActivePlanes] = dst->otg_inst;
697
698 if (dst->odm_combine && !src->is_hsplit)
699 dml_print(
700 "ERROR: ODM Combine is specified but is_hsplit has not be specified for pipe %i\n",
701 j);
702
703 if (src->is_hsplit) {
704 for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {
705 display_pipe_source_params_st *src_k = &pipes[k].pipe.src;
706 display_output_params_st *dout_k = &pipes[k].dout;
707
708 if (src_k->is_hsplit && !visited[k]
709 && src->hsplit_grp == src_k->hsplit_grp) {
710 mode_lib->vba.pipe_plane[k] =
711 mode_lib->vba.NumberOfActivePlanes;
712 mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
713 if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
714 == dm_horz)
715 mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
716 src_k->viewport_width;
717 else
718 mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
719 src_k->viewport_height;
720
721 mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] +=
722 dout_k->dsc_slices;
723 visited[k] = true;
724 }
725 }
726 }
727
728 mode_lib->vba.NumberOfActivePlanes++;
729 }
730
731 // handle overlays through dml_ml->vba.BlendingAndTiming
732 // dml_ml->vba.BlendingAndTiming tells you which instance to look at to get the timing, the so-called 'master'
733
734 for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
735 PlaneVisited[j] = false;
736
737 for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
738 for (k = j + 1; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
739 if (!PlaneVisited[k] && OTGInstPlane[j] == OTGInstPlane[k]) {
740 // doesn't matter, so choose the smaller one
741 mode_lib->vba.BlendingAndTiming[j] = j;
742 PlaneVisited[j] = true;
743 mode_lib->vba.BlendingAndTiming[k] = j;
744 PlaneVisited[k] = true;
745 }
746 }
747
748 if (!PlaneVisited[j]) {
749 mode_lib->vba.BlendingAndTiming[j] = j;
750 PlaneVisited[j] = true;
751 }
752 }
753
754 // TODO: dml_ml->vba.ODMCombineEnabled => 2 * dml_ml->vba.DPPPerPlane...actually maybe not since all pipes are specified
755 // Do we want the dscclk to automatically be halved? Guess not since the value is specified
756
757 mode_lib->vba.SynchronizedVBlank = pipes[0].pipe.dest.synchronized_vblank_all_planes;
758 for (k = 1; k < mode_lib->vba.cache_num_pipes; ++k)
759 ASSERT(mode_lib->vba.SynchronizedVBlank == pipes[k].pipe.dest.synchronized_vblank_all_planes);
760
761 mode_lib->vba.VirtualMemoryEnable = false;
762 mode_lib->vba.OverridePageTableLevels = 0;
763
764 for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k) {
765 mode_lib->vba.VirtualMemoryEnable = mode_lib->vba.VirtualMemoryEnable
766 || !!pipes[k].pipe.src.vm;
767 mode_lib->vba.OverridePageTableLevels =
768 (pipes[k].pipe.src.vm_levels_force_en
769 && mode_lib->vba.OverridePageTableLevels
770 < pipes[k].pipe.src.vm_levels_force) ?
771 pipes[k].pipe.src.vm_levels_force :
772 mode_lib->vba.OverridePageTableLevels;
773 }
774
775 if (mode_lib->vba.OverridePageTableLevels)
776 mode_lib->vba.MaxPageTableLevels = mode_lib->vba.OverridePageTableLevels;
777
778 mode_lib->vba.VirtualMemoryEnable = mode_lib->vba.VirtualMemoryEnable && !!ip->pte_enable;
779
780 mode_lib->vba.FabricAndDRAMBandwidth = dml_min(
781 mode_lib->vba.DRAMSpeed * mode_lib->vba.NumberOfChannels
782 * mode_lib->vba.DRAMChannelWidth,
783 mode_lib->vba.FabricClock * mode_lib->vba.FabricDatapathToDCNDataReturn)
784 / 1000.0;
785
786 // TODO: Must be consistent across all pipes
787 // DCCProgrammingAssumesScanDirectionUnknown = src.dcc_scan_dir_unknown;
788}
789
790static void recalculate(struct display_mode_lib *mode_lib)
791{
792 ModeSupportAndSystemConfiguration(mode_lib);
793 PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
794 DisplayPipeConfiguration(mode_lib);
795 DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
796}
797
798// in wm mode we pull the parameters needed from the display_e2e_pipe_params_st structs
799// rather than working them out as in recalculate_ms
800static void recalculate_params(
801 struct display_mode_lib *mode_lib,
802 const display_e2e_pipe_params_st *pipes,
803 unsigned int num_pipes)
804{
805 // Using memcmp here is only safe because there are no non-POD types in struct display_mode_lib
806 if (memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
807 || memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
808 || num_pipes != mode_lib->vba.cache_num_pipes
809 || memcmp(
810 pipes,
811 mode_lib->vba.cache_pipes,
812 sizeof(display_e2e_pipe_params_st) * num_pipes) != 0) {
813 mode_lib->vba.soc = mode_lib->soc;
814 mode_lib->vba.ip = mode_lib->ip;
815 memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
816 mode_lib->vba.cache_num_pipes = num_pipes;
817 recalculate(mode_lib);
818 }
819}
820
821static void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
822{
823 soc_bounding_box_st *soc = &mode_lib->vba.soc;
824 unsigned int i, k;
825 unsigned int total_pipes = 0;
826
827 mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
828 for (i = 1; i < mode_lib->vba.cache_num_pipes; ++i)
829 ASSERT(mode_lib->vba.VoltageLevel == -1 || mode_lib->vba.VoltageLevel == mode_lib->vba.cache_pipes[i].clks_cfg.voltage);
830
831 mode_lib->vba.DCFCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dcfclk_mhz;
832 mode_lib->vba.SOCCLK = mode_lib->vba.cache_pipes[0].clks_cfg.socclk_mhz;
833
834 if (mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz > 0.0)
835 mode_lib->vba.DISPCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz;
836 else
837 mode_lib->vba.DISPCLK = soc->clock_limits[mode_lib->vba.VoltageLevel].dispclk_mhz;
838
839 fetch_socbb_params(mode_lib);
840 fetch_ip_params(mode_lib);
841 fetch_pipe_params(mode_lib);
842
843 // Total Available Pipes Support Check
844 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
845 total_pipes += mode_lib->vba.DPPPerPlane[k];
846 ASSERT(total_pipes <= DC__NUM_DPP__MAX);
847}
848
849static double adjust_ReturnBW(
850 struct display_mode_lib *mode_lib,
851 double ReturnBW,
852 bool DCCEnabledAnyPlane,
853 double ReturnBandwidthToDCN)
854{
855 double CriticalCompression;
856
857 if (DCCEnabledAnyPlane
858 && ReturnBandwidthToDCN
859 > mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4.0)
860 ReturnBW =
861 dml_min(
862 ReturnBW,
863 ReturnBandwidthToDCN * 4
864 * (1.0
865 - mode_lib->vba.UrgentLatency
866 / ((mode_lib->vba.ROBBufferSizeInKByte
867 - mode_lib->vba.PixelChunkSizeInKByte)
868 * 1024
869 / ReturnBandwidthToDCN
870 - mode_lib->vba.DCFCLK
871 * mode_lib->vba.ReturnBusWidth
872 / 4)
873 + mode_lib->vba.UrgentLatency));
874
875 CriticalCompression = 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK
876 * mode_lib->vba.UrgentLatency
877 / (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatency
878 + (mode_lib->vba.ROBBufferSizeInKByte
879 - mode_lib->vba.PixelChunkSizeInKByte)
880 * 1024);
881
882 if (DCCEnabledAnyPlane && CriticalCompression > 1.0 && CriticalCompression < 4.0)
883 ReturnBW =
884 dml_min(
885 ReturnBW,
886 4.0 * ReturnBandwidthToDCN
887 * (mode_lib->vba.ROBBufferSizeInKByte
888 - mode_lib->vba.PixelChunkSizeInKByte)
889 * 1024
890 * mode_lib->vba.ReturnBusWidth
891 * mode_lib->vba.DCFCLK
892 * mode_lib->vba.UrgentLatency
893 / dml_pow(
894 (ReturnBandwidthToDCN
895 * mode_lib->vba.UrgentLatency
896 + (mode_lib->vba.ROBBufferSizeInKByte
897 - mode_lib->vba.PixelChunkSizeInKByte)
898 * 1024),
899 2));
900
901 return ReturnBW;
902}
903
904static unsigned int dscceComputeDelay(
905 unsigned int bpc,
906 double bpp,
907 unsigned int sliceWidth,
908 unsigned int numSlices,
909 enum output_format_class pixelFormat)
910{
911 // valid bpc = source bits per component in the set of {8, 10, 12}
912 // valid bpp = increments of 1/16 of a bit
913 // min = 6/7/8 in N420/N422/444, respectively
914 // max = such that compression is 1:1
915 //valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
916 //valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
917 //valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
918
919 // fixed value
920 unsigned int rcModelSize = 8192;
921
922 // N422/N420 operate at 2 pixels per clock
923 unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l,
924 Delay, pixels;
925
926 if (pixelFormat == dm_n422 || pixelFormat == dm_420)
927 pixelsPerClock = 2;
928 // all other modes operate at 1 pixel per clock
929 else
930 pixelsPerClock = 1;
931
932 //initial transmit delay as per PPS
933 initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock);
934
935 //compute ssm delay
936 if (bpc == 8)
937 D = 81;
938 else if (bpc == 10)
939 D = 89;
940 else
941 D = 113;
942
943 //divide by pixel per cycle to compute slice width as seen by DSC
944 w = sliceWidth / pixelsPerClock;
945
946 //422 mode has an additional cycle of delay
947 if (pixelFormat == dm_s422)
948 s = 1;
949 else
950 s = 0;
951
952 //main calculation for the dscce
953 ix = initalXmitDelay + 45;
954 wx = (w + 2) / 3;
955 p = 3 * wx - w;
956 l0 = ix / w;
957 a = ix + p * l0;
958 ax = (a + 2) / 3 + D + 6 + 1;
959 l = (ax + wx - 1) / wx;
960 if ((ix % w) == 0 && p != 0)
961 lstall = 1;
962 else
963 lstall = 0;
964 Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22;
965
966 //dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
967 pixels = Delay * 3 * pixelsPerClock;
968 return pixels;
969}
970
971static unsigned int dscComputeDelay(enum output_format_class pixelFormat)
972{
973 unsigned int Delay = 0;
974
975 if (pixelFormat == dm_420) {
976 // sfr
977 Delay = Delay + 2;
978 // dsccif
979 Delay = Delay + 0;
980 // dscc - input deserializer
981 Delay = Delay + 3;
982 // dscc gets pixels every other cycle
983 Delay = Delay + 2;
984 // dscc - input cdc fifo
985 Delay = Delay + 12;
986 // dscc gets pixels every other cycle
987 Delay = Delay + 13;
988 // dscc - cdc uncertainty
989 Delay = Delay + 2;
990 // dscc - output cdc fifo
991 Delay = Delay + 7;
992 // dscc gets pixels every other cycle
993 Delay = Delay + 3;
994 // dscc - cdc uncertainty
995 Delay = Delay + 2;
996 // dscc - output serializer
997 Delay = Delay + 1;
998 // sft
999 Delay = Delay + 1;
1000 } else if (pixelFormat == dm_n422) {
1001 // sfr
1002 Delay = Delay + 2;
1003 // dsccif
1004 Delay = Delay + 1;
1005 // dscc - input deserializer
1006 Delay = Delay + 5;
1007 // dscc - input cdc fifo
1008 Delay = Delay + 25;
1009 // dscc - cdc uncertainty
1010 Delay = Delay + 2;
1011 // dscc - output cdc fifo
1012 Delay = Delay + 10;
1013 // dscc - cdc uncertainty
1014 Delay = Delay + 2;
1015 // dscc - output serializer
1016 Delay = Delay + 1;
1017 // sft
1018 Delay = Delay + 1;
1019 } else {
1020 // sfr
1021 Delay = Delay + 2;
1022 // dsccif
1023 Delay = Delay + 0;
1024 // dscc - input deserializer
1025 Delay = Delay + 3;
1026 // dscc - input cdc fifo
1027 Delay = Delay + 12;
1028 // dscc - cdc uncertainty
1029 Delay = Delay + 2;
1030 // dscc - output cdc fifo
1031 Delay = Delay + 7;
1032 // dscc - output serializer
1033 Delay = Delay + 1;
1034 // dscc - cdc uncertainty
1035 Delay = Delay + 2;
1036 // sft
1037 Delay = Delay + 1;
1038 }
1039
1040 return Delay;
1041}
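/*
 * Illustrative sketch (guarded out, not part of the driver): the two helpers
 * above are combined into the per-stream DSC delay, mirroring the DSC-delay
 * loop later in this file.  Without ODM combine each DSC engine sees a slice
 * width of HActive / NumberOfDSCSlices, and the resulting delay is rescaled
 * from back-end to front-end pixel-clock cycles.  All parameters below are
 * hypothetical placeholders.
 */
#if 0
static unsigned int example_total_dsc_delay(unsigned int bpc, double bpp,
		unsigned int hactive, unsigned int num_slices,
		enum output_format_class fmt, double pixel_clock,
		double pixel_clock_backend)
{
	unsigned int delay = dscceComputeDelay(bpc, bpp,
			dml_ceil((double) hactive / num_slices, 1),
			num_slices, fmt)
			+ dscComputeDelay(fmt);

	/* express the delay in front-end pixel-clock cycles */
	return delay * pixel_clock / pixel_clock_backend;
}
#endif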
1042
1043static bool CalculatePrefetchSchedule(
1044 struct display_mode_lib *mode_lib,
1045 double DPPCLK,
1046 double DISPCLK,
1047 double PixelClock,
1048 double DCFClkDeepSleep,
1049 unsigned int DSCDelay,
1050 unsigned int DPPPerPlane,
1051 bool ScalerEnabled,
1052 unsigned int NumberOfCursors,
1053 double DPPCLKDelaySubtotal,
1054 double DPPCLKDelaySCL,
1055 double DPPCLKDelaySCLLBOnly,
1056 double DPPCLKDelayCNVCFormater,
1057 double DPPCLKDelayCNVCCursor,
1058 double DISPCLKDelaySubtotal,
1059 unsigned int ScalerRecoutWidth,
1060 enum output_format_class OutputFormat,
1061 unsigned int VBlank,
1062 unsigned int HTotal,
1063 unsigned int MaxInterDCNTileRepeaters,
1064 unsigned int VStartup,
1065 unsigned int PageTableLevels,
1066 bool VirtualMemoryEnable,
1067 bool DynamicMetadataEnable,
1068 unsigned int DynamicMetadataLinesBeforeActiveRequired,
1069 unsigned int DynamicMetadataTransmittedBytes,
1070 bool DCCEnable,
1071 double UrgentLatency,
1072 double UrgentExtraLatency,
1073 double TCalc,
1074 unsigned int PDEAndMetaPTEBytesFrame,
1075 unsigned int MetaRowByte,
1076 unsigned int PixelPTEBytesPerRow,
1077 double PrefetchSourceLinesY,
1078 unsigned int SwathWidthY,
1079 double BytePerPixelDETY,
1080 double VInitPreFillY,
1081 unsigned int MaxNumSwathY,
1082 double PrefetchSourceLinesC,
1083 double BytePerPixelDETC,
1084 double VInitPreFillC,
1085 unsigned int MaxNumSwathC,
1086 unsigned int SwathHeightY,
1087 unsigned int SwathHeightC,
1088 double TWait,
1089 bool XFCEnabled,
1090 double XFCRemoteSurfaceFlipDelay,
1091 bool InterlaceEnable,
1092 bool ProgressiveToInterlaceUnitInOPP,
1093 double *DSTXAfterScaler,
1094 double *DSTYAfterScaler,
1095 double *DestinationLinesForPrefetch,
1096 double *PrefetchBandwidth,
1097 double *DestinationLinesToRequestVMInVBlank,
1098 double *DestinationLinesToRequestRowInVBlank,
1099 double *VRatioPrefetchY,
1100 double *VRatioPrefetchC,
1101 double *RequiredPrefetchPixDataBW,
1102 unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
1103 double *Tno_bw,
1104 unsigned int *VUpdateOffsetPix,
1105 unsigned int *VUpdateWidthPix,
1106 unsigned int *VReadyOffsetPix)
1107{
1108 bool MyError = false;
1109 unsigned int DPPCycles, DISPCLKCycles;
1110 double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime;
1111 double Tdm, LineTime, Tsetup;
1112 double dst_y_prefetch_equ;
1113 double Tsw_oto;
1114 double prefetch_bw_oto;
1115 double Tvm_oto;
1116 double Tr0_oto;
1117 double Tpre_oto;
1118 double dst_y_prefetch_oto;
1119 double TimeForFetchingMetaPTE = 0;
1120 double TimeForFetchingRowInVBlank = 0;
1121 double LinesToRequestPrefetchPixelData = 0;
1122
1123 if (ScalerEnabled)
1124 DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL;
1125 else
1126 DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly;
1127
1128 DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + NumberOfCursors * DPPCLKDelayCNVCCursor;
1129
1130 DISPCLKCycles = DISPCLKDelaySubtotal;
1131
1132 if (DPPCLK == 0.0 || DISPCLK == 0.0)
1133 return true;
1134
1135 *DSTXAfterScaler = DPPCycles * PixelClock / DPPCLK + DISPCLKCycles * PixelClock / DISPCLK
1136 + DSCDelay;
1137
1138 if (DPPPerPlane > 1)
1139 *DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth;
1140
1141 if (OutputFormat == dm_420 || (InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
1142 *DSTYAfterScaler = 1;
1143 else
1144 *DSTYAfterScaler = 0;
1145
1146 DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * HTotal)) + *DSTXAfterScaler;
1147 *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / HTotal, 1);
1148 *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * HTotal));
1149
1150 *VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
1151 TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / DPPCLK + 3.0 / DISPCLK);
1152 *VUpdateWidthPix = (14.0 / DCFClkDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime)
1153 * PixelClock;
1154
1155 *VReadyOffsetPix = dml_max(
1156 150.0 / DPPCLK,
1157 TotalRepeaterDelayTime + 20.0 / DCFClkDeepSleep + 10.0 / DPPCLK)
1158 * PixelClock;
1159
1160 Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
1161
1162 LineTime = (double) HTotal / PixelClock;
1163
1164 if (DynamicMetadataEnable) {
1165 double Tdmbf, Tdmec, Tdmsks;
1166
1167 Tdm = dml_max(0.0, UrgentExtraLatency - TCalc);
1168 Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
1169 Tdmec = LineTime;
1170 if (DynamicMetadataLinesBeforeActiveRequired == 0)
1171 Tdmsks = VBlank * LineTime / 2.0;
1172 else
1173 Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime;
1174 if (InterlaceEnable && !ProgressiveToInterlaceUnitInOPP)
1175 Tdmsks = Tdmsks / 2;
1176 if (VStartup * LineTime
1177 < Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) {
1178 MyError = true;
1179 *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = (Tsetup + TWait
1180 + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) / LineTime;
1181 } else
1182 *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = 0.0;
1183 } else
1184 Tdm = 0;
1185
1186 if (VirtualMemoryEnable) {
1187 if (PageTableLevels == 4)
1188 *Tno_bw = UrgentExtraLatency + UrgentLatency;
1189 else if (PageTableLevels == 3)
1190 *Tno_bw = UrgentExtraLatency;
1191 else
1192 *Tno_bw = 0;
1193 } else if (DCCEnable)
1194 *Tno_bw = LineTime;
1195 else
1196 *Tno_bw = LineTime / 4;
1197
1198 dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime
1199 - (Tsetup + Tdm) / LineTime
1200 - (*DSTYAfterScaler + *DSTXAfterScaler / HTotal);
1201
1202 Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
1203
1204 prefetch_bw_oto = (MetaRowByte + PixelPTEBytesPerRow
1205 + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
1206 + PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2))
1207 / Tsw_oto;
1208
1209 if (VirtualMemoryEnable == true) {
1210 Tvm_oto =
1211 dml_max(
1212 *Tno_bw + PDEAndMetaPTEBytesFrame / prefetch_bw_oto,
1213 dml_max(
1214 UrgentExtraLatency
1215 + UrgentLatency
1216 * (PageTableLevels
1217 - 1),
1218 LineTime / 4.0));
1219 } else
1220 Tvm_oto = LineTime / 4.0;
1221
1222 if ((VirtualMemoryEnable == true || DCCEnable == true)) {
1223 Tr0_oto = dml_max(
1224 (MetaRowByte + PixelPTEBytesPerRow) / prefetch_bw_oto,
1225 dml_max(UrgentLatency, dml_max(LineTime - Tvm_oto, LineTime / 4)));
1226 } else
1227 Tr0_oto = LineTime - Tvm_oto;
1228
1229 Tpre_oto = Tvm_oto + Tr0_oto + Tsw_oto;
1230
1231 dst_y_prefetch_oto = Tpre_oto / LineTime;
1232
1233 if (dst_y_prefetch_oto < dst_y_prefetch_equ)
1234 *DestinationLinesForPrefetch = dst_y_prefetch_oto;
1235 else
1236 *DestinationLinesForPrefetch = dst_y_prefetch_equ;
1237
1238 *DestinationLinesForPrefetch = dml_floor(4.0 * (*DestinationLinesForPrefetch + 0.125), 1)
1239 / 4;
1240
1241 dml_print("DML: VStartup: %d\n", VStartup);
1242 dml_print("DML: TCalc: %f\n", TCalc);
1243 dml_print("DML: TWait: %f\n", TWait);
1244 dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay);
1245 dml_print("DML: LineTime: %f\n", LineTime);
1246 dml_print("DML: Tsetup: %f\n", Tsetup);
1247 dml_print("DML: Tdm: %f\n", Tdm);
1248 dml_print("DML: DSTYAfterScaler: %f\n", *DSTYAfterScaler);
1249 dml_print("DML: DSTXAfterScaler: %f\n", *DSTXAfterScaler);
1250 dml_print("DML: HTotal: %d\n", HTotal);
1251
1252 *PrefetchBandwidth = 0;
1253 *DestinationLinesToRequestVMInVBlank = 0;
1254 *DestinationLinesToRequestRowInVBlank = 0;
1255 *VRatioPrefetchY = 0;
1256 *VRatioPrefetchC = 0;
1257 *RequiredPrefetchPixDataBW = 0;
1258 if (*DestinationLinesForPrefetch > 1) {
1259 *PrefetchBandwidth = (PDEAndMetaPTEBytesFrame + 2 * MetaRowByte
1260 + 2 * PixelPTEBytesPerRow
1261 + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
1262 + PrefetchSourceLinesC * SwathWidthY / 2
1263 * dml_ceil(BytePerPixelDETC, 2))
1264 / (*DestinationLinesForPrefetch * LineTime - *Tno_bw);
1265 if (VirtualMemoryEnable) {
1266 TimeForFetchingMetaPTE =
1267 dml_max(
1268 *Tno_bw
1269 + (double) PDEAndMetaPTEBytesFrame
1270 / *PrefetchBandwidth,
1271 dml_max(
1272 UrgentExtraLatency
1273 + UrgentLatency
1274 * (PageTableLevels
1275 - 1),
1276 LineTime / 4));
1277 } else {
1278 if (NumberOfCursors > 0 || XFCEnabled)
1279 TimeForFetchingMetaPTE = LineTime / 4;
1280 else
1281 TimeForFetchingMetaPTE = 0.0;
1282 }
1283
1284 if ((VirtualMemoryEnable == true || DCCEnable == true)) {
1285 TimeForFetchingRowInVBlank =
1286 dml_max(
1287 (MetaRowByte + PixelPTEBytesPerRow)
1288 / *PrefetchBandwidth,
1289 dml_max(
1290 UrgentLatency,
1291 dml_max(
1292 LineTime
1293 - TimeForFetchingMetaPTE,
1294 LineTime
1295 / 4.0)));
1296 } else {
1297 if (NumberOfCursors > 0 || XFCEnabled)
1298 TimeForFetchingRowInVBlank = LineTime - TimeForFetchingMetaPTE;
1299 else
1300 TimeForFetchingRowInVBlank = 0.0;
1301 }
1302
1303 *DestinationLinesToRequestVMInVBlank = dml_floor(
1304 4.0 * (TimeForFetchingMetaPTE / LineTime + 0.125),
1305 1) / 4.0;
1306
1307 *DestinationLinesToRequestRowInVBlank = dml_floor(
1308 4.0 * (TimeForFetchingRowInVBlank / LineTime + 0.125),
1309 1) / 4.0;
1310
1311 LinesToRequestPrefetchPixelData =
1312 *DestinationLinesForPrefetch
1313 - ((NumberOfCursors > 0 || VirtualMemoryEnable
1314 || DCCEnable) ?
1315 (*DestinationLinesToRequestVMInVBlank
1316 + *DestinationLinesToRequestRowInVBlank) :
1317 0.0);
1318
1319 if (LinesToRequestPrefetchPixelData > 0) {
1320
1321 *VRatioPrefetchY = (double) PrefetchSourceLinesY
1322 / LinesToRequestPrefetchPixelData;
1323 *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
1324 if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
1325 if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
1326 *VRatioPrefetchY =
1327 dml_max(
1328 (double) PrefetchSourceLinesY
1329 / LinesToRequestPrefetchPixelData,
1330 (double) MaxNumSwathY
1331 * SwathHeightY
1332 / (LinesToRequestPrefetchPixelData
1333 - (VInitPreFillY
1334 - 3.0)
1335 / 2.0));
1336 *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
1337 } else {
1338 MyError = true;
1339 *VRatioPrefetchY = 0;
1340 }
1341 }
1342
1343 *VRatioPrefetchC = (double) PrefetchSourceLinesC
1344 / LinesToRequestPrefetchPixelData;
1345 *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
1346
1347 if ((SwathHeightC > 4)) {
1348 if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
1349 *VRatioPrefetchC =
1350 dml_max(
1351 *VRatioPrefetchC,
1352 (double) MaxNumSwathC
1353 * SwathHeightC
1354 / (LinesToRequestPrefetchPixelData
1355 - (VInitPreFillC
1356 - 3.0)
1357 / 2.0));
1358 *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
1359 } else {
1360 MyError = true;
1361 *VRatioPrefetchC = 0;
1362 }
1363 }
1364
1365 *RequiredPrefetchPixDataBW =
1366 DPPPerPlane
1367 * ((double) PrefetchSourceLinesY
1368 / LinesToRequestPrefetchPixelData
1369 * dml_ceil(
1370 BytePerPixelDETY,
1371 1)
1372 + (double) PrefetchSourceLinesC
1373 / LinesToRequestPrefetchPixelData
1374 * dml_ceil(
1375 BytePerPixelDETC,
1376 2)
1377 / 2)
1378 * SwathWidthY / LineTime;
1379 } else {
1380 MyError = true;
1381 *VRatioPrefetchY = 0;
1382 *VRatioPrefetchC = 0;
1383 *RequiredPrefetchPixDataBW = 0;
1384 }
1385
1386 } else {
1387 MyError = true;
1388 }
1389
1390 if (MyError) {
1391 *PrefetchBandwidth = 0;
1392 TimeForFetchingMetaPTE = 0;
1393 TimeForFetchingRowInVBlank = 0;
1394 *DestinationLinesToRequestVMInVBlank = 0;
1395 *DestinationLinesToRequestRowInVBlank = 0;
1396 *DestinationLinesForPrefetch = 0;
1397 LinesToRequestPrefetchPixelData = 0;
1398 *VRatioPrefetchY = 0;
1399 *VRatioPrefetchC = 0;
1400 *RequiredPrefetchPixDataBW = 0;
1401 }
1402
1403 return MyError;
1404}
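/*
 * Note on the schedule above (illustrative, guarded out): the routine builds
 * two prefetch budgets -- dst_y_prefetch_equ, the destination lines actually
 * available between VStartup and active video once setup, dynamic-metadata
 * and scaler latency are subtracted, and dst_y_prefetch_oto, the line count
 * implied by the bandwidth-limited Tvm_oto + Tr0_oto + Tsw_oto schedule --
 * and keeps the smaller of the two.  Every line count handed back to the
 * caller is then quantized to quarter lines the same way:
 */
#if 0
static double example_quantize_to_quarter_lines(double lines)
{
	/* e.g. 3.30 lines -> floor(4 * (3.30 + 0.125)) / 4 = 3.25 lines */
	return dml_floor(4.0 * (lines + 0.125), 1) / 4.0;
}
#endif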
1405
1406static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
1407{
1408 return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
1409}
1410
1411static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
1412{
1413 return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1);
1414}
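/*
 * Illustrative example (guarded out): the DFS divider can only produce
 * clocks of the form VCOSpeed * 4 / N for integer N, so a requested clock is
 * rounded to the nearest achievable step in the chosen direction.  With a
 * hypothetical 3600 MHz VCO, a 700 MHz request rounds up to 3600 * 4 / 20 =
 * 720 MHz and down to 3600 * 4 / 21 ~= 685.7 MHz.
 */
#if 0
static void example_dfs_rounding(void)
{
	double up = RoundToDFSGranularityUp(700.0, 3600.0);	/* 720.0   */
	double down = RoundToDFSGranularityDown(700.0, 3600.0);	/* ~685.71 */

	(void) up;
	(void) down;
}
#endif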
1415
1416static double CalculatePrefetchSourceLines(
1417 struct display_mode_lib *mode_lib,
1418 double VRatio,
1419 double vtaps,
1420 bool Interlace,
1421 bool ProgressiveToInterlaceUnitInOPP,
1422 unsigned int SwathHeight,
1423 unsigned int ViewportYStart,
1424 double *VInitPreFill,
1425 unsigned int *MaxNumSwath)
1426{
1427 unsigned int MaxPartialSwath;
1428
1429 if (ProgressiveToInterlaceUnitInOPP)
1430 *VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
1431 else
1432 *VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
1433
1434 if (!mode_lib->vba.IgnoreViewportPositioning) {
1435
1436 *MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
1437
1438 if (*VInitPreFill > 1.0)
1439 MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
1440 else
1441 MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
1442 % SwathHeight;
1443 MaxPartialSwath = dml_max(1U, MaxPartialSwath);
1444
1445 } else {
1446
1447 if (ViewportYStart != 0)
1448 dml_print(
1449 "WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
1450
1451 *MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
1452
1453 if (*VInitPreFill > 1.0)
1454 MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
1455 else
1456 MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
1457 % SwathHeight;
1458 }
1459
1460 return *MaxNumSwath * SwathHeight + MaxPartialSwath;
1461}
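/*
 * Worked example for the helper above (hypothetical values): with
 * VRatio = 1.0, 4 vertical taps, progressive output and a swath height of 4,
 * VInitPreFill = floor((1 + 4 + 1) / 2) = 3.  When viewport positioning is
 * not ignored, MaxNumSwath = ceil((3 - 1) / 4) + 1 = 2 and the partial swath
 * is (3 - 2) % 4 = 1, so 2 * 4 + 1 = 9 source lines must be prefetched for
 * that plane.
 */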
1462
1463static unsigned int CalculateVMAndRowBytes(
1464 struct display_mode_lib *mode_lib,
1465 bool DCCEnable,
1466 unsigned int BlockHeight256Bytes,
1467 unsigned int BlockWidth256Bytes,
1468 enum source_format_class SourcePixelFormat,
1469 unsigned int SurfaceTiling,
1470 unsigned int BytePerPixel,
1471 enum scan_direction_class ScanDirection,
1472 unsigned int ViewportWidth,
1473 unsigned int ViewportHeight,
1474 unsigned int SwathWidth,
1475 bool VirtualMemoryEnable,
1476 unsigned int VMMPageSize,
1477 unsigned int PTEBufferSizeInRequests,
1478 unsigned int PDEProcessingBufIn64KBReqs,
1479 unsigned int Pitch,
1480 unsigned int DCCMetaPitch,
1481 unsigned int *MacroTileWidth,
1482 unsigned int *MetaRowByte,
1483 unsigned int *PixelPTEBytesPerRow,
1484 bool *PTEBufferSizeNotExceeded,
1485 unsigned int *dpte_row_height,
1486 unsigned int *meta_row_height)
1487{
1488 unsigned int MetaRequestHeight;
1489 unsigned int MetaRequestWidth;
1490 unsigned int MetaSurfWidth;
1491 unsigned int MetaSurfHeight;
1492 unsigned int MPDEBytesFrame;
1493 unsigned int MetaPTEBytesFrame;
1494 unsigned int DCCMetaSurfaceBytes;
1495
1496 unsigned int MacroTileSizeBytes;
1497 unsigned int MacroTileHeight;
1498 unsigned int DPDE0BytesFrame;
1499 unsigned int ExtraDPDEBytesFrame;
1500 unsigned int PDEAndMetaPTEBytesFrame;
1501
1502 if (DCCEnable == true) {
1503 MetaRequestHeight = 8 * BlockHeight256Bytes;
1504 MetaRequestWidth = 8 * BlockWidth256Bytes;
1505 if (ScanDirection == dm_horz) {
1506 *meta_row_height = MetaRequestHeight;
1507 MetaSurfWidth = dml_ceil((double) SwathWidth - 1, MetaRequestWidth)
1508 + MetaRequestWidth;
1509 *MetaRowByte = MetaSurfWidth * MetaRequestHeight * BytePerPixel / 256.0;
1510 } else {
1511 *meta_row_height = MetaRequestWidth;
1512 MetaSurfHeight = dml_ceil((double) SwathWidth - 1, MetaRequestHeight)
1513 + MetaRequestHeight;
1514 *MetaRowByte = MetaSurfHeight * MetaRequestWidth * BytePerPixel / 256.0;
1515 }
1516 if (ScanDirection == dm_horz) {
1517 DCCMetaSurfaceBytes = DCCMetaPitch
1518 * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
1519 + 64 * BlockHeight256Bytes) * BytePerPixel
1520 / 256;
1521 } else {
1522 DCCMetaSurfaceBytes = DCCMetaPitch
1523 * (dml_ceil(
1524 (double) ViewportHeight - 1,
1525 64 * BlockHeight256Bytes)
1526 + 64 * BlockHeight256Bytes) * BytePerPixel
1527 / 256;
1528 }
1529 if (VirtualMemoryEnable == true) {
1530 MetaPTEBytesFrame = (dml_ceil(
1531 (double) (DCCMetaSurfaceBytes - VMMPageSize)
1532 / (8 * VMMPageSize),
1533 1) + 1) * 64;
1534 MPDEBytesFrame = 128 * (mode_lib->vba.MaxPageTableLevels - 1);
1535 } else {
1536 MetaPTEBytesFrame = 0;
1537 MPDEBytesFrame = 0;
1538 }
1539 } else {
1540 MetaPTEBytesFrame = 0;
1541 MPDEBytesFrame = 0;
1542 *MetaRowByte = 0;
1543 }
1544
1545 if (SurfaceTiling == dm_sw_linear) {
1546 MacroTileSizeBytes = 256;
1547 MacroTileHeight = 1;
1548 } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
1549 || SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) {
1550 MacroTileSizeBytes = 4096;
1551 MacroTileHeight = 4 * BlockHeight256Bytes;
1552 } else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t
1553 || SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d
1554 || SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x
1555 || SurfaceTiling == dm_sw_64kb_r_x) {
1556 MacroTileSizeBytes = 65536;
1557 MacroTileHeight = 16 * BlockHeight256Bytes;
1558 } else {
1559 MacroTileSizeBytes = 262144;
1560 MacroTileHeight = 32 * BlockHeight256Bytes;
1561 }
1562 *MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
1563
1564 if (VirtualMemoryEnable == true && mode_lib->vba.MaxPageTableLevels > 1) {
1565 if (ScanDirection == dm_horz) {
1566 DPDE0BytesFrame =
1567 64
1568 * (dml_ceil(
1569 ((Pitch
1570 * (dml_ceil(
1571 ViewportHeight
1572 - 1,
1573 MacroTileHeight)
1574 + MacroTileHeight)
1575 * BytePerPixel)
1576 - MacroTileSizeBytes)
1577 / (8
1578 * 2097152),
1579 1) + 1);
1580 } else {
1581 DPDE0BytesFrame =
1582 64
1583 * (dml_ceil(
1584 ((Pitch
1585 * (dml_ceil(
1586 (double) SwathWidth
1587 - 1,
1588 MacroTileHeight)
1589 + MacroTileHeight)
1590 * BytePerPixel)
1591 - MacroTileSizeBytes)
1592 / (8
1593 * 2097152),
1594 1) + 1);
1595 }
1596 ExtraDPDEBytesFrame = 128 * (mode_lib->vba.MaxPageTableLevels - 2);
1597 } else {
1598 DPDE0BytesFrame = 0;
1599 ExtraDPDEBytesFrame = 0;
1600 }
1601
1602 PDEAndMetaPTEBytesFrame = MetaPTEBytesFrame + MPDEBytesFrame + DPDE0BytesFrame
1603 + ExtraDPDEBytesFrame;
1604
1605 if (VirtualMemoryEnable == true) {
1606 unsigned int PTERequestSize;
1607 unsigned int PixelPTEReqHeight;
1608 unsigned int PixelPTEReqWidth;
1609 double FractionOfPTEReturnDrop;
1610 unsigned int EffectivePDEProcessingBufIn64KBReqs;
1611
1612 if (SurfaceTiling == dm_sw_linear) {
1613 PixelPTEReqHeight = 1;
1614 PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel;
1615 PTERequestSize = 64;
1616 FractionOfPTEReturnDrop = 0;
1617 } else if (MacroTileSizeBytes == 4096) {
1618 PixelPTEReqHeight = MacroTileHeight;
1619 PixelPTEReqWidth = 8 * *MacroTileWidth;
1620 PTERequestSize = 64;
1621 if (ScanDirection == dm_horz)
1622 FractionOfPTEReturnDrop = 0;
1623 else
1624 FractionOfPTEReturnDrop = 7.0 / 8.0;
1625 } else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
1626 PixelPTEReqHeight = 16 * BlockHeight256Bytes;
1627 PixelPTEReqWidth = 16 * BlockWidth256Bytes;
1628 PTERequestSize = 128;
1629 FractionOfPTEReturnDrop = 0;
1630 } else {
1631 PixelPTEReqHeight = MacroTileHeight;
1632 PixelPTEReqWidth = 8 * *MacroTileWidth;
1633 PTERequestSize = 64;
1634 FractionOfPTEReturnDrop = 0;
1635 }
1636
1637 if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)
1638 EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs / 2;
1639 else
1640 EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs;
1641
1642 if (SurfaceTiling == dm_sw_linear) {
1643 *dpte_row_height =
1644 dml_min(
1645 128,
1646 1
1647 << (unsigned int) dml_floor(
1648 dml_log2(
1649 dml_min(
1650 (double) PTEBufferSizeInRequests
1651 * PixelPTEReqWidth,
1652 EffectivePDEProcessingBufIn64KBReqs
1653 * 65536.0
1654 / BytePerPixel)
1655 / Pitch),
1656 1));
1657 *PixelPTEBytesPerRow = PTERequestSize
1658 * (dml_ceil(
1659 (double) (Pitch * *dpte_row_height - 1)
1660 / PixelPTEReqWidth,
1661 1) + 1);
1662 } else if (ScanDirection == dm_horz) {
1663 *dpte_row_height = PixelPTEReqHeight;
1664 *PixelPTEBytesPerRow = PTERequestSize
1665 * (dml_ceil(((double) SwathWidth - 1) / PixelPTEReqWidth, 1)
1666 + 1);
1667 } else {
1668 *dpte_row_height = dml_min(PixelPTEReqWidth, *MacroTileWidth);
1669 *PixelPTEBytesPerRow = PTERequestSize
1670 * (dml_ceil(
1671 ((double) SwathWidth - 1)
1672 / PixelPTEReqHeight,
1673 1) + 1);
1674 }
1675 if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
1676 <= 64 * PTEBufferSizeInRequests) {
1677 *PTEBufferSizeNotExceeded = true;
1678 } else {
1679 *PTEBufferSizeNotExceeded = false;
1680 }
1681 } else {
1682 *PixelPTEBytesPerRow = 0;
1683 *PTEBufferSizeNotExceeded = true;
1684 }
1685
1686 return PDEAndMetaPTEBytesFrame;
1687}
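/*
 * Worked example (guarded out, hypothetical values): for a horizontally
 * scanned surface the PTE bytes per row follow the dm_horz branch above.
 * With a 1920 pixel swath, a PTE request covering 512 pixels and a 64 byte
 * PTE request size, PixelPTEBytesPerRow = 64 * (ceil((1920 - 1) / 512) + 1)
 * = 64 * 5 = 320 bytes, which is then checked against
 * 64 * PTEBufferSizeInRequests to decide whether the PTE buffer is exceeded.
 */
#if 0
static unsigned int example_pte_bytes_per_row(unsigned int swath_width,
		unsigned int pte_req_width, unsigned int pte_req_size)
{
	return pte_req_size
			* (dml_ceil(((double) swath_width - 1) / pte_req_width, 1)
					+ 1);
}
#endif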
1688
1689static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
1690 struct display_mode_lib *mode_lib)
1691{
1692 unsigned int j, k;
1693
1694 mode_lib->vba.WritebackDISPCLK = 0.0;
1695 mode_lib->vba.DISPCLKWithRamping = 0;
1696 mode_lib->vba.DISPCLKWithoutRamping = 0;
1697 mode_lib->vba.GlobalDPPCLK = 0.0;
1698
1699 // DISPCLK and DPPCLK Calculation
1700 //
1701 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
1702 if (mode_lib->vba.WritebackEnable[k]) {
1703 mode_lib->vba.WritebackDISPCLK =
1704 dml_max(
1705 mode_lib->vba.WritebackDISPCLK,
1706 CalculateWriteBackDISPCLK(
1707 mode_lib->vba.WritebackPixelFormat[k],
1708 mode_lib->vba.PixelClock[k],
1709 mode_lib->vba.WritebackHRatio[k],
1710 mode_lib->vba.WritebackVRatio[k],
1711 mode_lib->vba.WritebackLumaHTaps[k],
1712 mode_lib->vba.WritebackLumaVTaps[k],
1713 mode_lib->vba.WritebackChromaHTaps[k],
1714 mode_lib->vba.WritebackChromaVTaps[k],
1715 mode_lib->vba.WritebackDestinationWidth[k],
1716 mode_lib->vba.HTotal[k],
1717 mode_lib->vba.WritebackChromaLineBufferWidth));
1718 }
1719 }
1720
1721 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
1722 if (mode_lib->vba.HRatio[k] > 1) {
1723 mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
1724 mode_lib->vba.MaxDCHUBToPSCLThroughput,
1725 mode_lib->vba.MaxPSCLToLBThroughput
1726 * mode_lib->vba.HRatio[k]
1727 / dml_ceil(
1728 mode_lib->vba.htaps[k]
1729 / 6.0,
1730 1));
1731 } else {
1732 mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
1733 mode_lib->vba.MaxDCHUBToPSCLThroughput,
1734 mode_lib->vba.MaxPSCLToLBThroughput);
1735 }
1736
1737 mode_lib->vba.DPPCLKUsingSingleDPPLuma =
1738 mode_lib->vba.PixelClock[k]
1739 * dml_max(
1740 mode_lib->vba.vtaps[k] / 6.0
1741 * dml_min(
1742 1.0,
1743 mode_lib->vba.HRatio[k]),
1744 dml_max(
1745 mode_lib->vba.HRatio[k]
1746 * mode_lib->vba.VRatio[k]
1747 / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k],
1748 1.0));
1749
1750 if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6)
1751 && mode_lib->vba.DPPCLKUsingSingleDPPLuma
1752 < 2 * mode_lib->vba.PixelClock[k]) {
1753 mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k];
1754 }
1755
1756 if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
1757 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
1758 mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = 0.0;
1759 mode_lib->vba.DPPCLKUsingSingleDPP[k] =
1760 mode_lib->vba.DPPCLKUsingSingleDPPLuma;
1761 } else {
1762 if (mode_lib->vba.HRatio[k] > 1) {
1763 mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] =
1764 dml_min(
1765 mode_lib->vba.MaxDCHUBToPSCLThroughput,
1766 mode_lib->vba.MaxPSCLToLBThroughput
1767 * mode_lib->vba.HRatio[k]
1768 / 2
1769 / dml_ceil(
1770 mode_lib->vba.HTAPsChroma[k]
1771 / 6.0,
1772 1.0));
1773 } else {
1774 mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min(
1775 mode_lib->vba.MaxDCHUBToPSCLThroughput,
1776 mode_lib->vba.MaxPSCLToLBThroughput);
1777 }
1778 mode_lib->vba.DPPCLKUsingSingleDPPChroma =
1779 mode_lib->vba.PixelClock[k]
1780 * dml_max(
1781 mode_lib->vba.VTAPsChroma[k]
1782 / 6.0
1783 * dml_min(
1784 1.0,
1785 mode_lib->vba.HRatio[k]
1786 / 2),
1787 dml_max(
1788 mode_lib->vba.HRatio[k]
1789 * mode_lib->vba.VRatio[k]
1790 / 4
1791 / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k],
1792 1.0));
1793
1794 if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6)
1795 && mode_lib->vba.DPPCLKUsingSingleDPPChroma
1796 < 2 * mode_lib->vba.PixelClock[k]) {
1797 mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2
1798 * mode_lib->vba.PixelClock[k];
1799 }
1800
1801 mode_lib->vba.DPPCLKUsingSingleDPP[k] = dml_max(
1802 mode_lib->vba.DPPCLKUsingSingleDPPLuma,
1803 mode_lib->vba.DPPCLKUsingSingleDPPChroma);
1804 }
1805 }
1806
1807 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
1808 if (mode_lib->vba.BlendingAndTiming[k] != k)
1809 continue;
1810 if (mode_lib->vba.ODMCombineEnabled[k]) {
1811 mode_lib->vba.DISPCLKWithRamping =
1812 dml_max(
1813 mode_lib->vba.DISPCLKWithRamping,
1814 mode_lib->vba.PixelClock[k] / 2
1815 * (1
1816 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
1817 / 100)
1818 * (1
1819 + mode_lib->vba.DISPCLKRampingMargin
1820 / 100));
1821 mode_lib->vba.DISPCLKWithoutRamping =
1822 dml_max(
1823 mode_lib->vba.DISPCLKWithoutRamping,
1824 mode_lib->vba.PixelClock[k] / 2
1825 * (1
1826 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
1827 / 100));
1828 } else if (!mode_lib->vba.ODMCombineEnabled[k]) {
1829 mode_lib->vba.DISPCLKWithRamping =
1830 dml_max(
1831 mode_lib->vba.DISPCLKWithRamping,
1832 mode_lib->vba.PixelClock[k]
1833 * (1
1834 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
1835 / 100)
1836 * (1
1837 + mode_lib->vba.DISPCLKRampingMargin
1838 / 100));
1839 mode_lib->vba.DISPCLKWithoutRamping =
1840 dml_max(
1841 mode_lib->vba.DISPCLKWithoutRamping,
1842 mode_lib->vba.PixelClock[k]
1843 * (1
1844 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
1845 / 100));
1846 }
1847 }
1848
1849 mode_lib->vba.DISPCLKWithRamping = dml_max(
1850 mode_lib->vba.DISPCLKWithRamping,
1851 mode_lib->vba.WritebackDISPCLK);
1852 mode_lib->vba.DISPCLKWithoutRamping = dml_max(
1853 mode_lib->vba.DISPCLKWithoutRamping,
1854 mode_lib->vba.WritebackDISPCLK);
1855
1856 ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0);
1857 mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
1858 mode_lib->vba.DISPCLKWithRamping,
1859 mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
1860 mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
1861 mode_lib->vba.DISPCLKWithoutRamping,
1862 mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
1863 mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
1864 mode_lib->vba.soc.clock_limits[NumberOfStates - 1].dispclk_mhz,
1865 mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
1866 if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity
1867 > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
1868 mode_lib->vba.DISPCLK_calculated =
1869 mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity;
1870 } else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity
1871 > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
1872 mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity;
1873 } else {
1874 mode_lib->vba.DISPCLK_calculated =
1875 mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity;
1876 }
1877 DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated);
1878
1879 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
1880 mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.DPPCLKUsingSingleDPP[k]
1881 / mode_lib->vba.DPPPerPlane[k]
1882 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
1883 mode_lib->vba.GlobalDPPCLK = dml_max(
1884 mode_lib->vba.GlobalDPPCLK,
1885 mode_lib->vba.DPPCLK_calculated[k]);
1886 }
1887 mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp(
1888 mode_lib->vba.GlobalDPPCLK,
1889 mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
1890 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
1891 mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255
1892 * dml_ceil(
1893 mode_lib->vba.DPPCLK_calculated[k] * 255
1894 / mode_lib->vba.GlobalDPPCLK,
1895 1);
1896 DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]);
1897 }
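/*
 * Note on the loop above (illustrative, guarded out): every per-pipe DPPCLK
 * is snapped onto a 255-step grid under the DFS-rounded global DPPCLK, so
 * each pipe runs at GlobalDPPCLK * n / 255 for an integer n and never below
 * its originally computed requirement.
 */
#if 0
static double example_quantize_dppclk(double dppclk, double global_dppclk)
{
	return global_dppclk / 255
			* dml_ceil(dppclk * 255 / global_dppclk, 1);
}
#endif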
1898
1899 // Urgent Watermark
1900 mode_lib->vba.DCCEnabledAnyPlane = false;
1901 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
1902 if (mode_lib->vba.DCCEnable[k])
1903 mode_lib->vba.DCCEnabledAnyPlane = true;
1904
1905 mode_lib->vba.ReturnBandwidthToDCN = dml_min(
1906 mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
1907 mode_lib->vba.FabricAndDRAMBandwidth * 1000)
1908 * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency / 100;
1909
1910 mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBandwidthToDCN;
1911 mode_lib->vba.ReturnBW = adjust_ReturnBW(
1912 mode_lib,
1913 mode_lib->vba.ReturnBW,
1914 mode_lib->vba.DCCEnabledAnyPlane,
1915 mode_lib->vba.ReturnBandwidthToDCN);
1916
1917 // Re-run the DCC adjustment against the raw (non-derated) return bandwidth to DCN
1918 mode_lib->vba.ReturnBandwidthToDCN = dml_min(
1919 mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
1920 mode_lib->vba.FabricAndDRAMBandwidth * 1000);
1921 mode_lib->vba.ReturnBW = adjust_ReturnBW(
1922 mode_lib,
1923 mode_lib->vba.ReturnBW,
1924 mode_lib->vba.DCCEnabledAnyPlane,
1925 mode_lib->vba.ReturnBandwidthToDCN);
1926
1927 DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK);
1928 DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN);
1929 DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW);
1930
1931 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
1932 bool MainPlaneDoesODMCombine = false;
1933
1934 if (mode_lib->vba.SourceScan[k] == dm_horz)
1935 mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k];
1936 else
1937 mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
1938
1939 if (mode_lib->vba.ODMCombineEnabled[k] == true)
1940 MainPlaneDoesODMCombine = true;
1941 for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
1942 if (mode_lib->vba.BlendingAndTiming[k] == j
1943 && mode_lib->vba.ODMCombineEnabled[j] == true)
1944 MainPlaneDoesODMCombine = true;
1945
1946 if (MainPlaneDoesODMCombine == true)
1947 mode_lib->vba.SwathWidthY[k] = dml_min(
1948 (double) mode_lib->vba.SwathWidthSingleDPPY[k],
1949 dml_round(
1950 mode_lib->vba.HActive[k] / 2.0
1951 * mode_lib->vba.HRatio[k]));
1952 else
1953 mode_lib->vba.SwathWidthY[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
1954 / mode_lib->vba.DPPPerPlane[k];
1955 }
1956
1957 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
1958 if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
1959 mode_lib->vba.BytePerPixelDETY[k] = 8;
1960 mode_lib->vba.BytePerPixelDETC[k] = 0;
1961 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
1962 mode_lib->vba.BytePerPixelDETY[k] = 4;
1963 mode_lib->vba.BytePerPixelDETC[k] = 0;
1964 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
1965 mode_lib->vba.BytePerPixelDETY[k] = 2;
1966 mode_lib->vba.BytePerPixelDETC[k] = 0;
1967 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
1968 mode_lib->vba.BytePerPixelDETY[k] = 1;
1969 mode_lib->vba.BytePerPixelDETC[k] = 0;
1970 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
1971 mode_lib->vba.BytePerPixelDETY[k] = 1;
1972 mode_lib->vba.BytePerPixelDETC[k] = 2;
1973 } else { // dm_420_10
1974 mode_lib->vba.BytePerPixelDETY[k] = 4.0 / 3.0;
1975 mode_lib->vba.BytePerPixelDETC[k] = 8.0 / 3.0;
1976 }
1977 }
1978
1979 mode_lib->vba.TotalDataReadBandwidth = 0.0;
1980 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
1981 mode_lib->vba.ReadBandwidthPlaneLuma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
1982 * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
1983 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
1984 * mode_lib->vba.VRatio[k];
1985 mode_lib->vba.ReadBandwidthPlaneChroma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
1986 / 2 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
1987 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
1988 * mode_lib->vba.VRatio[k] / 2;
1989 DTRACE(
1990 " read_bw[%i] = %fBps",
1991 k,
1992 mode_lib->vba.ReadBandwidthPlaneLuma[k]
1993 + mode_lib->vba.ReadBandwidthPlaneChroma[k]);
1994 mode_lib->vba.TotalDataReadBandwidth += mode_lib->vba.ReadBandwidthPlaneLuma[k]
1995 + mode_lib->vba.ReadBandwidthPlaneChroma[k];
1996 }
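/*
 * Worked example (guarded out, hypothetical values): plane read bandwidth is
 * swath width * bytes per pixel / line time * VRatio.  For a 3840-wide luma
 * swath at 4 bytes per pixel, HTotal = 4400, a 594 MHz pixel clock and
 * VRatio = 1, the line time is 4400 / 594 ~= 7.41 us and the luma read
 * bandwidth is 3840 * 4 / 7.41 ~= 2074 MB/s.
 */
#if 0
static double example_plane_read_bw_luma(double swath_width,
		double byte_per_pixel, double htotal, double pixel_clock_mhz,
		double vratio)
{
	/* pixel clock is in MHz, so htotal / pixel_clock_mhz is in us */
	return swath_width * dml_ceil(byte_per_pixel, 1)
			/ (htotal / pixel_clock_mhz) * vratio;
}
#endif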
1997
1998 mode_lib->vba.TotalDCCActiveDPP = 0;
1999 mode_lib->vba.TotalActiveDPP = 0;
2000 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2001 mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP
2002 + mode_lib->vba.DPPPerPlane[k];
2003 if (mode_lib->vba.DCCEnable[k])
2004 mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
2005 + mode_lib->vba.DPPPerPlane[k];
2006 }
2007
2008 mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency =
2009 (mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK
2010 + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
2011 * mode_lib->vba.NumberOfChannels
2012 / mode_lib->vba.ReturnBW;
2013
2014 mode_lib->vba.LastPixelOfLineExtraWatermark = 0;
2015 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2016 double DataFabricLineDeliveryTimeLuma, DataFabricLineDeliveryTimeChroma;
2017
2018 if (mode_lib->vba.VRatio[k] <= 1.0)
2019 mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
2020 (double) mode_lib->vba.SwathWidthY[k]
2021 * mode_lib->vba.DPPPerPlane[k]
2022 / mode_lib->vba.HRatio[k]
2023 / mode_lib->vba.PixelClock[k];
2024 else
2025 mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
2026 (double) mode_lib->vba.SwathWidthY[k]
2027 / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
2028 / mode_lib->vba.DPPCLK[k];
2029
2030 DataFabricLineDeliveryTimeLuma = mode_lib->vba.SwathWidthSingleDPPY[k]
2031 * mode_lib->vba.SwathHeightY[k]
2032 * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
2033 / (mode_lib->vba.ReturnBW * mode_lib->vba.ReadBandwidthPlaneLuma[k]
2034 / mode_lib->vba.TotalDataReadBandwidth);
2035 mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(
2036 mode_lib->vba.LastPixelOfLineExtraWatermark,
2037 DataFabricLineDeliveryTimeLuma
2038 - mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k]);
2039
2040 if (mode_lib->vba.BytePerPixelDETC[k] == 0)
2041 mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = 0.0;
2042 else if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0)
2043 mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
2044 mode_lib->vba.SwathWidthY[k] / 2.0
2045 * mode_lib->vba.DPPPerPlane[k]
2046 / (mode_lib->vba.HRatio[k] / 2.0)
2047 / mode_lib->vba.PixelClock[k];
2048 else
2049 mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
2050 mode_lib->vba.SwathWidthY[k] / 2.0
2051 / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
2052 / mode_lib->vba.DPPCLK[k];
2053
2054 DataFabricLineDeliveryTimeChroma = mode_lib->vba.SwathWidthSingleDPPY[k] / 2.0
2055 * mode_lib->vba.SwathHeightC[k]
2056 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
2057 / (mode_lib->vba.ReturnBW
2058 * mode_lib->vba.ReadBandwidthPlaneChroma[k]
2059 / mode_lib->vba.TotalDataReadBandwidth);
2060 mode_lib->vba.LastPixelOfLineExtraWatermark =
2061 dml_max(
2062 mode_lib->vba.LastPixelOfLineExtraWatermark,
2063 DataFabricLineDeliveryTimeChroma
2064 - mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
2065 }
2066
2067 mode_lib->vba.UrgentExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency
2068 + (mode_lib->vba.TotalActiveDPP * mode_lib->vba.PixelChunkSizeInKByte
2069 + mode_lib->vba.TotalDCCActiveDPP
2070 * mode_lib->vba.MetaChunkSize) * 1024.0
2071 / mode_lib->vba.ReturnBW;
2072
2073 if (mode_lib->vba.VirtualMemoryEnable)
2074 mode_lib->vba.UrgentExtraLatency += mode_lib->vba.TotalActiveDPP
2075 * mode_lib->vba.PTEChunkSize * 1024.0 / mode_lib->vba.ReturnBW;
2076
2077 mode_lib->vba.UrgentWatermark = mode_lib->vba.UrgentLatency
2078 + mode_lib->vba.LastPixelOfLineExtraWatermark
2079 + mode_lib->vba.UrgentExtraLatency;
2080
2081 DTRACE(" urgent_extra_latency = %fus", mode_lib->vba.UrgentExtraLatency);
2082 DTRACE(" wm_urgent = %fus", mode_lib->vba.UrgentWatermark);
2083
2084 mode_lib->vba.MemoryTripWatermark = mode_lib->vba.UrgentLatency;
2085
2086 mode_lib->vba.TotalActiveWriteback = 0;
2087 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2088 if (mode_lib->vba.WritebackEnable[k])
2089 mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
2090 }
2091
2092 if (mode_lib->vba.TotalActiveWriteback <= 1)
2093 mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency;
2094 else
2095 mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency
2096 + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
2097 / mode_lib->vba.SOCCLK;
2098
2099 DTRACE(" wm_wb_urgent = %fus", mode_lib->vba.WritebackUrgentWatermark);
2100
2101 // NB P-State/DRAM Clock Change Watermark
2102 mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency
2103 + mode_lib->vba.UrgentWatermark;
2104
2105 DTRACE(" wm_pstate_change = %fus", mode_lib->vba.DRAMClockChangeWatermark);
2106
2107 DTRACE(" calculating wb pstate watermark");
2108 DTRACE(" total wb outputs %d", mode_lib->vba.TotalActiveWriteback);
2109 DTRACE(" socclk frequency %f Mhz", mode_lib->vba.SOCCLK);
2110
2111 if (mode_lib->vba.TotalActiveWriteback <= 1)
2112 mode_lib->vba.WritebackDRAMClockChangeWatermark =
2113 mode_lib->vba.DRAMClockChangeLatency
2114 + mode_lib->vba.WritebackLatency;
2115 else
2116 mode_lib->vba.WritebackDRAMClockChangeWatermark =
2117 mode_lib->vba.DRAMClockChangeLatency
2118 + mode_lib->vba.WritebackLatency
2119 + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
2120 / mode_lib->vba.SOCCLK;
2121
2122 DTRACE(" wm_wb_pstate %fus", mode_lib->vba.WritebackDRAMClockChangeWatermark);
2123
2124 // Stutter Efficiency
2125 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2126 mode_lib->vba.LinesInDETY[k] = mode_lib->vba.DETBufferSizeY[k]
2127 / mode_lib->vba.BytePerPixelDETY[k] / mode_lib->vba.SwathWidthY[k];
2128 mode_lib->vba.LinesInDETYRoundedDownToSwath[k] = dml_floor(
2129 mode_lib->vba.LinesInDETY[k],
2130 mode_lib->vba.SwathHeightY[k]);
2131 mode_lib->vba.FullDETBufferingTimeY[k] =
2132 mode_lib->vba.LinesInDETYRoundedDownToSwath[k]
2133 * (mode_lib->vba.HTotal[k]
2134 / mode_lib->vba.PixelClock[k])
2135 / mode_lib->vba.VRatio[k];
2136 if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
2137 mode_lib->vba.LinesInDETC[k] = mode_lib->vba.DETBufferSizeC[k]
2138 / mode_lib->vba.BytePerPixelDETC[k]
2139 / (mode_lib->vba.SwathWidthY[k] / 2);
2140 mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = dml_floor(
2141 mode_lib->vba.LinesInDETC[k],
2142 mode_lib->vba.SwathHeightC[k]);
2143 mode_lib->vba.FullDETBufferingTimeC[k] =
2144 mode_lib->vba.LinesInDETCRoundedDownToSwath[k]
2145 * (mode_lib->vba.HTotal[k]
2146 / mode_lib->vba.PixelClock[k])
2147 / (mode_lib->vba.VRatio[k] / 2);
2148 } else {
2149 mode_lib->vba.LinesInDETC[k] = 0;
2150 mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = 0;
2151 mode_lib->vba.FullDETBufferingTimeC[k] = 999999;
2152 }
2153 }
2154
2155 mode_lib->vba.MinFullDETBufferingTime = 999999.0;
2156 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2157 if (mode_lib->vba.FullDETBufferingTimeY[k]
2158 < mode_lib->vba.MinFullDETBufferingTime) {
2159 mode_lib->vba.MinFullDETBufferingTime =
2160 mode_lib->vba.FullDETBufferingTimeY[k];
2161 mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
2162 (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
2163 / mode_lib->vba.PixelClock[k];
2164 }
2165 if (mode_lib->vba.FullDETBufferingTimeC[k]
2166 < mode_lib->vba.MinFullDETBufferingTime) {
2167 mode_lib->vba.MinFullDETBufferingTime =
2168 mode_lib->vba.FullDETBufferingTimeC[k];
2169 mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
2170 (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
2171 / mode_lib->vba.PixelClock[k];
2172 }
2173 }
2174
2175 mode_lib->vba.AverageReadBandwidthGBytePerSecond = 0.0;
2176 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2177 if (mode_lib->vba.DCCEnable[k]) {
2178 mode_lib->vba.AverageReadBandwidthGBytePerSecond =
2179 mode_lib->vba.AverageReadBandwidthGBytePerSecond
2180 + mode_lib->vba.ReadBandwidthPlaneLuma[k]
2181 / mode_lib->vba.DCCRate[k]
2182 / 1000
2183 + mode_lib->vba.ReadBandwidthPlaneChroma[k]
2184 / mode_lib->vba.DCCRate[k]
2185 / 1000;
2186 } else {
2187 mode_lib->vba.AverageReadBandwidthGBytePerSecond =
2188 mode_lib->vba.AverageReadBandwidthGBytePerSecond
2189 + mode_lib->vba.ReadBandwidthPlaneLuma[k]
2190 / 1000
2191 + mode_lib->vba.ReadBandwidthPlaneChroma[k]
2192 / 1000;
2193 }
2194 if (mode_lib->vba.DCCEnable[k]) {
2195 mode_lib->vba.AverageReadBandwidthGBytePerSecond =
2196 mode_lib->vba.AverageReadBandwidthGBytePerSecond
2197 + mode_lib->vba.ReadBandwidthPlaneLuma[k]
2198 / 1000 / 256
2199 + mode_lib->vba.ReadBandwidthPlaneChroma[k]
2200 / 1000 / 256;
2201 }
2202 if (mode_lib->vba.VirtualMemoryEnable) {
2203 mode_lib->vba.AverageReadBandwidthGBytePerSecond =
2204 mode_lib->vba.AverageReadBandwidthGBytePerSecond
2205 + mode_lib->vba.ReadBandwidthPlaneLuma[k]
2206 / 1000 / 512
2207 + mode_lib->vba.ReadBandwidthPlaneChroma[k]
2208 / 1000 / 512;
2209 }
2210 }
2211
2212 mode_lib->vba.PartOfBurstThatFitsInROB =
2213 dml_min(
2214 mode_lib->vba.MinFullDETBufferingTime
2215 * mode_lib->vba.TotalDataReadBandwidth,
2216 mode_lib->vba.ROBBufferSizeInKByte * 1024
2217 * mode_lib->vba.TotalDataReadBandwidth
2218 / (mode_lib->vba.AverageReadBandwidthGBytePerSecond
2219 * 1000));
2220 mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB
2221 * (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000)
2222 / mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.ReturnBW
2223 + (mode_lib->vba.MinFullDETBufferingTime
2224 * mode_lib->vba.TotalDataReadBandwidth
2225 - mode_lib->vba.PartOfBurstThatFitsInROB)
2226 / (mode_lib->vba.DCFCLK * 64);
2227 if (mode_lib->vba.TotalActiveWriteback == 0) {
2228 mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1
2229 - (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime)
2230 / mode_lib->vba.MinFullDETBufferingTime) * 100;
2231 } else {
2232 mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0;
2233 }
2234
2235 mode_lib->vba.SmallestVBlank = 999999;
2236 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2237 if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
2238 mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k]
2239 - mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k]
2240 / mode_lib->vba.PixelClock[k];
2241 } else {
2242 mode_lib->vba.VBlankTime = 0;
2243 }
2244 mode_lib->vba.SmallestVBlank = dml_min(
2245 mode_lib->vba.SmallestVBlank,
2246 mode_lib->vba.VBlankTime);
2247 }
2248
2249 mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100
2250 * (mode_lib->vba.FrameTimeForMinFullDETBufferingTime
2251 - mode_lib->vba.SmallestVBlank)
2252 + mode_lib->vba.SmallestVBlank)
2253 / mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100;
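/*
 * Worked example for the expression above (hypothetical values): if the DET
 * can buffer MinFullDETBufferingTime = 50 us of data, self-refresh exit takes
 * 10 us and refilling the burst takes 10 us, stutter efficiency excluding
 * vblank is (1 - (10 + 10) / 50) * 100 = 60%.  Blending in a 500 us vblank
 * over a 16667 us frame then gives
 * (0.6 * (16667 - 500) + 500) / 16667 * 100, roughly 61%.
 */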
2254
2255 // DCFCLK Deep Sleep
2256 mode_lib->vba.DCFClkDeepSleep = 8.0;
2257
2258 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) {
2259 if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
2260 mode_lib->vba.DCFCLKDeepSleepPerPlane =
2261 dml_max(
2262 1.1 * mode_lib->vba.SwathWidthY[k]
2263 * dml_ceil(
2264 mode_lib->vba.BytePerPixelDETY[k],
2265 1) / 32
2266 / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k],
2267 1.1 * mode_lib->vba.SwathWidthY[k] / 2.0
2268 * dml_ceil(
2269 mode_lib->vba.BytePerPixelDETC[k],
2270 2) / 32
2271 / mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
2272 } else
2273 mode_lib->vba.DCFCLKDeepSleepPerPlane = 1.1 * mode_lib->vba.SwathWidthY[k]
2274 * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / 64.0
2275 / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k];
2276 mode_lib->vba.DCFCLKDeepSleepPerPlane = dml_max(
2277 mode_lib->vba.DCFCLKDeepSleepPerPlane,
2278 mode_lib->vba.PixelClock[k] / 16.0);
2279 mode_lib->vba.DCFClkDeepSleep = dml_max(
2280 mode_lib->vba.DCFClkDeepSleep,
2281 mode_lib->vba.DCFCLKDeepSleepPerPlane);
2282
2283 DTRACE(
2284 " dcfclk_deepsleep_per_plane[%i] = %fMHz",
2285 k,
2286 mode_lib->vba.DCFCLKDeepSleepPerPlane);
2287 }
2288
2289 DTRACE(" dcfclk_deepsleep_mhz = %fMHz", mode_lib->vba.DCFClkDeepSleep);
2290
2291 // Stutter Watermark
2292 mode_lib->vba.StutterExitWatermark = mode_lib->vba.SRExitTime
2293 + mode_lib->vba.LastPixelOfLineExtraWatermark
2294 + mode_lib->vba.UrgentExtraLatency + 10 / mode_lib->vba.DCFClkDeepSleep;
2295 mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.SREnterPlusExitTime
2296 + mode_lib->vba.LastPixelOfLineExtraWatermark
2297 + mode_lib->vba.UrgentExtraLatency;
2298
2299 DTRACE(" wm_cstate_exit = %fus", mode_lib->vba.StutterExitWatermark);
2300 DTRACE(" wm_cstate_enter_exit = %fus", mode_lib->vba.StutterEnterPlusExitWatermark);
2301
2302 // Urgent Latency Supported
2303 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2304 mode_lib->vba.EffectiveDETPlusLBLinesLuma =
2305 dml_floor(
2306 mode_lib->vba.LinesInDETY[k]
2307 + dml_min(
2308 mode_lib->vba.LinesInDETY[k]
2309 * mode_lib->vba.DPPCLK[k]
2310 * mode_lib->vba.BytePerPixelDETY[k]
2311 * mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
2312 / (mode_lib->vba.ReturnBW
2313 / mode_lib->vba.DPPPerPlane[k]),
2314 (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
2315 mode_lib->vba.SwathHeightY[k]);
2316
2317 mode_lib->vba.UrgentLatencySupportUsLuma = mode_lib->vba.EffectiveDETPlusLBLinesLuma
2318 * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
2319 / mode_lib->vba.VRatio[k]
2320 - mode_lib->vba.EffectiveDETPlusLBLinesLuma
2321 * mode_lib->vba.SwathWidthY[k]
2322 * mode_lib->vba.BytePerPixelDETY[k]
2323 / (mode_lib->vba.ReturnBW
2324 / mode_lib->vba.DPPPerPlane[k]);
2325
2326 if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
2327 mode_lib->vba.EffectiveDETPlusLBLinesChroma =
2328 dml_floor(
2329 mode_lib->vba.LinesInDETC[k]
2330 + dml_min(
2331 mode_lib->vba.LinesInDETC[k]
2332 * mode_lib->vba.DPPCLK[k]
2333 * mode_lib->vba.BytePerPixelDETC[k]
2334 * mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
2335 / (mode_lib->vba.ReturnBW
2336 / mode_lib->vba.DPPPerPlane[k]),
2337 (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
2338 mode_lib->vba.SwathHeightC[k]);
2339 mode_lib->vba.UrgentLatencySupportUsChroma =
2340 mode_lib->vba.EffectiveDETPlusLBLinesChroma
2341 * (mode_lib->vba.HTotal[k]
2342 / mode_lib->vba.PixelClock[k])
2343 / (mode_lib->vba.VRatio[k] / 2)
2344 - mode_lib->vba.EffectiveDETPlusLBLinesChroma
2345 * (mode_lib->vba.SwathWidthY[k]
2346 / 2)
2347 * mode_lib->vba.BytePerPixelDETC[k]
2348 / (mode_lib->vba.ReturnBW
2349 / mode_lib->vba.DPPPerPlane[k]);
2350 mode_lib->vba.UrgentLatencySupportUs[k] = dml_min(
2351 mode_lib->vba.UrgentLatencySupportUsLuma,
2352 mode_lib->vba.UrgentLatencySupportUsChroma);
2353 } else {
2354 mode_lib->vba.UrgentLatencySupportUs[k] =
2355 mode_lib->vba.UrgentLatencySupportUsLuma;
2356 }
2357 }
2358
2359 mode_lib->vba.MinUrgentLatencySupportUs = 999999;
2360 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2361 mode_lib->vba.MinUrgentLatencySupportUs = dml_min(
2362 mode_lib->vba.MinUrgentLatencySupportUs,
2363 mode_lib->vba.UrgentLatencySupportUs[k]);
2364 }
2365
2366 // Non-Urgent Latency Tolerance
2367 mode_lib->vba.NonUrgentLatencyTolerance = mode_lib->vba.MinUrgentLatencySupportUs
2368 - mode_lib->vba.UrgentWatermark;
2369
2370 // DSCCLK
2371 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2372 if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
2373 mode_lib->vba.DSCCLK_calculated[k] = 0.0;
2374 } else {
2375 if (mode_lib->vba.OutputFormat[k] == dm_420
2376 || mode_lib->vba.OutputFormat[k] == dm_n422)
2377 mode_lib->vba.DSCFormatFactor = 2;
2378 else
2379 mode_lib->vba.DSCFormatFactor = 1;
2380 if (mode_lib->vba.ODMCombineEnabled[k])
2381 mode_lib->vba.DSCCLK_calculated[k] =
2382 mode_lib->vba.PixelClockBackEnd[k] / 6
2383 / mode_lib->vba.DSCFormatFactor
2384 / (1
2385 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
2386 / 100);
2387 else
2388 mode_lib->vba.DSCCLK_calculated[k] =
2389 mode_lib->vba.PixelClockBackEnd[k] / 3
2390 / mode_lib->vba.DSCFormatFactor
2391 / (1
2392 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
2393 / 100);
2394 }
2395 }
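/*
 * Worked example (guarded out, hypothetical values): DSC consumes three
 * pixels per DSCCLK cycle (effectively six for native 4:2:0 / 4:2:2, where
 * the format factor is 2), and ODM combine halves the per-engine pixel rate.
 * For a 1188 MHz back-end pixel clock, RGB output, no ODM combine and 0.5%
 * down-spreading, DSCCLK = 1188 / 3 / 1 / (1 - 0.5 / 100) ~= 398 MHz.
 */
#if 0
static double example_dscclk(double pixel_clock_backend, int format_factor,
		bool odm_combine, double downspread_pct)
{
	return pixel_clock_backend / (odm_combine ? 6 : 3) / format_factor
			/ (1 - downspread_pct / 100);
}
#endif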
2396
2397 // DSC Delay
2398 // TODO
2399 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2400 double bpp = mode_lib->vba.OutputBpp[k];
2401 unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k];
2402
2403 if (mode_lib->vba.DSCEnabled[k] && bpp != 0) {
2404 if (!mode_lib->vba.ODMCombineEnabled[k]) {
2405 mode_lib->vba.DSCDelay[k] =
2406 dscceComputeDelay(
2407 mode_lib->vba.DSCInputBitPerComponent[k],
2408 bpp,
2409 dml_ceil(
2410 (double) mode_lib->vba.HActive[k]
2411 / mode_lib->vba.NumberOfDSCSlices[k],
2412 1),
2413 slices,
2414 mode_lib->vba.OutputFormat[k])
2415 + dscComputeDelay(
2416 mode_lib->vba.OutputFormat[k]);
2417 } else {
2418 mode_lib->vba.DSCDelay[k] =
2419 2
2420 * (dscceComputeDelay(
2421 mode_lib->vba.DSCInputBitPerComponent[k],
2422 bpp,
2423 dml_ceil(
2424 (double) mode_lib->vba.HActive[k]
2425 / mode_lib->vba.NumberOfDSCSlices[k],
2426 1),
2427 slices / 2.0,
2428 mode_lib->vba.OutputFormat[k])
2429 + dscComputeDelay(
2430 mode_lib->vba.OutputFormat[k]));
2431 }
2432 mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[k]
2433 * mode_lib->vba.PixelClock[k]
2434 / mode_lib->vba.PixelClockBackEnd[k];
2435 } else {
2436 mode_lib->vba.DSCDelay[k] = 0;
2437 }
2438 }
2439
2440 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
2441 for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes
2442 if (j != k && mode_lib->vba.BlendingAndTiming[k] == j
2443 && mode_lib->vba.DSCEnabled[j])
2444 mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[j];
2445
2446 // Prefetch
2447 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2448 unsigned int PDEAndMetaPTEBytesFrameY;
2449 unsigned int PixelPTEBytesPerRowY;
2450 unsigned int MetaRowByteY;
2451 unsigned int MetaRowByteC;
2452 unsigned int PDEAndMetaPTEBytesFrameC;
2453 unsigned int PixelPTEBytesPerRowC;
2454
2455 Calculate256BBlockSizes(
2456 mode_lib->vba.SourcePixelFormat[k],
2457 mode_lib->vba.SurfaceTiling[k],
2458 dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
2459 dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2),
2460 &mode_lib->vba.BlockHeight256BytesY[k],
2461 &mode_lib->vba.BlockHeight256BytesC[k],
2462 &mode_lib->vba.BlockWidth256BytesY[k],
2463 &mode_lib->vba.BlockWidth256BytesC[k]);
2464 PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
2465 mode_lib,
2466 mode_lib->vba.DCCEnable[k],
2467 mode_lib->vba.BlockHeight256BytesY[k],
2468 mode_lib->vba.BlockWidth256BytesY[k],
2469 mode_lib->vba.SourcePixelFormat[k],
2470 mode_lib->vba.SurfaceTiling[k],
2471 dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
2472 mode_lib->vba.SourceScan[k],
2473 mode_lib->vba.ViewportWidth[k],
2474 mode_lib->vba.ViewportHeight[k],
2475 mode_lib->vba.SwathWidthY[k],
2476 mode_lib->vba.VirtualMemoryEnable,
2477 mode_lib->vba.VMMPageSize,
2478 mode_lib->vba.PTEBufferSizeInRequests,
2479 mode_lib->vba.PDEProcessingBufIn64KBReqs,
2480 mode_lib->vba.PitchY[k],
2481 mode_lib->vba.DCCMetaPitchY[k],
2482 &mode_lib->vba.MacroTileWidthY[k],
2483 &MetaRowByteY,
2484 &PixelPTEBytesPerRowY,
2485 &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel],
2486 &mode_lib->vba.dpte_row_height[k],
2487 &mode_lib->vba.meta_row_height[k]);
2488 mode_lib->vba.PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
2489 mode_lib,
2490 mode_lib->vba.VRatio[k],
2491 mode_lib->vba.vtaps[k],
2492 mode_lib->vba.Interlace[k],
2493 mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
2494 mode_lib->vba.SwathHeightY[k],
2495 mode_lib->vba.ViewportYStartY[k],
2496 &mode_lib->vba.VInitPreFillY[k],
2497 &mode_lib->vba.MaxNumSwathY[k]);
2498
2499 if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
2500 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
2501 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
2502 && mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) {
2503 PDEAndMetaPTEBytesFrameC =
2504 CalculateVMAndRowBytes(
2505 mode_lib,
2506 mode_lib->vba.DCCEnable[k],
2507 mode_lib->vba.BlockHeight256BytesC[k],
2508 mode_lib->vba.BlockWidth256BytesC[k],
2509 mode_lib->vba.SourcePixelFormat[k],
2510 mode_lib->vba.SurfaceTiling[k],
2511 dml_ceil(
2512 mode_lib->vba.BytePerPixelDETC[k],
2513 2),
2514 mode_lib->vba.SourceScan[k],
2515 mode_lib->vba.ViewportWidth[k] / 2,
2516 mode_lib->vba.ViewportHeight[k] / 2,
2517 mode_lib->vba.SwathWidthY[k] / 2,
2518 mode_lib->vba.VirtualMemoryEnable,
2519 mode_lib->vba.VMMPageSize,
2520 mode_lib->vba.PTEBufferSizeInRequests,
2521 mode_lib->vba.PDEProcessingBufIn64KBReqs,
2522 mode_lib->vba.PitchC[k],
2523 0,
2524 &mode_lib->vba.MacroTileWidthC[k],
2525 &MetaRowByteC,
2526 &PixelPTEBytesPerRowC,
2527 &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel],
2528 &mode_lib->vba.dpte_row_height_chroma[k],
2529 &mode_lib->vba.meta_row_height_chroma[k]);
2530 mode_lib->vba.PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
2531 mode_lib,
2532 mode_lib->vba.VRatio[k] / 2,
2533 mode_lib->vba.VTAPsChroma[k],
2534 mode_lib->vba.Interlace[k],
2535 mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
2536 mode_lib->vba.SwathHeightC[k],
2537 mode_lib->vba.ViewportYStartC[k],
2538 &mode_lib->vba.VInitPreFillC[k],
2539 &mode_lib->vba.MaxNumSwathC[k]);
2540 } else {
2541 PixelPTEBytesPerRowC = 0;
2542 PDEAndMetaPTEBytesFrameC = 0;
2543 MetaRowByteC = 0;
2544 mode_lib->vba.MaxNumSwathC[k] = 0;
2545 mode_lib->vba.PrefetchSourceLinesC[k] = 0;
2546 }
2547
2548 mode_lib->vba.PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
2549 mode_lib->vba.PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
2550 + PDEAndMetaPTEBytesFrameC;
2551 mode_lib->vba.MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
2552
2553 CalculateActiveRowBandwidth(
2554 mode_lib->vba.VirtualMemoryEnable,
2555 mode_lib->vba.SourcePixelFormat[k],
2556 mode_lib->vba.VRatio[k],
2557 mode_lib->vba.DCCEnable[k],
2558 mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
2559 MetaRowByteY,
2560 MetaRowByteC,
2561 mode_lib->vba.meta_row_height[k],
2562 mode_lib->vba.meta_row_height_chroma[k],
2563 PixelPTEBytesPerRowY,
2564 PixelPTEBytesPerRowC,
2565 mode_lib->vba.dpte_row_height[k],
2566 mode_lib->vba.dpte_row_height_chroma[k],
2567 &mode_lib->vba.meta_row_bw[k],
2568 &mode_lib->vba.dpte_row_bw[k],
2569 &mode_lib->vba.qual_row_bw[k]);
2570 }
2571
2572 mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFClkDeepSleep;
2573
2574 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2575 if (mode_lib->vba.BlendingAndTiming[k] == k) {
2576 if (mode_lib->vba.WritebackEnable[k] == true) {
2577 mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
2578 mode_lib->vba.WritebackLatency
2579 + CalculateWriteBackDelay(
2580 mode_lib->vba.WritebackPixelFormat[k],
2581 mode_lib->vba.WritebackHRatio[k],
2582 mode_lib->vba.WritebackVRatio[k],
2583 mode_lib->vba.WritebackLumaHTaps[k],
2584 mode_lib->vba.WritebackLumaVTaps[k],
2585 mode_lib->vba.WritebackChromaHTaps[k],
2586 mode_lib->vba.WritebackChromaVTaps[k],
2587 mode_lib->vba.WritebackDestinationWidth[k])
2588 / mode_lib->vba.DISPCLK;
2589 } else
2590 mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
2591 for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
2592 if (mode_lib->vba.BlendingAndTiming[j] == k
2593 && mode_lib->vba.WritebackEnable[j] == true) {
2594 mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
2595 dml_max(
2596 mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k],
2597 mode_lib->vba.WritebackLatency
2598 + CalculateWriteBackDelay(
2599 mode_lib->vba.WritebackPixelFormat[j],
2600 mode_lib->vba.WritebackHRatio[j],
2601 mode_lib->vba.WritebackVRatio[j],
2602 mode_lib->vba.WritebackLumaHTaps[j],
2603 mode_lib->vba.WritebackLumaVTaps[j],
2604 mode_lib->vba.WritebackChromaHTaps[j],
2605 mode_lib->vba.WritebackChromaVTaps[j],
2606 mode_lib->vba.WritebackDestinationWidth[j])
2607 / mode_lib->vba.DISPCLK);
2608 }
2609 }
2610 }
2611 }
2612
2613 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
2614 for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
2615 if (mode_lib->vba.BlendingAndTiming[k] == j)
2616 mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
2617 mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][j];
2618
2619 mode_lib->vba.VStartupLines = 13;
2620 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2621 mode_lib->vba.MaxVStartupLines[k] =
2622 mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
2623 - dml_max(
2624 1.0,
2625 dml_ceil(
2626 mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k]
2627 / (mode_lib->vba.HTotal[k]
2628 / mode_lib->vba.PixelClock[k]),
2629 1));
2630 }
2631
2632 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
2633 mode_lib->vba.MaximumMaxVStartupLines = dml_max(
2634 mode_lib->vba.MaximumMaxVStartupLines,
2635 mode_lib->vba.MaxVStartupLines[k]);
2636
2637 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2638 mode_lib->vba.cursor_bw[k] = 0.0;
2639 for (j = 0; j < mode_lib->vba.NumberOfCursors[k]; ++j)
2640 mode_lib->vba.cursor_bw[k] += mode_lib->vba.CursorWidth[k][j]
2641 * mode_lib->vba.CursorBPP[k][j] / 8.0
2642 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
2643 * mode_lib->vba.VRatio[k];
2644 }
2645
2646 do {
2647 double MaxTotalRDBandwidth = 0;
2648 bool DestinationLineTimesForPrefetchLessThan2 = false;
2649 bool VRatioPrefetchMoreThan4 = false;
2650 bool prefetch_vm_bw_valid = true;
2651 bool prefetch_row_bw_valid = true;
2652 double TWait = CalculateTWait(
2653 mode_lib->vba.PrefetchMode,
2654 mode_lib->vba.DRAMClockChangeLatency,
2655 mode_lib->vba.UrgentLatency,
2656 mode_lib->vba.SREnterPlusExitTime);
2657
2658 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2659 if (mode_lib->vba.XFCEnabled[k] == true) {
2660 mode_lib->vba.XFCRemoteSurfaceFlipDelay =
2661 CalculateRemoteSurfaceFlipDelay(
2662 mode_lib,
2663 mode_lib->vba.VRatio[k],
2664 mode_lib->vba.SwathWidthY[k],
2665 dml_ceil(
2666 mode_lib->vba.BytePerPixelDETY[k],
2667 1),
2668 mode_lib->vba.HTotal[k]
2669 / mode_lib->vba.PixelClock[k],
2670 mode_lib->vba.XFCTSlvVupdateOffset,
2671 mode_lib->vba.XFCTSlvVupdateWidth,
2672 mode_lib->vba.XFCTSlvVreadyOffset,
2673 mode_lib->vba.XFCXBUFLatencyTolerance,
2674 mode_lib->vba.XFCFillBWOverhead,
2675 mode_lib->vba.XFCSlvChunkSize,
2676 mode_lib->vba.XFCBusTransportTime,
2677 mode_lib->vba.TCalc,
2678 TWait,
2679 &mode_lib->vba.SrcActiveDrainRate,
2680 &mode_lib->vba.TInitXFill,
2681 &mode_lib->vba.TslvChk);
2682 } else {
2683 mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0;
2684 }
2685 mode_lib->vba.ErrorResult[k] =
2686 CalculatePrefetchSchedule(
2687 mode_lib,
2688 mode_lib->vba.DPPCLK[k],
2689 mode_lib->vba.DISPCLK,
2690 mode_lib->vba.PixelClock[k],
2691 mode_lib->vba.DCFClkDeepSleep,
2692 mode_lib->vba.DSCDelay[k],
2693 mode_lib->vba.DPPPerPlane[k],
2694 mode_lib->vba.ScalerEnabled[k],
2695 mode_lib->vba.NumberOfCursors[k],
2696 mode_lib->vba.DPPCLKDelaySubtotal,
2697 mode_lib->vba.DPPCLKDelaySCL,
2698 mode_lib->vba.DPPCLKDelaySCLLBOnly,
2699 mode_lib->vba.DPPCLKDelayCNVCFormater,
2700 mode_lib->vba.DPPCLKDelayCNVCCursor,
2701 mode_lib->vba.DISPCLKDelaySubtotal,
2702 (unsigned int) (mode_lib->vba.SwathWidthY[k]
2703 / mode_lib->vba.HRatio[k]),
2704 mode_lib->vba.OutputFormat[k],
2705 mode_lib->vba.VTotal[k]
2706 - mode_lib->vba.VActive[k],
2707 mode_lib->vba.HTotal[k],
2708 mode_lib->vba.MaxInterDCNTileRepeaters,
2709 dml_min(
2710 mode_lib->vba.VStartupLines,
2711 mode_lib->vba.MaxVStartupLines[k]),
2712 mode_lib->vba.MaxPageTableLevels,
2713 mode_lib->vba.VirtualMemoryEnable,
2714 mode_lib->vba.DynamicMetadataEnable[k],
2715 mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
2716 mode_lib->vba.DynamicMetadataTransmittedBytes[k],
2717 mode_lib->vba.DCCEnable[k],
2718 mode_lib->vba.UrgentLatency,
2719 mode_lib->vba.UrgentExtraLatency,
2720 mode_lib->vba.TCalc,
2721 mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
2722 mode_lib->vba.MetaRowByte[k],
2723 mode_lib->vba.PixelPTEBytesPerRow[k],
2724 mode_lib->vba.PrefetchSourceLinesY[k],
2725 mode_lib->vba.SwathWidthY[k],
2726 mode_lib->vba.BytePerPixelDETY[k],
2727 mode_lib->vba.VInitPreFillY[k],
2728 mode_lib->vba.MaxNumSwathY[k],
2729 mode_lib->vba.PrefetchSourceLinesC[k],
2730 mode_lib->vba.BytePerPixelDETC[k],
2731 mode_lib->vba.VInitPreFillC[k],
2732 mode_lib->vba.MaxNumSwathC[k],
2733 mode_lib->vba.SwathHeightY[k],
2734 mode_lib->vba.SwathHeightC[k],
2735 TWait,
2736 mode_lib->vba.XFCEnabled[k],
2737 mode_lib->vba.XFCRemoteSurfaceFlipDelay,
2738 mode_lib->vba.Interlace[k],
2739 mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
2740 &mode_lib->vba.DSTXAfterScaler[k],
2741 &mode_lib->vba.DSTYAfterScaler[k],
2742 &mode_lib->vba.DestinationLinesForPrefetch[k],
2743 &mode_lib->vba.PrefetchBandwidth[k],
2744 &mode_lib->vba.DestinationLinesToRequestVMInVBlank[k],
2745 &mode_lib->vba.DestinationLinesToRequestRowInVBlank[k],
2746 &mode_lib->vba.VRatioPrefetchY[k],
2747 &mode_lib->vba.VRatioPrefetchC[k],
2748 &mode_lib->vba.RequiredPrefetchPixDataBW[k],
2749 &mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
2750 &mode_lib->vba.Tno_bw[k],
2751 &mode_lib->vba.VUpdateOffsetPix[k],
2752 &mode_lib->vba.VUpdateWidthPix[k],
2753 &mode_lib->vba.VReadyOffsetPix[k]);
2754 if (mode_lib->vba.BlendingAndTiming[k] == k) {
2755 mode_lib->vba.VStartup[k] = dml_min(
2756 mode_lib->vba.VStartupLines,
2757 mode_lib->vba.MaxVStartupLines[k]);
2758 if (mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata
2759 != 0) {
2760 mode_lib->vba.VStartup[k] =
2761 mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
2762 }
2763 } else {
2764 mode_lib->vba.VStartup[k] =
2765 dml_min(
2766 mode_lib->vba.VStartupLines,
2767 mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]);
2768 }
2769 }
2770
2771 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2772
2773 if (mode_lib->vba.PDEAndMetaPTEBytesFrame[k] == 0)
2774 mode_lib->vba.prefetch_vm_bw[k] = 0;
2775 else if (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] > 0) {
2776 mode_lib->vba.prefetch_vm_bw[k] =
2777 (double) mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
2778 / (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
2779 * mode_lib->vba.HTotal[k]
2780 / mode_lib->vba.PixelClock[k]);
2781 } else {
2782 mode_lib->vba.prefetch_vm_bw[k] = 0;
2783 prefetch_vm_bw_valid = false;
2784 }
2785 if (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k]
2786 == 0)
2787 mode_lib->vba.prefetch_row_bw[k] = 0;
2788 else if (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] > 0) {
2789 mode_lib->vba.prefetch_row_bw[k] =
2790 (double) (mode_lib->vba.MetaRowByte[k]
2791 + mode_lib->vba.PixelPTEBytesPerRow[k])
2792 / (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]
2793 * mode_lib->vba.HTotal[k]
2794 / mode_lib->vba.PixelClock[k]);
2795 } else {
2796 mode_lib->vba.prefetch_row_bw[k] = 0;
2797 prefetch_row_bw_valid = false;
2798 }
2799
2800 MaxTotalRDBandwidth =
2801 MaxTotalRDBandwidth + mode_lib->vba.cursor_bw[k]
2802 + dml_max(
2803 mode_lib->vba.prefetch_vm_bw[k],
2804 dml_max(
2805 mode_lib->vba.prefetch_row_bw[k],
2806 dml_max(
2807 mode_lib->vba.ReadBandwidthPlaneLuma[k]
2808 + mode_lib->vba.ReadBandwidthPlaneChroma[k],
2809 mode_lib->vba.RequiredPrefetchPixDataBW[k])
2810 + mode_lib->vba.meta_row_bw[k]
2811 + mode_lib->vba.dpte_row_bw[k]));
2812
2813 if (mode_lib->vba.DestinationLinesForPrefetch[k] < 2)
2814 DestinationLineTimesForPrefetchLessThan2 = true;
2815 if (mode_lib->vba.VRatioPrefetchY[k] > 4
2816 || mode_lib->vba.VRatioPrefetchC[k] > 4)
2817 VRatioPrefetchMoreThan4 = true;
2818 }
2819
2820 if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && prefetch_vm_bw_valid
2821 && prefetch_row_bw_valid && !VRatioPrefetchMoreThan4
2822 && !DestinationLineTimesForPrefetchLessThan2)
2823 mode_lib->vba.PrefetchModeSupported = true;
2824 else {
2825 mode_lib->vba.PrefetchModeSupported = false;
2826 dml_print(
2827 "DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
2828 }
2829
2830 if (mode_lib->vba.PrefetchModeSupported == true) {
2831 double final_flip_bw[DC__NUM_DPP__MAX];
2832 unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
2833 double total_dcn_read_bw_with_flip = 0;
2834
2835 mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW;
2836 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2837 mode_lib->vba.BandwidthAvailableForImmediateFlip =
2838 mode_lib->vba.BandwidthAvailableForImmediateFlip
2839 - mode_lib->vba.cursor_bw[k]
2840 - dml_max(
2841 mode_lib->vba.ReadBandwidthPlaneLuma[k]
2842 + mode_lib->vba.ReadBandwidthPlaneChroma[k]
2843 + mode_lib->vba.qual_row_bw[k],
2844 mode_lib->vba.PrefetchBandwidth[k]);
2845 }
2846
2847 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2848 ImmediateFlipBytes[k] = 0;
2849 if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
2850 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
2851 ImmediateFlipBytes[k] =
2852 mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
2853 + mode_lib->vba.MetaRowByte[k]
2854 + mode_lib->vba.PixelPTEBytesPerRow[k];
2855 }
2856 }
2857 mode_lib->vba.TotImmediateFlipBytes = 0;
2858 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2859 if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
2860 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
2861 mode_lib->vba.TotImmediateFlipBytes =
2862 mode_lib->vba.TotImmediateFlipBytes
2863 + ImmediateFlipBytes[k];
2864 }
2865 }
2866 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2867 CalculateFlipSchedule(
2868 mode_lib,
2869 mode_lib->vba.UrgentExtraLatency,
2870 mode_lib->vba.UrgentLatency,
2871 mode_lib->vba.MaxPageTableLevels,
2872 mode_lib->vba.VirtualMemoryEnable,
2873 mode_lib->vba.BandwidthAvailableForImmediateFlip,
2874 mode_lib->vba.TotImmediateFlipBytes,
2875 mode_lib->vba.SourcePixelFormat[k],
2876 ImmediateFlipBytes[k],
2877 mode_lib->vba.HTotal[k]
2878 / mode_lib->vba.PixelClock[k],
2879 mode_lib->vba.VRatio[k],
2880 mode_lib->vba.Tno_bw[k],
2881 mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
2882 mode_lib->vba.MetaRowByte[k],
2883 mode_lib->vba.PixelPTEBytesPerRow[k],
2884 mode_lib->vba.DCCEnable[k],
2885 mode_lib->vba.dpte_row_height[k],
2886 mode_lib->vba.meta_row_height[k],
2887 mode_lib->vba.qual_row_bw[k],
2888 &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
2889 &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
2890 &final_flip_bw[k],
2891 &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
2892 }
2893 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2894 total_dcn_read_bw_with_flip =
2895 total_dcn_read_bw_with_flip
2896 + mode_lib->vba.cursor_bw[k]
2897 + dml_max(
2898 mode_lib->vba.prefetch_vm_bw[k],
2899 dml_max(
2900 mode_lib->vba.prefetch_row_bw[k],
2901 final_flip_bw[k]
2902 + dml_max(
2903 mode_lib->vba.ReadBandwidthPlaneLuma[k]
2904 + mode_lib->vba.ReadBandwidthPlaneChroma[k],
2905 mode_lib->vba.RequiredPrefetchPixDataBW[k])));
2906 }
2907 mode_lib->vba.ImmediateFlipSupported = true;
2908 if (total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) {
2909 mode_lib->vba.ImmediateFlipSupported = false;
2910 }
2911 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2912 if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
2913 mode_lib->vba.ImmediateFlipSupported = false;
2914 }
2915 }
2916 } else {
2917 mode_lib->vba.ImmediateFlipSupported = false;
2918 }
2919
2920 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2921 if (mode_lib->vba.ErrorResult[k]) {
2922 mode_lib->vba.PrefetchModeSupported = false;
2923 dml_print(
2924 "DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
2925 }
2926 }
2927
2928 mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1;
2929 } while (!((mode_lib->vba.PrefetchModeSupported
2930 && (!mode_lib->vba.ImmediateFlipSupport
2931 || mode_lib->vba.ImmediateFlipSupported))
2932 || mode_lib->vba.MaximumMaxVStartupLines < mode_lib->vba.VStartupLines));
2933
2934 //Display Pipeline Delivery Time in Prefetch
2935 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2936 if (mode_lib->vba.VRatioPrefetchY[k] <= 1) {
2937 mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
2938 mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k]
2939 / mode_lib->vba.HRatio[k]
2940 / mode_lib->vba.PixelClock[k];
2941 } else {
2942 mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
2943 mode_lib->vba.SwathWidthY[k]
2944 / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
2945 / mode_lib->vba.DPPCLK[k];
2946 }
2947 if (mode_lib->vba.BytePerPixelDETC[k] == 0) {
2948 mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
2949 } else {
2950 if (mode_lib->vba.VRatioPrefetchC[k] <= 1) {
2951 mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
2952 mode_lib->vba.SwathWidthY[k]
2953 * mode_lib->vba.DPPPerPlane[k]
2954 / mode_lib->vba.HRatio[k]
2955 / mode_lib->vba.PixelClock[k];
2956 } else {
2957 mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
2958 mode_lib->vba.SwathWidthY[k]
2959 / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
2960 / mode_lib->vba.DPPCLK[k];
2961 }
2962 }
2963 }
2964
2965 // Min TTUVBlank
2966 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2967 if (mode_lib->vba.PrefetchMode == 0) {
2968 mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = true;
2969 mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
2970 mode_lib->vba.MinTTUVBlank[k] = dml_max(
2971 mode_lib->vba.DRAMClockChangeWatermark,
2972 dml_max(
2973 mode_lib->vba.StutterEnterPlusExitWatermark,
2974 mode_lib->vba.UrgentWatermark));
2975 } else if (mode_lib->vba.PrefetchMode == 1) {
2976 mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
2977 mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
2978 mode_lib->vba.MinTTUVBlank[k] = dml_max(
2979 mode_lib->vba.StutterEnterPlusExitWatermark,
2980 mode_lib->vba.UrgentWatermark);
2981 } else {
2982 mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
2983 mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = false;
2984 mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark;
2985 }
2986 if (!mode_lib->vba.DynamicMetadataEnable[k])
2987 mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.TCalc
2988 + mode_lib->vba.MinTTUVBlank[k];
2989 }
2990
2991 // DCC Configuration
2992 mode_lib->vba.ActiveDPPs = 0;
2993 // NB P-State/DRAM Clock Change Support
2994 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2995 mode_lib->vba.ActiveDPPs = mode_lib->vba.ActiveDPPs + mode_lib->vba.DPPPerPlane[k];
2996 }
2997
2998 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
2999 double EffectiveLBLatencyHidingY;
3000 double EffectiveLBLatencyHidingC;
3001 double DPPOutputBufferLinesY;
3002 double DPPOutputBufferLinesC;
3003 double DPPOPPBufferingY;
3004 double MaxDETBufferingTimeY;
3005 double ActiveDRAMClockChangeLatencyMarginY;
3006
3007 mode_lib->vba.LBLatencyHidingSourceLinesY =
3008 dml_min(
3009 mode_lib->vba.MaxLineBufferLines,
3010 (unsigned int) dml_floor(
3011 (double) mode_lib->vba.LineBufferSize
3012 / mode_lib->vba.LBBitPerPixel[k]
3013 / (mode_lib->vba.SwathWidthY[k]
3014 / dml_max(
3015 mode_lib->vba.HRatio[k],
3016 1.0)),
3017 1)) - (mode_lib->vba.vtaps[k] - 1);
3018
3019 mode_lib->vba.LBLatencyHidingSourceLinesC =
3020 dml_min(
3021 mode_lib->vba.MaxLineBufferLines,
3022 (unsigned int) dml_floor(
3023 (double) mode_lib->vba.LineBufferSize
3024 / mode_lib->vba.LBBitPerPixel[k]
3025 / (mode_lib->vba.SwathWidthY[k]
3026 / 2.0
3027 / dml_max(
3028 mode_lib->vba.HRatio[k]
3029 / 2,
3030 1.0)),
3031 1))
3032 - (mode_lib->vba.VTAPsChroma[k] - 1);
3033
3034 EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY
3035 / mode_lib->vba.VRatio[k]
3036 * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
3037
3038 EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC
3039 / (mode_lib->vba.VRatio[k] / 2)
3040 * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
3041
3042 if (mode_lib->vba.SwathWidthY[k] > 2 * mode_lib->vba.DPPOutputBufferPixels) {
3043 DPPOutputBufferLinesY = mode_lib->vba.DPPOutputBufferPixels
3044 / mode_lib->vba.SwathWidthY[k];
3045 } else if (mode_lib->vba.SwathWidthY[k] > mode_lib->vba.DPPOutputBufferPixels) {
3046 DPPOutputBufferLinesY = 0.5;
3047 } else {
3048 DPPOutputBufferLinesY = 1;
3049 }
3050
3051 if (mode_lib->vba.SwathWidthY[k] / 2 > 2 * mode_lib->vba.DPPOutputBufferPixels) {
3052 DPPOutputBufferLinesC = mode_lib->vba.DPPOutputBufferPixels
3053 / (mode_lib->vba.SwathWidthY[k] / 2);
3054 } else if (mode_lib->vba.SwathWidthY[k] / 2 > mode_lib->vba.DPPOutputBufferPixels) {
3055 DPPOutputBufferLinesC = 0.5;
3056 } else {
3057 DPPOutputBufferLinesC = 1;
3058 }
3059
3060 DPPOPPBufferingY = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
3061 * (DPPOutputBufferLinesY + mode_lib->vba.OPPOutputBufferLines);
3062 MaxDETBufferingTimeY = mode_lib->vba.FullDETBufferingTimeY[k]
3063 + (mode_lib->vba.LinesInDETY[k]
3064 - mode_lib->vba.LinesInDETYRoundedDownToSwath[k])
3065 / mode_lib->vba.SwathHeightY[k]
3066 * (mode_lib->vba.HTotal[k]
3067 / mode_lib->vba.PixelClock[k]);
3068
3069 ActiveDRAMClockChangeLatencyMarginY = DPPOPPBufferingY + EffectiveLBLatencyHidingY
3070 + MaxDETBufferingTimeY - mode_lib->vba.DRAMClockChangeWatermark;
3071
3072 if (mode_lib->vba.ActiveDPPs > 1) {
3073 ActiveDRAMClockChangeLatencyMarginY =
3074 ActiveDRAMClockChangeLatencyMarginY
3075 - (1 - 1 / (mode_lib->vba.ActiveDPPs - 1))
3076 * mode_lib->vba.SwathHeightY[k]
3077 * (mode_lib->vba.HTotal[k]
3078 / mode_lib->vba.PixelClock[k]);
3079 }
3080
3081 if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
3082 double DPPOPPBufferingC = (mode_lib->vba.HTotal[k]
3083 / mode_lib->vba.PixelClock[k])
3084 * (DPPOutputBufferLinesC
3085 + mode_lib->vba.OPPOutputBufferLines);
3086 double MaxDETBufferingTimeC =
3087 mode_lib->vba.FullDETBufferingTimeC[k]
3088 + (mode_lib->vba.LinesInDETC[k]
3089 - mode_lib->vba.LinesInDETCRoundedDownToSwath[k])
3090 / mode_lib->vba.SwathHeightC[k]
3091 * (mode_lib->vba.HTotal[k]
3092 / mode_lib->vba.PixelClock[k]);
3093 double ActiveDRAMClockChangeLatencyMarginC = DPPOPPBufferingC
3094 + EffectiveLBLatencyHidingC + MaxDETBufferingTimeC
3095 - mode_lib->vba.DRAMClockChangeWatermark;
3096
3097 if (mode_lib->vba.ActiveDPPs > 1) {
3098 ActiveDRAMClockChangeLatencyMarginC =
3099 ActiveDRAMClockChangeLatencyMarginC
3100 - (1
3101 - 1
3102 / (mode_lib->vba.ActiveDPPs
3103 - 1))
3104 * mode_lib->vba.SwathHeightC[k]
3105 * (mode_lib->vba.HTotal[k]
3106 / mode_lib->vba.PixelClock[k]);
3107 }
3108 mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
3109 ActiveDRAMClockChangeLatencyMarginY,
3110 ActiveDRAMClockChangeLatencyMarginC);
3111 } else {
3112 mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] =
3113 ActiveDRAMClockChangeLatencyMarginY;
3114 }
3115
3116 if (mode_lib->vba.WritebackEnable[k]) {
3117 double WritebackDRAMClockChangeLatencyMargin;
3118
3119 if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
3120 WritebackDRAMClockChangeLatencyMargin =
3121 (double) (mode_lib->vba.WritebackInterfaceLumaBufferSize
3122 + mode_lib->vba.WritebackInterfaceChromaBufferSize)
3123 / (mode_lib->vba.WritebackDestinationWidth[k]
3124 * mode_lib->vba.WritebackDestinationHeight[k]
3125 / (mode_lib->vba.WritebackSourceHeight[k]
3126 * mode_lib->vba.HTotal[k]
3127 / mode_lib->vba.PixelClock[k])
3128 * 4)
3129 - mode_lib->vba.WritebackDRAMClockChangeWatermark;
3130 } else if (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
3131 WritebackDRAMClockChangeLatencyMargin =
3132 dml_min(
3133 (double) mode_lib->vba.WritebackInterfaceLumaBufferSize
3134 * 8.0 / 10,
3135 2.0
3136 * mode_lib->vba.WritebackInterfaceChromaBufferSize
3137 * 8 / 10)
3138 / (mode_lib->vba.WritebackDestinationWidth[k]
3139 * mode_lib->vba.WritebackDestinationHeight[k]
3140 / (mode_lib->vba.WritebackSourceHeight[k]
3141 * mode_lib->vba.HTotal[k]
3142 / mode_lib->vba.PixelClock[k]))
3143 - mode_lib->vba.WritebackDRAMClockChangeWatermark;
3144 } else {
3145 WritebackDRAMClockChangeLatencyMargin =
3146 dml_min(
3147 (double) mode_lib->vba.WritebackInterfaceLumaBufferSize,
3148 2.0
3149 * mode_lib->vba.WritebackInterfaceChromaBufferSize)
3150 / (mode_lib->vba.WritebackDestinationWidth[k]
3151 * mode_lib->vba.WritebackDestinationHeight[k]
3152 / (mode_lib->vba.WritebackSourceHeight[k]
3153 * mode_lib->vba.HTotal[k]
3154 / mode_lib->vba.PixelClock[k]))
3155 - mode_lib->vba.WritebackDRAMClockChangeWatermark;
3156 }
3157 mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
3158 mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k],
3159 WritebackDRAMClockChangeLatencyMargin);
3160 }
3161 }
3162
3163 mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
3164 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
3165 if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
3166 < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
3167 mode_lib->vba.MinActiveDRAMClockChangeMargin =
3168 mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
3169 }
3170 }
3171
3172 mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
3173 mode_lib->vba.MinActiveDRAMClockChangeMargin
3174 + mode_lib->vba.DRAMClockChangeLatency;
3175
3176 if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
3177 mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_vactive;
3178 } else {
3179 if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
3180 mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_vblank;
3181 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
3182 if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
3183 mode_lib->vba.DRAMClockChangeSupport =
3184 dm_dram_clock_change_unsupported;
3185 }
3186 }
3187 } else {
3188 mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
3189 }
3190 }
3191
3192 //XFC Parameters:
3193 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
3194 if (mode_lib->vba.XFCEnabled[k] == true) {
3195 double TWait;
3196
3197 mode_lib->vba.XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset;
3198 mode_lib->vba.XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth;
3199 mode_lib->vba.XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset;
3200 TWait = CalculateTWait(
3201 mode_lib->vba.PrefetchMode,
3202 mode_lib->vba.DRAMClockChangeLatency,
3203 mode_lib->vba.UrgentLatency,
3204 mode_lib->vba.SREnterPlusExitTime);
3205 mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay(
3206 mode_lib,
3207 mode_lib->vba.VRatio[k],
3208 mode_lib->vba.SwathWidthY[k],
3209 dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
3210 mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
3211 mode_lib->vba.XFCTSlvVupdateOffset,
3212 mode_lib->vba.XFCTSlvVupdateWidth,
3213 mode_lib->vba.XFCTSlvVreadyOffset,
3214 mode_lib->vba.XFCXBUFLatencyTolerance,
3215 mode_lib->vba.XFCFillBWOverhead,
3216 mode_lib->vba.XFCSlvChunkSize,
3217 mode_lib->vba.XFCBusTransportTime,
3218 mode_lib->vba.TCalc,
3219 TWait,
3220 &mode_lib->vba.SrcActiveDrainRate,
3221 &mode_lib->vba.TInitXFill,
3222 &mode_lib->vba.TslvChk);
3223 mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] =
3224 dml_floor(
3225 mode_lib->vba.XFCRemoteSurfaceFlipDelay
3226 / (mode_lib->vba.HTotal[k]
3227 / mode_lib->vba.PixelClock[k]),
3228 1);
3229 mode_lib->vba.XFCTransferDelay[k] =
3230 dml_ceil(
3231 mode_lib->vba.XFCBusTransportTime
3232 / (mode_lib->vba.HTotal[k]
3233 / mode_lib->vba.PixelClock[k]),
3234 1);
3235 mode_lib->vba.XFCPrechargeDelay[k] =
3236 dml_ceil(
3237 (mode_lib->vba.XFCBusTransportTime
3238 + mode_lib->vba.TInitXFill
3239 + mode_lib->vba.TslvChk)
3240 / (mode_lib->vba.HTotal[k]
3241 / mode_lib->vba.PixelClock[k]),
3242 1);
3243 mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance
3244 * mode_lib->vba.SrcActiveDrainRate;
3245 mode_lib->vba.FinalFillMargin =
3246 (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
3247 + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
3248 * mode_lib->vba.HTotal[k]
3249 / mode_lib->vba.PixelClock[k]
3250 * mode_lib->vba.SrcActiveDrainRate
3251 + mode_lib->vba.XFCFillConstant;
3252 mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay
3253 * mode_lib->vba.SrcActiveDrainRate
3254 + mode_lib->vba.FinalFillMargin;
3255 mode_lib->vba.RemainingFillLevel = dml_max(
3256 0.0,
3257 mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel);
3258 mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel
3259 / (mode_lib->vba.SrcActiveDrainRate
3260 * mode_lib->vba.XFCFillBWOverhead / 100);
3261 mode_lib->vba.XFCPrefetchMargin[k] =
3262 mode_lib->vba.XFCRemoteSurfaceFlipDelay
3263 + mode_lib->vba.TFinalxFill
3264 + (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
3265 + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
3266 * mode_lib->vba.HTotal[k]
3267 / mode_lib->vba.PixelClock[k];
3268 } else {
3269 mode_lib->vba.XFCSlaveVUpdateOffset[k] = 0;
3270 mode_lib->vba.XFCSlaveVupdateWidth[k] = 0;
3271 mode_lib->vba.XFCSlaveVReadyOffset[k] = 0;
3272 mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = 0;
3273 mode_lib->vba.XFCPrechargeDelay[k] = 0;
3274 mode_lib->vba.XFCTransferDelay[k] = 0;
3275 mode_lib->vba.XFCPrefetchMargin[k] = 0;
3276 }
3277 }
3278}
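/*
 * Summary of the scheduling loop above (descriptive only; the bandwidth
 * figure below is hypothetical, not taken from this file): starting at
 * VStartupLines = 13, the do-while loop re-runs CalculatePrefetchSchedule()
 * with one more vstartup line per pass until either VStartupLines exceeds
 * MaximumMaxVStartupLines or PrefetchModeSupported becomes true, i.e. the
 * sum over planes of cursor_bw plus max(prefetch_vm_bw, prefetch_row_bw,
 * max(plane read bw, RequiredPrefetchPixDataBW) + meta_row_bw + dpte_row_bw)
 * fits in ReturnBW (e.g. 90 of a hypothetical 100 units), the vm/row
 * prefetch bandwidths are valid, VRatioPrefetchY/C <= 4,
 * DestinationLinesForPrefetch >= 2, no plane reports ErrorResult, and
 * immediate flip is either not requested or supported.
 */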
3279
3280static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
3281{
3282 double BytePerPixDETY;
3283 double BytePerPixDETC;
3284 double Read256BytesBlockHeightY;
3285 double Read256BytesBlockHeightC;
3286 double Read256BytesBlockWidthY;
3287 double Read256BytesBlockWidthC;
3288 double MaximumSwathHeightY;
3289 double MaximumSwathHeightC;
3290 double MinimumSwathHeightY;
3291 double MinimumSwathHeightC;
3292 double SwathWidth;
3293 double SwathWidthGranularityY;
3294 double SwathWidthGranularityC;
3295 double RoundedUpMaxSwathSizeBytesY;
3296 double RoundedUpMaxSwathSizeBytesC;
3297 unsigned int j, k;
3298
3299 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
3300 bool MainPlaneDoesODMCombine = false;
3301
3302 if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
3303 BytePerPixDETY = 8;
3304 BytePerPixDETC = 0;
3305 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
3306 BytePerPixDETY = 4;
3307 BytePerPixDETC = 0;
3308 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
3309 BytePerPixDETY = 2;
3310 BytePerPixDETC = 0;
3311 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
3312 BytePerPixDETY = 1;
3313 BytePerPixDETC = 0;
3314 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
3315 BytePerPixDETY = 1;
3316 BytePerPixDETC = 2;
3317 } else {
3318 BytePerPixDETY = 4.0 / 3.0;
3319 BytePerPixDETC = 8.0 / 3.0;
3320 }
3321
3322 if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
3323 || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
3324 || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
3325 || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
3326 if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
3327 Read256BytesBlockHeightY = 1;
3328 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
3329 Read256BytesBlockHeightY = 4;
3330 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32
3331 || mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
3332 Read256BytesBlockHeightY = 8;
3333 } else {
3334 Read256BytesBlockHeightY = 16;
3335 }
3336 Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
3337 / Read256BytesBlockHeightY;
3338 Read256BytesBlockHeightC = 0;
3339 Read256BytesBlockWidthC = 0;
3340 } else {
3341 if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
3342 Read256BytesBlockHeightY = 1;
3343 Read256BytesBlockHeightC = 1;
3344 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
3345 Read256BytesBlockHeightY = 16;
3346 Read256BytesBlockHeightC = 8;
3347 } else {
3348 Read256BytesBlockHeightY = 8;
3349 Read256BytesBlockHeightC = 8;
3350 }
3351 Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
3352 / Read256BytesBlockHeightY;
3353 Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2)
3354 / Read256BytesBlockHeightC;
3355 }
3356
3357 if (mode_lib->vba.SourceScan[k] == dm_horz) {
3358 MaximumSwathHeightY = Read256BytesBlockHeightY;
3359 MaximumSwathHeightC = Read256BytesBlockHeightC;
3360 } else {
3361 MaximumSwathHeightY = Read256BytesBlockWidthY;
3362 MaximumSwathHeightC = Read256BytesBlockWidthC;
3363 }
3364
3365 if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
3366 || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
3367 || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
3368 || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
3369 if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
3370 || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
3371 && (mode_lib->vba.SurfaceTiling[k]
3372 == dm_sw_4kb_s
3373 || mode_lib->vba.SurfaceTiling[k]
3374 == dm_sw_4kb_s_x
3375 || mode_lib->vba.SurfaceTiling[k]
3376 == dm_sw_64kb_s
3377 || mode_lib->vba.SurfaceTiling[k]
3378 == dm_sw_64kb_s_t
3379 || mode_lib->vba.SurfaceTiling[k]
3380 == dm_sw_64kb_s_x
3381 || mode_lib->vba.SurfaceTiling[k]
3382 == dm_sw_var_s
3383 || mode_lib->vba.SurfaceTiling[k]
3384 == dm_sw_var_s_x)
3385 && mode_lib->vba.SourceScan[k] == dm_horz)) {
3386 MinimumSwathHeightY = MaximumSwathHeightY;
3387 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8
3388 && mode_lib->vba.SourceScan[k] != dm_horz) {
3389 MinimumSwathHeightY = MaximumSwathHeightY;
3390 } else {
3391 MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
3392 }
3393 MinimumSwathHeightC = MaximumSwathHeightC;
3394 } else {
3395 if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
3396 MinimumSwathHeightY = MaximumSwathHeightY;
3397 MinimumSwathHeightC = MaximumSwathHeightC;
3398 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
3399 && mode_lib->vba.SourceScan[k] == dm_horz) {
3400 MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
3401 MinimumSwathHeightC = MaximumSwathHeightC;
3402 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
3403 && mode_lib->vba.SourceScan[k] == dm_horz) {
3404 MinimumSwathHeightC = MaximumSwathHeightC / 2.0;
3405 MinimumSwathHeightY = MaximumSwathHeightY;
3406 } else {
3407 MinimumSwathHeightY = MaximumSwathHeightY;
3408 MinimumSwathHeightC = MaximumSwathHeightC;
3409 }
3410 }
3411
3412 if (mode_lib->vba.SourceScan[k] == dm_horz) {
3413 SwathWidth = mode_lib->vba.ViewportWidth[k];
3414 } else {
3415 SwathWidth = mode_lib->vba.ViewportHeight[k];
3416 }
3417
3418 if (mode_lib->vba.ODMCombineEnabled[k] == true) {
3419 MainPlaneDoesODMCombine = true;
3420 }
3421 for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
3422 if (mode_lib->vba.BlendingAndTiming[k] == j
3423 && mode_lib->vba.ODMCombineEnabled[j] == true) {
3424 MainPlaneDoesODMCombine = true;
3425 }
3426 }
3427
3428 if (MainPlaneDoesODMCombine == true) {
3429 SwathWidth = dml_min(
3430 SwathWidth,
3431 mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]);
3432 } else {
3433 SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k];
3434 }
3435
3436 SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY;
3437 RoundedUpMaxSwathSizeBytesY = (dml_ceil(
3438 (double) (SwathWidth - 1),
3439 SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY
3440 * MaximumSwathHeightY;
3441 if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
3442 RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256)
3443 + 256;
3444 }
3445 if (MaximumSwathHeightC > 0) {
3446 SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2)
3447 / MaximumSwathHeightC;
3448 RoundedUpMaxSwathSizeBytesC = (dml_ceil(
3449 (double) (SwathWidth / 2.0 - 1),
3450 SwathWidthGranularityC) + SwathWidthGranularityC)
3451 * BytePerPixDETC * MaximumSwathHeightC;
3452 if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
3453 RoundedUpMaxSwathSizeBytesC = dml_ceil(
3454 RoundedUpMaxSwathSizeBytesC,
3455 256) + 256;
3456 }
3457 } else
3458 RoundedUpMaxSwathSizeBytesC = 0.0;
3459
3460 if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
3461 <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
3462 mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY;
3463 mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC;
3464 } else {
3465 mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY;
3466 mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC;
3467 }
3468
3469 if (mode_lib->vba.SwathHeightC[k] == 0) {
3470 mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte * 1024;
3471 mode_lib->vba.DETBufferSizeC[k] = 0;
3472 } else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) {
3473 mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
3474 * 1024.0 / 2;
3475 mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
3476 * 1024.0 / 2;
3477 } else {
3478 mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
3479 * 1024.0 * 2 / 3;
3480 mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
3481 * 1024.0 / 3;
3482 }
3483 }
3484}
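/*
 * DisplayPipeConfiguration() picks the swath height and DET split per plane.
 * Illustrative walk-through (hypothetical values, not from this file): for a
 * dm_444_32 surface scanned horizontally the maximum swath height equals the
 * 256-byte block height (8); if
 *   RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
 *       <= DETBufferSizeInKByte * 1024 / 2
 * the maximum swath heights are kept, otherwise the halved minimum heights
 * are used.  The DET is then split: all of it to luma when there is no
 * chroma swath, 50/50 when SwathHeightY <= SwathHeightC, and 2/3 luma to
 * 1/3 chroma otherwise.  Under ODM combine the swath width is capped at
 * HActive / 2 * HRatio instead of being divided by DPPPerPlane.
 */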
3485
3486bool Calculate256BBlockSizes(
3487 enum source_format_class SourcePixelFormat,
3488 enum dm_swizzle_mode SurfaceTiling,
3489 unsigned int BytePerPixelY,
3490 unsigned int BytePerPixelC,
3491 unsigned int *BlockHeight256BytesY,
3492 unsigned int *BlockHeight256BytesC,
3493 unsigned int *BlockWidth256BytesY,
3494 unsigned int *BlockWidth256BytesC)
3495{
3496 if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32
3497 || SourcePixelFormat == dm_444_16
3498 || SourcePixelFormat == dm_444_8)) {
3499 if (SurfaceTiling == dm_sw_linear) {
3500 *BlockHeight256BytesY = 1;
3501 } else if (SourcePixelFormat == dm_444_64) {
3502 *BlockHeight256BytesY = 4;
3503 } else if (SourcePixelFormat == dm_444_8) {
3504 *BlockHeight256BytesY = 16;
3505 } else {
3506 *BlockHeight256BytesY = 8;
3507 }
3508 *BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
3509 *BlockHeight256BytesC = 0;
3510 *BlockWidth256BytesC = 0;
3511 } else {
3512 if (SurfaceTiling == dm_sw_linear) {
3513 *BlockHeight256BytesY = 1;
3514 *BlockHeight256BytesC = 1;
3515 } else if (SourcePixelFormat == dm_420_8) {
3516 *BlockHeight256BytesY = 16;
3517 *BlockHeight256BytesC = 8;
3518 } else {
3519 *BlockHeight256BytesY = 8;
3520 *BlockHeight256BytesC = 8;
3521 }
3522 *BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
3523 *BlockWidth256BytesC = 256 / BytePerPixelC / *BlockHeight256BytesC;
3524 }
3525 return true;
3526}
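/*
 * Calculate256BBlockSizes() derives the request block dimensions from a
 * 256-byte tile: width = 256 / BytePerPixel / height.  Worked examples
 * (straight from the branches above): non-linear dm_444_32 gives an 8x8
 * luma block (256 / 4 / 8 = 8); dm_420_8 gives a 16-high, 16-wide luma
 * block (256 / 1 / 16) and an 8-high, 16-wide chroma block (256 / 2 / 8).
 */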
3527
3528static double CalculateTWait(
3529 unsigned int PrefetchMode,
3530 double DRAMClockChangeLatency,
3531 double UrgentLatency,
3532 double SREnterPlusExitTime)
3533{
3534 if (PrefetchMode == 0) {
3535 return dml_max(
3536 DRAMClockChangeLatency + UrgentLatency,
3537 dml_max(SREnterPlusExitTime, UrgentLatency));
3538 } else if (PrefetchMode == 1) {
3539 return dml_max(SREnterPlusExitTime, UrgentLatency);
3540 } else {
3541 return UrgentLatency;
3542 }
3543}
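/*
 * CalculateTWait() in one expression:
 *   PrefetchMode 0: max(DRAMClockChangeLatency + UrgentLatency,
 *                       SREnterPlusExitTime, UrgentLatency)
 *   PrefetchMode 1: max(SREnterPlusExitTime, UrgentLatency)
 *   otherwise:      UrgentLatency
 * E.g. (hypothetical numbers) with DRAMClockChangeLatency = 17 us,
 * UrgentLatency = 4 us and SREnterPlusExitTime = 9 us, mode 0 waits 21 us,
 * mode 1 waits 9 us and mode 2 waits 4 us.
 */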
3544
3545static double CalculateRemoteSurfaceFlipDelay(
3546 struct display_mode_lib *mode_lib,
3547 double VRatio,
3548 double SwathWidth,
3549 double Bpp,
3550 double LineTime,
3551 double XFCTSlvVupdateOffset,
3552 double XFCTSlvVupdateWidth,
3553 double XFCTSlvVreadyOffset,
3554 double XFCXBUFLatencyTolerance,
3555 double XFCFillBWOverhead,
3556 double XFCSlvChunkSize,
3557 double XFCBusTransportTime,
3558 double TCalc,
3559 double TWait,
3560 double *SrcActiveDrainRate,
3561 double *TInitXFill,
3562 double *TslvChk)
3563{
3564 double TSlvSetup, AvgfillRate, result;
3565
3566 *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
3567 TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset;
3568 *TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100);
3569 AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100);
3570 *TslvChk = XFCSlvChunkSize / AvgfillRate;
3571 dml_print(
3572 "DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n",
3573 *SrcActiveDrainRate);
3574 dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup);
3575 dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill);
3576 dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate);
3577 dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk);
3578 result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide
3579 dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result);
3580 return result;
3581}
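/*
 * CalculateRemoteSurfaceFlipDelay() combines the terms printed above:
 *   SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime
 *   TSlvSetup          = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth
 *                        + XFCTSlvVreadyOffset
 *   TInitXFill         = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead/100)
 *   TslvChk            = XFCSlvChunkSize / AvgfillRate
 *   delay              = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait
 *                        + TslvChk + TInitXFill
 * (the TODO above notes the formula may not match the programming guide).
 */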
3582
3583static double CalculateWriteBackDISPCLK(
3584 enum source_format_class WritebackPixelFormat,
3585 double PixelClock,
3586 double WritebackHRatio,
3587 double WritebackVRatio,
3588 unsigned int WritebackLumaHTaps,
3589 unsigned int WritebackLumaVTaps,
3590 unsigned int WritebackChromaHTaps,
3591 unsigned int WritebackChromaVTaps,
3592 double WritebackDestinationWidth,
3593 unsigned int HTotal,
3594 unsigned int WritebackChromaLineBufferWidth)
3595{
3596 double CalculateWriteBackDISPCLK =
3597 1.01 * PixelClock
3598 * dml_max(
3599 dml_ceil(WritebackLumaHTaps / 4.0, 1)
3600 / WritebackHRatio,
3601 dml_max(
3602 (WritebackLumaVTaps
3603 * dml_ceil(
3604 1.0
3605 / WritebackVRatio,
3606 1)
3607 * dml_ceil(
3608 WritebackDestinationWidth
3609 / 4.0,
3610 1)
3611 + dml_ceil(
3612 WritebackDestinationWidth
3613 / 4.0,
3614 1))
3615 / (double) HTotal
3616 + dml_ceil(
3617 1.0
3618 / WritebackVRatio,
3619 1)
3620 * (dml_ceil(
3621 WritebackLumaVTaps
3622 / 4.0,
3623 1)
3624 + 4.0)
3625 / (double) HTotal,
3626 dml_ceil(
3627 1.0
3628 / WritebackVRatio,
3629 1)
3630 * WritebackDestinationWidth
3631 / (double) HTotal));
3632 if (WritebackPixelFormat != dm_444_32) {
3633 CalculateWriteBackDISPCLK =
3634 dml_max(
3635 CalculateWriteBackDISPCLK,
3636 1.01 * PixelClock
3637 * dml_max(
3638 dml_ceil(
3639 WritebackChromaHTaps
3640 / 2.0,
3641 1)
3642 / (2
3643 * WritebackHRatio),
3644 dml_max(
3645 (WritebackChromaVTaps
3646 * dml_ceil(
3647 1
3648 / (2
3649 * WritebackVRatio),
3650 1)
3651 * dml_ceil(
3652 WritebackDestinationWidth
3653 / 2.0
3654 / 2.0,
3655 1)
3656 + dml_ceil(
3657 WritebackDestinationWidth
3658 / 2.0
3659 / WritebackChromaLineBufferWidth,
3660 1))
3661 / HTotal
3662 + dml_ceil(
3663 1
3664 / (2
3665 * WritebackVRatio),
3666 1)
3667 * (dml_ceil(
3668 WritebackChromaVTaps
3669 / 4.0,
3670 1)
3671 + 4)
3672 / HTotal,
3673 dml_ceil(
3674 1.0
3675 / (2
3676 * WritebackVRatio),
3677 1)
3678 * WritebackDestinationWidth
3679 / 2.0
3680 / HTotal)));
3681 }
3682 return CalculateWriteBackDISPCLK;
3683}
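/*
 * CalculateWriteBackDISPCLK() computes a writeback-limited DISPCLK value:
 * 1.01 * PixelClock scaled by the worst of the luma horizontal-tap,
 * vertical-tap and destination-width terms, with an equivalent chroma term
 * (at half rate and half width) folded in for any writeback format other
 * than dm_444_32.
 */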
3684
3685static double CalculateWriteBackDelay(
3686 enum source_format_class WritebackPixelFormat,
3687 double WritebackHRatio,
3688 double WritebackVRatio,
3689 unsigned int WritebackLumaHTaps,
3690 unsigned int WritebackLumaVTaps,
3691 unsigned int WritebackChromaHTaps,
3692 unsigned int WritebackChromaVTaps,
3693 unsigned int WritebackDestinationWidth)
3694{
3695 double CalculateWriteBackDelay =
3696 dml_max(
3697 dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
3698 WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1)
3699 * dml_ceil(
3700 WritebackDestinationWidth
3701 / 4.0,
3702 1)
3703 + dml_ceil(1.0 / WritebackVRatio, 1)
3704 * (dml_ceil(
3705 WritebackLumaVTaps
3706 / 4.0,
3707 1) + 4));
3708
3709 if (WritebackPixelFormat != dm_444_32) {
3710 CalculateWriteBackDelay =
3711 dml_max(
3712 CalculateWriteBackDelay,
3713 dml_max(
3714 dml_ceil(
3715 WritebackChromaHTaps
3716 / 2.0,
3717 1)
3718 / (2
3719 * WritebackHRatio),
3720 WritebackChromaVTaps
3721 * dml_ceil(
3722 1
3723 / (2
3724 * WritebackVRatio),
3725 1)
3726 * dml_ceil(
3727 WritebackDestinationWidth
3728 / 2.0
3729 / 2.0,
3730 1)
3731 + dml_ceil(
3732 1
3733 / (2
3734 * WritebackVRatio),
3735 1)
3736 * (dml_ceil(
3737 WritebackChromaVTaps
3738 / 4.0,
3739 1)
3740 + 4)));
3741 }
3742 return CalculateWriteBackDelay;
3743}
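/*
 * CalculateWriteBackDelay() mirrors the tap/width terms above but returns
 * the delay in DISPCLK cycles; the callers earlier in this file divide the
 * result by DISPCLK and add WritebackLatency to get the per-plane
 * WritebackDelay that feeds the MaxVStartupLines calculation.
 */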
3744
3745static void CalculateActiveRowBandwidth(
3746 bool VirtualMemoryEnable,
3747 enum source_format_class SourcePixelFormat,
3748 double VRatio,
3749 bool DCCEnable,
3750 double LineTime,
3751 unsigned int MetaRowByteLuma,
3752 unsigned int MetaRowByteChroma,
3753 unsigned int meta_row_height_luma,
3754 unsigned int meta_row_height_chroma,
3755 unsigned int PixelPTEBytesPerRowLuma,
3756 unsigned int PixelPTEBytesPerRowChroma,
3757 unsigned int dpte_row_height_luma,
3758 unsigned int dpte_row_height_chroma,
3759 double *meta_row_bw,
3760 double *dpte_row_bw,
3761 double *qual_row_bw)
3762{
3763 if (DCCEnable != true) {
3764 *meta_row_bw = 0;
3765 } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
3766 *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
3767 + VRatio / 2 * MetaRowByteChroma
3768 / (meta_row_height_chroma * LineTime);
3769 } else {
3770 *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
3771 }
3772
3773 if (VirtualMemoryEnable != true) {
3774 *dpte_row_bw = 0;
3775 } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
3776 *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
3777 + VRatio / 2 * PixelPTEBytesPerRowChroma
3778 / (dpte_row_height_chroma * LineTime);
3779 } else {
3780 *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
3781 }
3782
3783 if ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)) {
3784 *qual_row_bw = *meta_row_bw + *dpte_row_bw;
3785 } else {
3786 *qual_row_bw = 0;
3787 }
3788}
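/*
 * CalculateActiveRowBandwidth() example (hypothetical numbers, not from this
 * file): with DCC on, VRatio = 1, MetaRowByteLuma = 1024,
 * meta_row_height_luma = 8 and LineTime = 10 us,
 * meta_row_bw = 1 * 1024 / (8 * 10) = 12.8 bytes per microsecond (12.8 MB/s).
 * For 4:2:0 formats a half-rate chroma term is added and qual_row_bw is the
 * sum of meta_row_bw and dpte_row_bw; for other formats qual_row_bw is 0.
 */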
3789
3790static void CalculateFlipSchedule(
3791 struct display_mode_lib *mode_lib,
3792 double UrgentExtraLatency,
3793 double UrgentLatency,
3794 unsigned int MaxPageTableLevels,
3795 bool VirtualMemoryEnable,
3796 double BandwidthAvailableForImmediateFlip,
3797 unsigned int TotImmediateFlipBytes,
3798 enum source_format_class SourcePixelFormat,
3799 unsigned int ImmediateFlipBytes,
3800 double LineTime,
3801 double Tno_bw,
3802 double VRatio,
3803 double PDEAndMetaPTEBytesFrame,
3804 unsigned int MetaRowByte,
3805 unsigned int PixelPTEBytesPerRow,
3806 bool DCCEnable,
3807 unsigned int dpte_row_height,
3808 unsigned int meta_row_height,
3809 double qual_row_bw,
3810 double *DestinationLinesToRequestVMInImmediateFlip,
3811 double *DestinationLinesToRequestRowInImmediateFlip,
3812 double *final_flip_bw,
3813 bool *ImmediateFlipSupportedForPipe)
3814{
3815 double min_row_time = 0.0;
3816
3817 if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
3818 *DestinationLinesToRequestVMInImmediateFlip = 0.0;
3819 *DestinationLinesToRequestRowInImmediateFlip = 0.0;
3820 *final_flip_bw = qual_row_bw;
3821 *ImmediateFlipSupportedForPipe = true;
3822 } else {
3823 double TimeForFetchingMetaPTEImmediateFlip;
3824 double TimeForFetchingRowInVBlankImmediateFlip;
3825
3826 if (VirtualMemoryEnable == true) {
3827 mode_lib->vba.ImmediateFlipBW = BandwidthAvailableForImmediateFlip
3828 * ImmediateFlipBytes / TotImmediateFlipBytes;
3829 TimeForFetchingMetaPTEImmediateFlip =
3830 dml_max(
3831 Tno_bw
3832 + PDEAndMetaPTEBytesFrame
3833 / mode_lib->vba.ImmediateFlipBW,
3834 dml_max(
3835 UrgentExtraLatency
3836 + UrgentLatency
3837 * (MaxPageTableLevels
3838 - 1),
3839 LineTime / 4.0));
3840 } else {
3841 TimeForFetchingMetaPTEImmediateFlip = 0;
3842 }
3843
3844 *DestinationLinesToRequestVMInImmediateFlip = dml_floor(
3845 4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
3846 1) / 4.0;
3847
3848 if ((VirtualMemoryEnable == true || DCCEnable == true)) {
3849 mode_lib->vba.ImmediateFlipBW = BandwidthAvailableForImmediateFlip
3850 * ImmediateFlipBytes / TotImmediateFlipBytes;
3851 TimeForFetchingRowInVBlankImmediateFlip = dml_max(
3852 (MetaRowByte + PixelPTEBytesPerRow)
3853 / mode_lib->vba.ImmediateFlipBW,
3854 dml_max(UrgentLatency, LineTime / 4.0));
3855 } else {
3856 TimeForFetchingRowInVBlankImmediateFlip = 0;
3857 }
3858
3859 *DestinationLinesToRequestRowInImmediateFlip = dml_floor(
3860 4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime + 0.125),
3861 1) / 4.0;
3862
3863 if (VirtualMemoryEnable == true) {
3864 *final_flip_bw =
3865 dml_max(
3866 PDEAndMetaPTEBytesFrame
3867 / (*DestinationLinesToRequestVMInImmediateFlip
3868 * LineTime),
3869 (MetaRowByte + PixelPTEBytesPerRow)
3870 / (TimeForFetchingRowInVBlankImmediateFlip
3871 * LineTime));
3872 } else if (MetaRowByte + PixelPTEBytesPerRow > 0) {
3873 *final_flip_bw = (MetaRowByte + PixelPTEBytesPerRow)
3874 / (TimeForFetchingRowInVBlankImmediateFlip * LineTime);
3875 } else {
3876 *final_flip_bw = 0;
3877 }
3878
3879 if (VirtualMemoryEnable && !DCCEnable)
3880 min_row_time = dpte_row_height * LineTime / VRatio;
3881 else if (!VirtualMemoryEnable && DCCEnable)
3882 min_row_time = meta_row_height * LineTime / VRatio;
3883 else
3884 min_row_time = dml_min(dpte_row_height, meta_row_height) * LineTime
3885 / VRatio;
3886
3887 if (*DestinationLinesToRequestVMInImmediateFlip >= 8
3888 || *DestinationLinesToRequestRowInImmediateFlip >= 16
3889 || TimeForFetchingMetaPTEImmediateFlip
3890 + 2 * TimeForFetchingRowInVBlankImmediateFlip
3891 > min_row_time)
3892 *ImmediateFlipSupportedForPipe = false;
3893 else
3894 *ImmediateFlipSupportedForPipe = true;
3895 }
3896}
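/*
 * CalculateFlipSchedule() notes: 4:2:0 sources are handled up front (flip
 * supported, final_flip_bw = qual_row_bw).  For the other formats the PTE
 * and meta/row fetch times are spread over vblank lines quantized to quarter
 * lines; e.g. dml_floor(4 * (t / LineTime + 0.125), 1) / 4 turns a fetch
 * time of 2.3 line times into 2.25 destination lines.  The flip is reported
 * unsupported when the VM request needs >= 8 lines, the row request needs
 * >= 16 lines, or Tvm + 2 * Trow exceeds min_row_time.
 */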
3897
3898static void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib)
3899{
3900 unsigned int k;
3901
3902 //Progressive To dml_ml->vba.Interlace Unit Effect
3903 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
3904 mode_lib->vba.PixelClockBackEnd[k] = mode_lib->vba.PixelClock[k];
3905 if (mode_lib->vba.Interlace[k] == 1
3906 && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true) {
3907 mode_lib->vba.PixelClock[k] = 2 * mode_lib->vba.PixelClock[k];
3908 }
3909 }
3910}
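/*
 * PixelClockAdjustmentForProgressiveToInterlaceUnit() keeps the original
 * rate in PixelClockBackEnd[k] and doubles PixelClock[k] for interlaced
 * timings when the progressive-to-interlace unit sits in the OPP, so the
 * later line-time math uses the doubled front-end rate.
 */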
3911
3912static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp)
3913{
3914 switch (ebpp) {
3915 case dm_cur_2bit:
3916 return 2;
3917 case dm_cur_32bit:
3918 return 32;
3919 case dm_cur_64bit:
3920 return 64;
3921 default:
3922 return 0;
3923 }
3924}
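/*
 * CursorBppEnumToBits() maps dm_cur_2bit/32bit/64bit to 2/32/64 bits per
 * pixel and falls back to 0 for anything else; a bit count in this form is
 * what the cursor_bw calculation earlier divides by 8.0 to get bytes per
 * pixel (CursorBPP is presumably populated from this helper elsewhere).
 */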
3925
3926static unsigned int TruncToValidBPP(
3927 double DecimalBPP,
3928 bool DSCEnabled,
3929 enum output_encoder_class Output,
3930 enum output_format_class Format,
3931 unsigned int DSCInputBitPerComponent)
3932{
3933 if (Output == dm_hdmi) {
3934 if (Format == dm_420) {
3935 if (DecimalBPP >= 18)
3936 return 18;
3937 else if (DecimalBPP >= 15)
3938 return 15;
3939 else if (DecimalBPP >= 12)
3940 return 12;
3941 else
3942 return BPP_INVALID;
3943 } else if (Format == dm_444) {
3944 if (DecimalBPP >= 36)
3945 return 36;
3946 else if (DecimalBPP >= 30)
3947 return 30;
3948 else if (DecimalBPP >= 24)
3949 return 24;
3950 else
3951 return BPP_INVALID;
3952 } else {
3953 if (DecimalBPP / 1.5 >= 24)
3954 return 24;
3955 else if (DecimalBPP / 1.5 >= 20)
3956 return 20;
3957 else if (DecimalBPP / 1.5 >= 16)
3958 return 16;
3959 else
3960 return BPP_INVALID;
3961 }
3962 } else {
3963 if (DSCEnabled) {
3964 if (Format == dm_420) {
3965 if (DecimalBPP < 6)
3966 return BPP_INVALID;
3967 else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1 / 16)
3968 return 1.5 * DSCInputBitPerComponent - 1 / 16;
3969 else
3970 return dml_floor(16 * DecimalBPP, 1) / 16;
3971 } else if (Format == dm_n422) {
3972 if (DecimalBPP < 7)
3973 return BPP_INVALID;
3974 else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1 / 16)
3975 return 2 * DSCInputBitPerComponent - 1 / 16;
3976 else
3977 return dml_floor(16 * DecimalBPP, 1) / 16;
3978 } else {
3979 if (DecimalBPP < 8)
3980 return BPP_INVALID;
3981 else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1 / 16)
3982 return 3 * DSCInputBitPerComponent - 1 / 16;
3983 else
3984 return dml_floor(16 * DecimalBPP, 1) / 16;
3985 }
3986 } else if (Format == dm_420) {
3987 if (DecimalBPP >= 18)
3988 return 18;
3989 else if (DecimalBPP >= 15)
3990 return 15;
3991 else if (DecimalBPP >= 12)
3992 return 12;
3993 else
3994 return BPP_INVALID;
3995 } else if (Format == dm_s422 || Format == dm_n422) {
3996 if (DecimalBPP >= 24)
3997 return 24;
3998 else if (DecimalBPP >= 20)
3999 return 20;
4000 else if (DecimalBPP >= 16)
4001 return 16;
4002 else
4003 return BPP_INVALID;
4004 } else {
4005 if (DecimalBPP >= 36)
4006 return 36;
4007 else if (DecimalBPP >= 30)
4008 return 30;
4009 else if (DecimalBPP >= 24)
4010 return 24;
4011 else
4012 return BPP_INVALID;
4013 }
4014 }
4015}
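/*
 * TruncToValidBPP() truncation rules as written above:
 *   - HDMI: 4:2:0 snaps down to 18/15/12 bpp, 4:4:4 to 36/30/24, and the
 *     remaining (4:2:2) branch compares DecimalBPP / 1.5 against 24/20/16;
 *     anything below the smallest step returns BPP_INVALID.
 *   - Non-HDMI with DSC: minimum 6/7/8 bpp for 4:2:0 / n422 / other formats,
 *     capped near 1.5x / 2x / 3x DSCInputBitPerComponent, otherwise floored
 *     to 1/16-bpp granularity.  Note that "- 1 / 16" is integer division
 *     (== 0) and the unsigned return type truncates fractional results.
 *   - Non-HDMI without DSC: the same 18/15/12, 24/20/16 and 36/30/24
 *     ladders, keyed off the output format.
 */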
4016
4017static void ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
4018{
4019 int i;
4020 unsigned int j, k;
4021 /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
4022
4023 /*Scale Ratio, taps Support Check*/
4024
4025 mode_lib->vba.ScaleRatioAndTapsSupport = true;
4026 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4027 if (mode_lib->vba.ScalerEnabled[k] == false
4028 && ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
4029 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
4030 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
4031 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
4032 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)
4033 || mode_lib->vba.HRatio[k] != 1.0
4034 || mode_lib->vba.htaps[k] != 1.0
4035 || mode_lib->vba.VRatio[k] != 1.0
4036 || mode_lib->vba.vtaps[k] != 1.0)) {
4037 mode_lib->vba.ScaleRatioAndTapsSupport = false;
4038 } else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0
4039 || mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0
4040 || (mode_lib->vba.htaps[k] > 1.0
4041 && (mode_lib->vba.htaps[k] % 2) == 1)
4042 || mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
4043 || mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
4044 || mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
4045 || mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
4046 || (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
4047 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
4048 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
4049 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
4050 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
4051 && (mode_lib->vba.HRatio[k] / 2.0
4052 > mode_lib->vba.HTAPsChroma[k]
4053 || mode_lib->vba.VRatio[k] / 2.0
4054 > mode_lib->vba.VTAPsChroma[k]))) {
4055 mode_lib->vba.ScaleRatioAndTapsSupport = false;
4056 }
4057 }
4058 /*Source Format, Pixel Format and Scan Support Check*/
4059
4060 mode_lib->vba.SourceFormatPixelAndScanSupport = true;
4061 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4062 if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
4063 && mode_lib->vba.SourceScan[k] != dm_horz)
4064 || ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
4065 || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x
4066 || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
4067 || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
4068 || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
4069 || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d
4070 || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x)
4071 && mode_lib->vba.SourcePixelFormat[k] != dm_444_64)
4072 || (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x
4073 && (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8
4074 || mode_lib->vba.SourcePixelFormat[k]
4075 == dm_420_8
4076 || mode_lib->vba.SourcePixelFormat[k]
4077 == dm_420_10))
4078 || (((mode_lib->vba.SurfaceTiling[k]
4079 == dm_sw_gfx7_2d_thin_gl
4080 || mode_lib->vba.SurfaceTiling[k]
4081 == dm_sw_gfx7_2d_thin_lvp)
4082 && !((mode_lib->vba.SourcePixelFormat[k]
4083 == dm_444_64
4084 || mode_lib->vba.SourcePixelFormat[k]
4085 == dm_444_32)
4086 && mode_lib->vba.SourceScan[k]
4087 == dm_horz
4088 && mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp
4089 == true
4090 && mode_lib->vba.DCCEnable[k]
4091 == false))
4092 || (mode_lib->vba.DCCEnable[k] == true
4093 && (mode_lib->vba.SurfaceTiling[k]
4094 == dm_sw_linear
4095 || mode_lib->vba.SourcePixelFormat[k]
4096 == dm_420_8
4097 || mode_lib->vba.SourcePixelFormat[k]
4098 == dm_420_10)))) {
4099 mode_lib->vba.SourceFormatPixelAndScanSupport = false;
4100 }
4101 }
4102 /*Bandwidth Support Check*/
4103
4104 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4105 if (mode_lib->vba.SourceScan[k] == dm_horz) {
4106 mode_lib->vba.SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
4107 } else {
4108 mode_lib->vba.SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
4109 }
4110 if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
4111 mode_lib->vba.BytePerPixelInDETY[k] = 8.0;
4112 mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
4113 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
4114 mode_lib->vba.BytePerPixelInDETY[k] = 4.0;
4115 mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
4116 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16
4117 || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
4118 mode_lib->vba.BytePerPixelInDETY[k] = 2.0;
4119 mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
4120 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
4121 mode_lib->vba.BytePerPixelInDETY[k] = 1.0;
4122 mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
4123 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
4124 mode_lib->vba.BytePerPixelInDETY[k] = 1.0;
4125 mode_lib->vba.BytePerPixelInDETC[k] = 2.0;
4126 } else {
4127 mode_lib->vba.BytePerPixelInDETY[k] = 4.0 / 3;
4128 mode_lib->vba.BytePerPixelInDETC[k] = 8.0 / 3;
4129 }
4130 }
4131 mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond = 0.0;
4132 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4133 mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.SwathWidthYSingleDPP[k]
4134 * (dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0)
4135 * mode_lib->vba.VRatio[k]
4136 + dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0)
4137 / 2.0 * mode_lib->vba.VRatio[k] / 2)
4138 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
4139 if (mode_lib->vba.DCCEnable[k] == true) {
4140 mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
4141 					* (1 + 1.0 / 256);
4142 }
4143 if (mode_lib->vba.VirtualMemoryEnable == true
4144 && mode_lib->vba.SourceScan[k] != dm_horz
4145 && (mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s
4146 || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s_x
4147 || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
4148 || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x)) {
4149 mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
4150 					* (1 + 1.0 / 64);
4151 } else if (mode_lib->vba.VirtualMemoryEnable == true
4152 && mode_lib->vba.SourceScan[k] == dm_horz
4153 && (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
4154 || mode_lib->vba.SourcePixelFormat[k] == dm_444_32)
4155 && (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s
4156 || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_t
4157 || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_x
4158 || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
4159 || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
4160 || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
4161 || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x)) {
4162 mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
4163 						* (1.0 + 1.0 / 256.0);
4164 } else if (mode_lib->vba.VirtualMemoryEnable == true) {
4165 mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
4166 						* (1.0 + 1.0 / 512.0);
4167 }
4168 mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond =
4169 mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond
4170 + mode_lib->vba.ReadBandwidth[k] / 1000.0;
4171 }
4172 mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond = 0.0;
4173 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4174 if (mode_lib->vba.WritebackEnable[k] == true
4175 && mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
4176 mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
4177 * mode_lib->vba.WritebackDestinationHeight[k]
4178 / (mode_lib->vba.WritebackSourceHeight[k]
4179 * mode_lib->vba.HTotal[k]
4180 / mode_lib->vba.PixelClock[k]) * 4.0;
4181 } else if (mode_lib->vba.WritebackEnable[k] == true
4182 && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
4183 mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
4184 * mode_lib->vba.WritebackDestinationHeight[k]
4185 / (mode_lib->vba.WritebackSourceHeight[k]
4186 * mode_lib->vba.HTotal[k]
4187 / mode_lib->vba.PixelClock[k]) * 3.0;
4188 } else if (mode_lib->vba.WritebackEnable[k] == true) {
4189 mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
4190 * mode_lib->vba.WritebackDestinationHeight[k]
4191 / (mode_lib->vba.WritebackSourceHeight[k]
4192 * mode_lib->vba.HTotal[k]
4193 / mode_lib->vba.PixelClock[k]) * 1.5;
4194 } else {
4195 mode_lib->vba.WriteBandwidth[k] = 0.0;
4196 }
4197 mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond =
4198 mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond
4199 + mode_lib->vba.WriteBandwidth[k] / 1000.0;
4200 }
4201 mode_lib->vba.TotalBandwidthConsumedGBytePerSecond =
4202 mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond
4203 + mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond;
4204 mode_lib->vba.DCCEnabledInAnyPlane = false;
4205 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4206 if (mode_lib->vba.DCCEnable[k] == true) {
4207 mode_lib->vba.DCCEnabledInAnyPlane = true;
4208 }
4209 }
4210 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
4211 mode_lib->vba.FabricAndDRAMBandwidthPerState[i] = dml_min(
4212 mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
4213 * mode_lib->vba.DRAMChannelWidth,
4214 mode_lib->vba.FabricClockPerState[i]
4215 * mode_lib->vba.FabricDatapathToDCNDataReturn)
4216 / 1000;
4217 mode_lib->vba.ReturnBWToDCNPerState = dml_min(
4218 mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
4219 mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000.0)
4220 * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency
4221 / 100;
4222 mode_lib->vba.ReturnBWPerState[i] = mode_lib->vba.ReturnBWToDCNPerState;
4223 if (mode_lib->vba.DCCEnabledInAnyPlane == true
4224 && mode_lib->vba.ReturnBWToDCNPerState
4225 > mode_lib->vba.DCFCLKPerState[i]
4226 * mode_lib->vba.ReturnBusWidth
4227 / 4.0) {
4228 mode_lib->vba.ReturnBWPerState[i] =
4229 dml_min(
4230 mode_lib->vba.ReturnBWPerState[i],
4231 mode_lib->vba.ReturnBWToDCNPerState * 4.0
4232 * (1.0
4233 - mode_lib->vba.UrgentLatency
4234 / ((mode_lib->vba.ROBBufferSizeInKByte
4235 - mode_lib->vba.PixelChunkSizeInKByte)
4236 * 1024.0
4237 / (mode_lib->vba.ReturnBWToDCNPerState
4238 - mode_lib->vba.DCFCLKPerState[i]
4239 * mode_lib->vba.ReturnBusWidth
4240 / 4.0)
4241 + mode_lib->vba.UrgentLatency)));
4242 }
4243 mode_lib->vba.CriticalPoint =
4244 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i]
4245 * mode_lib->vba.UrgentLatency
4246 / (mode_lib->vba.ReturnBWToDCNPerState
4247 * mode_lib->vba.UrgentLatency
4248 + (mode_lib->vba.ROBBufferSizeInKByte
4249 - mode_lib->vba.PixelChunkSizeInKByte)
4250 * 1024.0);
4251 if (mode_lib->vba.DCCEnabledInAnyPlane == true && mode_lib->vba.CriticalPoint > 1.0
4252 && mode_lib->vba.CriticalPoint < 4.0) {
4253 mode_lib->vba.ReturnBWPerState[i] =
4254 dml_min(
4255 mode_lib->vba.ReturnBWPerState[i],
4256 dml_pow(
4257 4.0
4258 * mode_lib->vba.ReturnBWToDCNPerState
4259 * (mode_lib->vba.ROBBufferSizeInKByte
4260 - mode_lib->vba.PixelChunkSizeInKByte)
4261 * 1024.0
4262 * mode_lib->vba.ReturnBusWidth
4263 * mode_lib->vba.DCFCLKPerState[i]
4264 * mode_lib->vba.UrgentLatency
4265 / (mode_lib->vba.ReturnBWToDCNPerState
4266 * mode_lib->vba.UrgentLatency
4267 + (mode_lib->vba.ROBBufferSizeInKByte
4268 - mode_lib->vba.PixelChunkSizeInKByte)
4269 * 1024.0),
4270 2));
4271 }
4272 mode_lib->vba.ReturnBWToDCNPerState = dml_min(
4273 mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
4274 mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000.0);
4275 if (mode_lib->vba.DCCEnabledInAnyPlane == true
4276 && mode_lib->vba.ReturnBWToDCNPerState
4277 > mode_lib->vba.DCFCLKPerState[i]
4278 * mode_lib->vba.ReturnBusWidth
4279 / 4.0) {
4280 mode_lib->vba.ReturnBWPerState[i] =
4281 dml_min(
4282 mode_lib->vba.ReturnBWPerState[i],
4283 mode_lib->vba.ReturnBWToDCNPerState * 4.0
4284 * (1.0
4285 - mode_lib->vba.UrgentLatency
4286 / ((mode_lib->vba.ROBBufferSizeInKByte
4287 - mode_lib->vba.PixelChunkSizeInKByte)
4288 * 1024.0
4289 / (mode_lib->vba.ReturnBWToDCNPerState
4290 - mode_lib->vba.DCFCLKPerState[i]
4291 * mode_lib->vba.ReturnBusWidth
4292 / 4.0)
4293 + mode_lib->vba.UrgentLatency)));
4294 }
4295 mode_lib->vba.CriticalPoint =
4296 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i]
4297 * mode_lib->vba.UrgentLatency
4298 / (mode_lib->vba.ReturnBWToDCNPerState
4299 * mode_lib->vba.UrgentLatency
4300 + (mode_lib->vba.ROBBufferSizeInKByte
4301 - mode_lib->vba.PixelChunkSizeInKByte)
4302 * 1024.0);
4303 if (mode_lib->vba.DCCEnabledInAnyPlane == true && mode_lib->vba.CriticalPoint > 1.0
4304 && mode_lib->vba.CriticalPoint < 4.0) {
4305 mode_lib->vba.ReturnBWPerState[i] =
4306 dml_min(
4307 mode_lib->vba.ReturnBWPerState[i],
4308 dml_pow(
4309 4.0
4310 * mode_lib->vba.ReturnBWToDCNPerState
4311 * (mode_lib->vba.ROBBufferSizeInKByte
4312 - mode_lib->vba.PixelChunkSizeInKByte)
4313 * 1024.0
4314 * mode_lib->vba.ReturnBusWidth
4315 * mode_lib->vba.DCFCLKPerState[i]
4316 * mode_lib->vba.UrgentLatency
4317 / (mode_lib->vba.ReturnBWToDCNPerState
4318 * mode_lib->vba.UrgentLatency
4319 + (mode_lib->vba.ROBBufferSizeInKByte
4320 - mode_lib->vba.PixelChunkSizeInKByte)
4321 * 1024.0),
4322 2));
4323 }
4324 }
4325 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
4326 if ((mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond * 1000.0
4327 <= mode_lib->vba.ReturnBWPerState[i])
4328 && (mode_lib->vba.TotalBandwidthConsumedGBytePerSecond * 1000.0
4329 <= mode_lib->vba.FabricAndDRAMBandwidthPerState[i]
4330 * 1000.0
4331 * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency
4332 / 100.0)) {
4333 mode_lib->vba.BandwidthSupport[i] = true;
4334 } else {
4335 mode_lib->vba.BandwidthSupport[i] = false;
4336 }
4337 }
4338 	/*Writeback Latency Support Check*/
4339
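	/*
	 * A plane with writeback enabled is supported only if its write
	 * bandwidth can be absorbed by the writeback interface buffers
	 * (luma plus chroma for 4:4:4 32bpp, otherwise 1.5 times the smaller
	 * of the luma buffer and twice the chroma buffer) within the
	 * configured writeback latency.
	 */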
4340 mode_lib->vba.WritebackLatencySupport = true;
4341 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4342 if (mode_lib->vba.WritebackEnable[k] == true) {
4343 if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
4344 if (mode_lib->vba.WriteBandwidth[k]
4345 > (mode_lib->vba.WritebackInterfaceLumaBufferSize
4346 + mode_lib->vba.WritebackInterfaceChromaBufferSize)
4347 / mode_lib->vba.WritebackLatency) {
4348 mode_lib->vba.WritebackLatencySupport = false;
4349 }
4350 } else {
4351 if (mode_lib->vba.WriteBandwidth[k]
4352 > 1.5
4353 * dml_min(
4354 mode_lib->vba.WritebackInterfaceLumaBufferSize,
4355 2.0
4356 * mode_lib->vba.WritebackInterfaceChromaBufferSize)
4357 / mode_lib->vba.WritebackLatency) {
4358 mode_lib->vba.WritebackLatencySupport = false;
4359 }
4360 }
4361 }
4362 }
4363 /*Re-ordering Buffer Support Check*/
4364
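	/*
	 * Per voltage state, the urgent round-trip plus out-of-order return
	 * latency must be covered by the time it takes to drain the
	 * re-ordering buffer (ROB size minus one pixel chunk) at that state's
	 * return bandwidth; otherwise ROBSupport[i] is cleared.
	 */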
4365 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
4366 mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
4367 (mode_lib->vba.RoundTripPingLatencyCycles + 32.0)
4368 / mode_lib->vba.DCFCLKPerState[i]
4369 + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
4370 * mode_lib->vba.NumberOfChannels
4371 / mode_lib->vba.ReturnBWPerState[i];
4372 if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte)
4373 * 1024.0 / mode_lib->vba.ReturnBWPerState[i]
4374 > mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
4375 mode_lib->vba.ROBSupport[i] = true;
4376 } else {
4377 mode_lib->vba.ROBSupport[i] = false;
4378 }
4379 }
4380 /*Writeback Mode Support Check*/
4381
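	/*
	 * Writeback mode is rejected when the number of active writeback
	 * outputs exceeds MaxNumWriteback, or when a plane requests 10-bit
	 * 4:2:0 writeback on hardware that does not support it.
	 */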
4382 mode_lib->vba.TotalNumberOfActiveWriteback = 0;
4383 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4384 if (mode_lib->vba.WritebackEnable[k] == true) {
4385 mode_lib->vba.TotalNumberOfActiveWriteback =
4386 mode_lib->vba.TotalNumberOfActiveWriteback + 1;
4387 }
4388 }
4389 mode_lib->vba.WritebackModeSupport = true;
4390 if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) {
4391 mode_lib->vba.WritebackModeSupport = false;
4392 }
4393 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4394 if (mode_lib->vba.WritebackEnable[k] == true
4395 && mode_lib->vba.Writeback10bpc420Supported != true
4396 && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
4397 mode_lib->vba.WritebackModeSupport = false;
4398 }
4399 }
4400 /*Writeback Scale Ratio and Taps Support Check*/
4401
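	/*
	 * Writeback scaling is valid only if the requested horizontal and
	 * vertical ratios and the luma/chroma tap counts stay within the
	 * hardware limits (and scaling is allowed at all), and if the chosen
	 * vertical taps fit in the writeback line buffer for the selected
	 * pixel format and destination width.
	 */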
4402 mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
4403 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4404 if (mode_lib->vba.WritebackEnable[k] == true) {
4405 if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false
4406 && (mode_lib->vba.WritebackHRatio[k] != 1.0
4407 || mode_lib->vba.WritebackVRatio[k] != 1.0)) {
4408 mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
4409 }
4410 if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
4411 || mode_lib->vba.WritebackVRatio[k]
4412 > mode_lib->vba.WritebackMaxVSCLRatio
4413 || mode_lib->vba.WritebackHRatio[k]
4414 < mode_lib->vba.WritebackMinHSCLRatio
4415 || mode_lib->vba.WritebackVRatio[k]
4416 < mode_lib->vba.WritebackMinVSCLRatio
4417 || mode_lib->vba.WritebackLumaHTaps[k]
4418 > mode_lib->vba.WritebackMaxHSCLTaps
4419 || mode_lib->vba.WritebackLumaVTaps[k]
4420 > mode_lib->vba.WritebackMaxVSCLTaps
4421 || mode_lib->vba.WritebackHRatio[k]
4422 > mode_lib->vba.WritebackLumaHTaps[k]
4423 || mode_lib->vba.WritebackVRatio[k]
4424 > mode_lib->vba.WritebackLumaVTaps[k]
4425 || (mode_lib->vba.WritebackLumaHTaps[k] > 2.0
4426 && ((mode_lib->vba.WritebackLumaHTaps[k] % 2)
4427 == 1))
4428 || (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32
4429 && (mode_lib->vba.WritebackChromaHTaps[k]
4430 > mode_lib->vba.WritebackMaxHSCLTaps
4431 || mode_lib->vba.WritebackChromaVTaps[k]
4432 > mode_lib->vba.WritebackMaxVSCLTaps
4433 || 2.0
4434 * mode_lib->vba.WritebackHRatio[k]
4435 > mode_lib->vba.WritebackChromaHTaps[k]
4436 || 2.0
4437 * mode_lib->vba.WritebackVRatio[k]
4438 > mode_lib->vba.WritebackChromaVTaps[k]
4439 || (mode_lib->vba.WritebackChromaHTaps[k] > 2.0
4440 && ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) {
4441 mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
4442 }
4443 if (mode_lib->vba.WritebackVRatio[k] < 1.0) {
4444 mode_lib->vba.WritebackLumaVExtra =
4445 dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0);
4446 } else {
4447 mode_lib->vba.WritebackLumaVExtra = -1;
4448 }
4449 if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32
4450 && mode_lib->vba.WritebackLumaVTaps[k]
4451 > (mode_lib->vba.WritebackLineBufferLumaBufferSize
4452 + mode_lib->vba.WritebackLineBufferChromaBufferSize)
4453 / 3.0
4454 / mode_lib->vba.WritebackDestinationWidth[k]
4455 - mode_lib->vba.WritebackLumaVExtra)
4456 || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
4457 && mode_lib->vba.WritebackLumaVTaps[k]
4458 > mode_lib->vba.WritebackLineBufferLumaBufferSize
4459 / mode_lib->vba.WritebackDestinationWidth[k]
4460 - mode_lib->vba.WritebackLumaVExtra)
4461 || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
4462 && mode_lib->vba.WritebackLumaVTaps[k]
4463 > mode_lib->vba.WritebackLineBufferLumaBufferSize
4464 * 8.0 / 10.0
4465 / mode_lib->vba.WritebackDestinationWidth[k]
4466 - mode_lib->vba.WritebackLumaVExtra)) {
4467 mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
4468 }
4469 if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) {
4470 mode_lib->vba.WritebackChromaVExtra = 0.0;
4471 } else {
4472 mode_lib->vba.WritebackChromaVExtra = -1;
4473 }
4474 if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
4475 && mode_lib->vba.WritebackChromaVTaps[k]
4476 > mode_lib->vba.WritebackLineBufferChromaBufferSize
4477 / mode_lib->vba.WritebackDestinationWidth[k]
4478 - mode_lib->vba.WritebackChromaVExtra)
4479 || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
4480 && mode_lib->vba.WritebackChromaVTaps[k]
4481 > mode_lib->vba.WritebackLineBufferChromaBufferSize
4482 * 8.0 / 10.0
4483 / mode_lib->vba.WritebackDestinationWidth[k]
4484 - mode_lib->vba.WritebackChromaVExtra)) {
4485 mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
4486 }
4487 }
4488 }
4489 	/*Maximum DISPCLK/DPPCLK Support Check*/
4490
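	/*
	 * This block first collects the DISPCLK needed by any writeback path
	 * and the minimum DPPCLK each plane would need on a single DPP
	 * (driven by the scaler throughput factors and tap counts), along
	 * with the swath and 256B block geometry.  For every voltage state it
	 * then decides whether a plane runs on one or two DPPs (or with ODM
	 * combine), accumulates the required DISPCLK/DPPCLK with down-spread
	 * and ramping margins, and marks the state unsupported if either
	 * clock exceeds the maximum rounded down to DFS granularity.
	 */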
4491 mode_lib->vba.WritebackRequiredDISPCLK = 0.0;
4492 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4493 if (mode_lib->vba.WritebackEnable[k] == true) {
4494 mode_lib->vba.WritebackRequiredDISPCLK =
4495 dml_max(
4496 mode_lib->vba.WritebackRequiredDISPCLK,
4497 CalculateWriteBackDISPCLK(
4498 mode_lib->vba.WritebackPixelFormat[k],
4499 mode_lib->vba.PixelClock[k],
4500 mode_lib->vba.WritebackHRatio[k],
4501 mode_lib->vba.WritebackVRatio[k],
4502 mode_lib->vba.WritebackLumaHTaps[k],
4503 mode_lib->vba.WritebackLumaVTaps[k],
4504 mode_lib->vba.WritebackChromaHTaps[k],
4505 mode_lib->vba.WritebackChromaVTaps[k],
4506 mode_lib->vba.WritebackDestinationWidth[k],
4507 mode_lib->vba.HTotal[k],
4508 mode_lib->vba.WritebackChromaLineBufferWidth));
4509 }
4510 }
4511 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4512 if (mode_lib->vba.HRatio[k] > 1.0) {
4513 mode_lib->vba.PSCL_FACTOR[k] = dml_min(
4514 mode_lib->vba.MaxDCHUBToPSCLThroughput,
4515 mode_lib->vba.MaxPSCLToLBThroughput
4516 * mode_lib->vba.HRatio[k]
4517 / dml_ceil(
4518 mode_lib->vba.htaps[k]
4519 / 6.0,
4520 1.0));
4521 } else {
4522 mode_lib->vba.PSCL_FACTOR[k] = dml_min(
4523 mode_lib->vba.MaxDCHUBToPSCLThroughput,
4524 mode_lib->vba.MaxPSCLToLBThroughput);
4525 }
4526 if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
4527 mode_lib->vba.PSCL_FACTOR_CHROMA[k] = 0.0;
4528 mode_lib->vba.MinDPPCLKUsingSingleDPP[k] =
4529 mode_lib->vba.PixelClock[k]
4530 * dml_max3(
4531 mode_lib->vba.vtaps[k] / 6.0
4532 * dml_min(
4533 1.0,
4534 mode_lib->vba.HRatio[k]),
4535 mode_lib->vba.HRatio[k]
4536 * mode_lib->vba.VRatio[k]
4537 / mode_lib->vba.PSCL_FACTOR[k],
4538 1.0);
4539 if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0)
4540 && mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4541 < 2.0 * mode_lib->vba.PixelClock[k]) {
4542 mode_lib->vba.MinDPPCLKUsingSingleDPP[k] = 2.0
4543 * mode_lib->vba.PixelClock[k];
4544 }
4545 } else {
4546 if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) {
4547 mode_lib->vba.PSCL_FACTOR_CHROMA[k] =
4548 dml_min(
4549 mode_lib->vba.MaxDCHUBToPSCLThroughput,
4550 mode_lib->vba.MaxPSCLToLBThroughput
4551 * mode_lib->vba.HRatio[k]
4552 / 2.0
4553 / dml_ceil(
4554 mode_lib->vba.HTAPsChroma[k]
4555 / 6.0,
4556 1.0));
4557 } else {
4558 mode_lib->vba.PSCL_FACTOR_CHROMA[k] = dml_min(
4559 mode_lib->vba.MaxDCHUBToPSCLThroughput,
4560 mode_lib->vba.MaxPSCLToLBThroughput);
4561 }
4562 mode_lib->vba.MinDPPCLKUsingSingleDPP[k] =
4563 mode_lib->vba.PixelClock[k]
4564 * dml_max5(
4565 mode_lib->vba.vtaps[k] / 6.0
4566 * dml_min(
4567 1.0,
4568 mode_lib->vba.HRatio[k]),
4569 mode_lib->vba.HRatio[k]
4570 * mode_lib->vba.VRatio[k]
4571 / mode_lib->vba.PSCL_FACTOR[k],
4572 mode_lib->vba.VTAPsChroma[k]
4573 / 6.0
4574 * dml_min(
4575 1.0,
4576 mode_lib->vba.HRatio[k]
4577 / 2.0),
4578 mode_lib->vba.HRatio[k]
4579 * mode_lib->vba.VRatio[k]
4580 / 4.0
4581 / mode_lib->vba.PSCL_FACTOR_CHROMA[k],
4582 1.0);
4583 if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0
4584 || mode_lib->vba.HTAPsChroma[k] > 6.0
4585 || mode_lib->vba.VTAPsChroma[k] > 6.0)
4586 && mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4587 < 2.0 * mode_lib->vba.PixelClock[k]) {
4588 mode_lib->vba.MinDPPCLKUsingSingleDPP[k] = 2.0
4589 * mode_lib->vba.PixelClock[k];
4590 }
4591 }
4592 }
4593 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4594 Calculate256BBlockSizes(
4595 mode_lib->vba.SourcePixelFormat[k],
4596 mode_lib->vba.SurfaceTiling[k],
4597 dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
4598 dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
4599 &mode_lib->vba.Read256BlockHeightY[k],
4600 &mode_lib->vba.Read256BlockHeightC[k],
4601 &mode_lib->vba.Read256BlockWidthY[k],
4602 &mode_lib->vba.Read256BlockWidthC[k]);
4603 if (mode_lib->vba.SourceScan[k] == dm_horz) {
4604 mode_lib->vba.MaxSwathHeightY[k] = mode_lib->vba.Read256BlockHeightY[k];
4605 mode_lib->vba.MaxSwathHeightC[k] = mode_lib->vba.Read256BlockHeightC[k];
4606 } else {
4607 mode_lib->vba.MaxSwathHeightY[k] = mode_lib->vba.Read256BlockWidthY[k];
4608 mode_lib->vba.MaxSwathHeightC[k] = mode_lib->vba.Read256BlockWidthC[k];
4609 }
4610 if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
4611 || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
4612 || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
4613 || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16
4614 || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) {
4615 if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
4616 || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
4617 && (mode_lib->vba.SurfaceTiling[k]
4618 == dm_sw_4kb_s
4619 || mode_lib->vba.SurfaceTiling[k]
4620 == dm_sw_4kb_s_x
4621 || mode_lib->vba.SurfaceTiling[k]
4622 == dm_sw_64kb_s
4623 || mode_lib->vba.SurfaceTiling[k]
4624 == dm_sw_64kb_s_t
4625 || mode_lib->vba.SurfaceTiling[k]
4626 == dm_sw_64kb_s_x
4627 || mode_lib->vba.SurfaceTiling[k]
4628 == dm_sw_var_s
4629 || mode_lib->vba.SurfaceTiling[k]
4630 == dm_sw_var_s_x)
4631 && mode_lib->vba.SourceScan[k] == dm_horz)) {
4632 mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
4633 } else {
4634 mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k]
4635 / 2.0;
4636 }
4637 mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
4638 } else {
4639 if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
4640 mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
4641 mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
4642 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
4643 && mode_lib->vba.SourceScan[k] == dm_horz) {
4644 mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k]
4645 / 2.0;
4646 mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
4647 } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
4648 && mode_lib->vba.SourceScan[k] == dm_horz) {
4649 mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k]
4650 / 2.0;
4651 mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
4652 } else {
4653 mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
4654 mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
4655 }
4656 }
4657 if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
4658 mode_lib->vba.MaximumSwathWidthSupport = 8192.0;
4659 } else {
4660 mode_lib->vba.MaximumSwathWidthSupport = 5120.0;
4661 }
4662 mode_lib->vba.MaximumSwathWidthInDETBuffer =
4663 dml_min(
4664 mode_lib->vba.MaximumSwathWidthSupport,
4665 mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0
4666 / (mode_lib->vba.BytePerPixelInDETY[k]
4667 * mode_lib->vba.MinSwathHeightY[k]
4668 + mode_lib->vba.BytePerPixelInDETC[k]
4669 / 2.0
4670 * mode_lib->vba.MinSwathHeightC[k]));
4671 if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
4672 mode_lib->vba.MaximumSwathWidthInLineBuffer =
4673 mode_lib->vba.LineBufferSize
4674 * dml_max(mode_lib->vba.HRatio[k], 1.0)
4675 / mode_lib->vba.LBBitPerPixel[k]
4676 / (mode_lib->vba.vtaps[k]
4677 + dml_max(
4678 dml_ceil(
4679 mode_lib->vba.VRatio[k],
4680 1.0)
4681 - 2,
4682 0.0));
4683 } else {
4684 mode_lib->vba.MaximumSwathWidthInLineBuffer =
4685 dml_min(
4686 mode_lib->vba.LineBufferSize
4687 * dml_max(
4688 mode_lib->vba.HRatio[k],
4689 1.0)
4690 / mode_lib->vba.LBBitPerPixel[k]
4691 / (mode_lib->vba.vtaps[k]
4692 + dml_max(
4693 dml_ceil(
4694 mode_lib->vba.VRatio[k],
4695 1.0)
4696 - 2,
4697 0.0)),
4698 2.0 * mode_lib->vba.LineBufferSize
4699 * dml_max(
4700 mode_lib->vba.HRatio[k]
4701 / 2.0,
4702 1.0)
4703 / mode_lib->vba.LBBitPerPixel[k]
4704 / (mode_lib->vba.VTAPsChroma[k]
4705 + dml_max(
4706 dml_ceil(
4707 mode_lib->vba.VRatio[k]
4708 / 2.0,
4709 1.0)
4710 - 2,
4711 0.0)));
4712 }
4713 mode_lib->vba.MaximumSwathWidth[k] = dml_min(
4714 mode_lib->vba.MaximumSwathWidthInDETBuffer,
4715 mode_lib->vba.MaximumSwathWidthInLineBuffer);
4716 }
4717 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
4718 mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
4719 mode_lib->vba.MaxDispclk[i],
4720 mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
4721 mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
4722 mode_lib->vba.MaxDppclk[i],
4723 mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
4724 mode_lib->vba.RequiredDISPCLK[i] = 0.0;
4725 mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
4726 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4727 mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
4728 mode_lib->vba.PixelClock[k]
4729 * (1.0
4730 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4731 / 100.0)
4732 * (1.0
4733 + mode_lib->vba.DISPCLKRampingMargin
4734 / 100.0);
4735 if (mode_lib->vba.ODMCapability == true
4736 && mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
4737 > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
4738 mode_lib->vba.ODMCombineEnablePerState[i][k] = true;
4739 mode_lib->vba.PlaneRequiredDISPCLK =
4740 mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
4741 / 2.0;
4742 } else {
4743 mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
4744 mode_lib->vba.PlaneRequiredDISPCLK =
4745 mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
4746 }
4747 if (mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4748 * (1.0
4749 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4750 / 100.0)
4751 <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
4752 && mode_lib->vba.SwathWidthYSingleDPP[k]
4753 <= mode_lib->vba.MaximumSwathWidth[k]
4754 && mode_lib->vba.ODMCombineEnablePerState[i][k] == false) {
4755 mode_lib->vba.NoOfDPP[i][k] = 1;
4756 mode_lib->vba.RequiredDPPCLK[i][k] =
4757 mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4758 * (1.0
4759 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4760 / 100.0);
4761 } else {
4762 mode_lib->vba.NoOfDPP[i][k] = 2;
4763 mode_lib->vba.RequiredDPPCLK[i][k] =
4764 mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4765 * (1.0
4766 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4767 / 100.0)
4768 / 2.0;
4769 }
4770 mode_lib->vba.RequiredDISPCLK[i] = dml_max(
4771 mode_lib->vba.RequiredDISPCLK[i],
4772 mode_lib->vba.PlaneRequiredDISPCLK);
4773 if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k] / mode_lib->vba.NoOfDPP[i][k]
4774 * (1.0
4775 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4776 / 100.0)
4777 > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
4778 || (mode_lib->vba.PlaneRequiredDISPCLK
4779 > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
4780 mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
4781 }
4782 }
4783 mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
4784 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4785 mode_lib->vba.TotalNumberOfActiveDPP[i] =
4786 mode_lib->vba.TotalNumberOfActiveDPP[i]
4787 + mode_lib->vba.NoOfDPP[i][k];
4788 }
4789 if ((mode_lib->vba.MaxDispclk[i] == mode_lib->vba.MaxDispclk[DC__VOLTAGE_STATES]
4790 && mode_lib->vba.MaxDppclk[i]
4791 == mode_lib->vba.MaxDppclk[DC__VOLTAGE_STATES])
4792 && (mode_lib->vba.TotalNumberOfActiveDPP[i]
4793 > mode_lib->vba.MaxNumDPP
4794 || mode_lib->vba.DISPCLK_DPPCLK_Support[i] == false)) {
4795 mode_lib->vba.RequiredDISPCLK[i] = 0.0;
4796 mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
4797 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4798 mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
4799 mode_lib->vba.PixelClock[k]
4800 * (1.0
4801 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4802 / 100.0);
4803 if (mode_lib->vba.ODMCapability == true
4804 && mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
4805 > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
4806 mode_lib->vba.ODMCombineEnablePerState[i][k] = true;
4807 mode_lib->vba.PlaneRequiredDISPCLK =
4808 mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
4809 / 2.0;
4810 } else {
4811 mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
4812 mode_lib->vba.PlaneRequiredDISPCLK =
4813 mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
4814 }
4815 if (mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4816 * (1.0
4817 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4818 / 100.0)
4819 <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
4820 && mode_lib->vba.SwathWidthYSingleDPP[k]
4821 <= mode_lib->vba.MaximumSwathWidth[k]
4822 && mode_lib->vba.ODMCombineEnablePerState[i][k]
4823 == false) {
4824 mode_lib->vba.NoOfDPP[i][k] = 1;
4825 mode_lib->vba.RequiredDPPCLK[i][k] =
4826 mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4827 * (1.0
4828 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4829 / 100.0);
4830 } else {
4831 mode_lib->vba.NoOfDPP[i][k] = 2;
4832 mode_lib->vba.RequiredDPPCLK[i][k] =
4833 mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4834 * (1.0
4835 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4836 / 100.0)
4837 / 2.0;
4838 }
4839 mode_lib->vba.RequiredDISPCLK[i] = dml_max(
4840 mode_lib->vba.RequiredDISPCLK[i],
4841 mode_lib->vba.PlaneRequiredDISPCLK);
4842 if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4843 / mode_lib->vba.NoOfDPP[i][k]
4844 * (1.0
4845 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4846 / 100.0)
4847 > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
4848 || (mode_lib->vba.PlaneRequiredDISPCLK
4849 > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
4850 mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
4851 }
4852 }
4853 mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
4854 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4855 mode_lib->vba.TotalNumberOfActiveDPP[i] =
4856 mode_lib->vba.TotalNumberOfActiveDPP[i]
4857 + mode_lib->vba.NoOfDPP[i][k];
4858 }
4859 }
4860 if (mode_lib->vba.TotalNumberOfActiveDPP[i] > mode_lib->vba.MaxNumDPP) {
4861 mode_lib->vba.RequiredDISPCLK[i] = 0.0;
4862 mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
4863 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4864 mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
4865 if (mode_lib->vba.SwathWidthYSingleDPP[k]
4866 <= mode_lib->vba.MaximumSwathWidth[k]) {
4867 mode_lib->vba.NoOfDPP[i][k] = 1;
4868 mode_lib->vba.RequiredDPPCLK[i][k] =
4869 mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4870 * (1.0
4871 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4872 / 100.0);
4873 } else {
4874 mode_lib->vba.NoOfDPP[i][k] = 2;
4875 mode_lib->vba.RequiredDPPCLK[i][k] =
4876 mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4877 * (1.0
4878 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4879 / 100.0)
4880 / 2.0;
4881 }
4882 if (!(mode_lib->vba.MaxDispclk[i]
4883 == mode_lib->vba.MaxDispclk[DC__VOLTAGE_STATES]
4884 && mode_lib->vba.MaxDppclk[i]
4885 == mode_lib->vba.MaxDppclk[DC__VOLTAGE_STATES])) {
4886 mode_lib->vba.PlaneRequiredDISPCLK =
4887 mode_lib->vba.PixelClock[k]
4888 * (1.0
4889 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4890 / 100.0)
4891 * (1.0
4892 + mode_lib->vba.DISPCLKRampingMargin
4893 / 100.0);
4894 } else {
4895 mode_lib->vba.PlaneRequiredDISPCLK =
4896 mode_lib->vba.PixelClock[k]
4897 * (1.0
4898 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4899 / 100.0);
4900 }
4901 mode_lib->vba.RequiredDISPCLK[i] = dml_max(
4902 mode_lib->vba.RequiredDISPCLK[i],
4903 mode_lib->vba.PlaneRequiredDISPCLK);
4904 if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
4905 / mode_lib->vba.NoOfDPP[i][k]
4906 * (1.0
4907 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
4908 / 100.0)
4909 > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
4910 || (mode_lib->vba.PlaneRequiredDISPCLK
4911 > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
4912 mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
4913 }
4914 }
4915 mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
4916 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4917 mode_lib->vba.TotalNumberOfActiveDPP[i] =
4918 mode_lib->vba.TotalNumberOfActiveDPP[i]
4919 + mode_lib->vba.NoOfDPP[i][k];
4920 }
4921 }
4922 mode_lib->vba.RequiredDISPCLK[i] = dml_max(
4923 mode_lib->vba.RequiredDISPCLK[i],
4924 mode_lib->vba.WritebackRequiredDISPCLK);
4925 if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity
4926 < mode_lib->vba.WritebackRequiredDISPCLK) {
4927 mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
4928 }
4929 }
4930 /*Viewport Size Check*/
4931
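	/*
	 * A state supports the viewport only if the swath width each DPP has
	 * to fetch (half of the single-DPP width, or bounded by half the
	 * active width scaled by HRatio when ODM combine is used) fits within
	 * the maximum swath width allowed by the DET and line buffers.
	 */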
4932 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
4933 mode_lib->vba.ViewportSizeSupport[i] = true;
4934 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4935 if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
4936 if (dml_min(mode_lib->vba.SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
4937 > mode_lib->vba.MaximumSwathWidth[k]) {
4938 mode_lib->vba.ViewportSizeSupport[i] = false;
4939 }
4940 } else {
4941 if (mode_lib->vba.SwathWidthYSingleDPP[k] / 2.0
4942 > mode_lib->vba.MaximumSwathWidth[k]) {
4943 mode_lib->vba.ViewportSizeSupport[i] = false;
4944 }
4945 }
4946 }
4947 }
4948 /*Total Available Pipes Support Check*/
4949
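	/* A state is supported only if its total active DPP count fits in MaxNumDPP. */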
4950 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
4951 if (mode_lib->vba.TotalNumberOfActiveDPP[i] <= mode_lib->vba.MaxNumDPP) {
4952 mode_lib->vba.TotalAvailablePipesSupport[i] = true;
4953 } else {
4954 mode_lib->vba.TotalAvailablePipesSupport[i] = false;
4955 }
4956 }
4957 /*Total Available OTG Support Check*/
4958
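	/*
	 * Each plane that drives its own timing (BlendingAndTiming[k] == k)
	 * consumes one OTG; the mode is rejected if that count exceeds
	 * MaxNumOTG.
	 */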
4959 mode_lib->vba.TotalNumberOfActiveOTG = 0.0;
4960 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4961 if (mode_lib->vba.BlendingAndTiming[k] == k) {
4962 mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG
4963 + 1.0;
4964 }
4965 }
4966 if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) {
4967 mode_lib->vba.NumberOfOTGSupport = true;
4968 } else {
4969 mode_lib->vba.NumberOfOTGSupport = false;
4970 }
4971 /*Display IO and DSC Support Check*/
4972
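	/*
	 * First reject DSC input depths other than 8/10/12 bpc.  Then, per
	 * state and per timing-owning plane, compute the achievable output
	 * bpp: HDMI is limited by the state's PHYCLK (capped at 600), while
	 * DP/eDP is evaluated at the three link rates used below (270, 540,
	 * 810) with down-spread and, for DP, FEC overhead applied, falling
	 * back to the DSC bpp when DSC is enabled or the uncompressed bpp
	 * comes out invalid.
	 */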
4973 mode_lib->vba.NonsupportedDSCInputBPC = false;
4974 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4975 if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
4976 || mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
4977 || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) {
4978 mode_lib->vba.NonsupportedDSCInputBPC = true;
4979 }
4980 }
4981 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
4982 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4983 mode_lib->vba.RequiresDSC[i][k] = 0;
4984 mode_lib->vba.RequiresFEC[i][k] = 0;
4985 if (mode_lib->vba.BlendingAndTiming[k] == k) {
4986 if (mode_lib->vba.Output[k] == dm_hdmi) {
4987 mode_lib->vba.RequiresDSC[i][k] = 0;
4988 mode_lib->vba.RequiresFEC[i][k] = 0;
4989 mode_lib->vba.OutputBppPerState[i][k] =
4990 TruncToValidBPP(dml_min(600.0, mode_lib->vba.PHYCLKPerState[i])
4991 / mode_lib->vba.PixelClockBackEnd[k] * 24,
4992 false,
4993 mode_lib->vba.Output[k],
4994 mode_lib->vba.OutputFormat[k],
4995 mode_lib->vba.DSCInputBitPerComponent[k]);
4996 } else if (mode_lib->vba.Output[k] == dm_dp
4997 || mode_lib->vba.Output[k] == dm_edp) {
4998 if (mode_lib->vba.Output[k] == dm_edp) {
4999 mode_lib->vba.EffectiveFECOverhead = 0.0;
5000 } else {
5001 mode_lib->vba.EffectiveFECOverhead =
5002 mode_lib->vba.FECOverhead;
5003 }
5004 if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) {
5005 mode_lib->vba.Outbpp =
5006 TruncToValidBPP((1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0
5007 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
5008 false,
5009 mode_lib->vba.Output[k],
5010 mode_lib->vba.OutputFormat[k],
5011 mode_lib->vba.DSCInputBitPerComponent[k]);
5012 mode_lib->vba.OutbppDSC =
5013 TruncToValidBPP((1.0 - mode_lib->vba.Downspreading / 100.0)
5014 * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0
5015 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
5016 true,
5017 mode_lib->vba.Output[k],
5018 mode_lib->vba.OutputFormat[k],
5019 mode_lib->vba.DSCInputBitPerComponent[k]);
5020 if (mode_lib->vba.DSCEnabled[k] == true) {
5021 mode_lib->vba.RequiresDSC[i][k] = true;
5022 if (mode_lib->vba.Output[k] == dm_dp) {
5023 mode_lib->vba.RequiresFEC[i][k] =
5024 true;
5025 } else {
5026 mode_lib->vba.RequiresFEC[i][k] =
5027 false;
5028 }
5029 mode_lib->vba.Outbpp =
5030 mode_lib->vba.OutbppDSC;
5031 } else {
5032 mode_lib->vba.RequiresDSC[i][k] = false;
5033 mode_lib->vba.RequiresFEC[i][k] = false;
5034 }
5035 mode_lib->vba.OutputBppPerState[i][k] =
5036 mode_lib->vba.Outbpp;
5037 }
5038 if (mode_lib->vba.Outbpp == BPP_INVALID) {
5039 mode_lib->vba.Outbpp =
5040 TruncToValidBPP((1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0
5041 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
5042 false,
5043 mode_lib->vba.Output[k],
5044 mode_lib->vba.OutputFormat[k],
5045 mode_lib->vba.DSCInputBitPerComponent[k]);
5046 mode_lib->vba.OutbppDSC =
5047 TruncToValidBPP((1.0 - mode_lib->vba.Downspreading / 100.0)
5048 * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0
5049 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
5050 true,
5051 mode_lib->vba.Output[k],
5052 mode_lib->vba.OutputFormat[k],
5053 mode_lib->vba.DSCInputBitPerComponent[k]);
5054 if (mode_lib->vba.DSCEnabled[k] == true) {
5055 mode_lib->vba.RequiresDSC[i][k] = true;
5056 if (mode_lib->vba.Output[k] == dm_dp) {
5057 mode_lib->vba.RequiresFEC[i][k] =
5058 true;
5059 } else {
5060 mode_lib->vba.RequiresFEC[i][k] =
5061 false;
5062 }
5063 mode_lib->vba.Outbpp =
5064 mode_lib->vba.OutbppDSC;
5065 } else {
5066 mode_lib->vba.RequiresDSC[i][k] = false;
5067 mode_lib->vba.RequiresFEC[i][k] = false;
5068 }
5069 mode_lib->vba.OutputBppPerState[i][k] =
5070 mode_lib->vba.Outbpp;
5071 }
5072 if (mode_lib->vba.Outbpp == BPP_INVALID
5073 && mode_lib->vba.PHYCLKPerState[i]
5074 >= 810.0) {
5075 mode_lib->vba.Outbpp =
5076 TruncToValidBPP((1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0
5077 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
5078 false,
5079 mode_lib->vba.Output[k],
5080 mode_lib->vba.OutputFormat[k],
5081 mode_lib->vba.DSCInputBitPerComponent[k]);
5082 mode_lib->vba.OutbppDSC =
5083 TruncToValidBPP((1.0 - mode_lib->vba.Downspreading / 100.0)
5084 * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0
5085 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
5086 true,
5087 mode_lib->vba.Output[k],
5088 mode_lib->vba.OutputFormat[k],
5089 mode_lib->vba.DSCInputBitPerComponent[k]);
5090 if (mode_lib->vba.DSCEnabled[k] == true
5091 || mode_lib->vba.Outbpp == BPP_INVALID) {
5092 mode_lib->vba.RequiresDSC[i][k] = true;
5093 if (mode_lib->vba.Output[k] == dm_dp) {
5094 mode_lib->vba.RequiresFEC[i][k] =
5095 true;
5096 } else {
5097 mode_lib->vba.RequiresFEC[i][k] =
5098 false;
5099 }
5100 mode_lib->vba.Outbpp =
5101 mode_lib->vba.OutbppDSC;
5102 } else {
5103 mode_lib->vba.RequiresDSC[i][k] = false;
5104 mode_lib->vba.RequiresFEC[i][k] = false;
5105 }
5106 mode_lib->vba.OutputBppPerState[i][k] =
5107 mode_lib->vba.Outbpp;
5108 }
5109 }
5110 } else {
5111 mode_lib->vba.OutputBppPerState[i][k] = BPP_BLENDED_PIPE;
5112 }
5113 }
5114 }
5115 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
5116 mode_lib->vba.DIOSupport[i] = true;
5117 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5118 if (mode_lib->vba.OutputBppPerState[i][k] == BPP_INVALID
5119 || (mode_lib->vba.OutputFormat[k] == dm_420
5120 && mode_lib->vba.ProgressiveToInterlaceUnitInOPP
5121 == true)) {
5122 mode_lib->vba.DIOSupport[i] = false;
5123 }
5124 }
5125 }
5126 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
5127 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5128 mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;
5129 if (mode_lib->vba.BlendingAndTiming[k] == k) {
5130 if ((mode_lib->vba.Output[k] == dm_dp
5131 || mode_lib->vba.Output[k] == dm_edp)) {
5132 if (mode_lib->vba.OutputFormat[k] == dm_420
5133 || mode_lib->vba.OutputFormat[k]
5134 == dm_n422) {
5135 mode_lib->vba.DSCFormatFactor = 2;
5136 } else {
5137 mode_lib->vba.DSCFormatFactor = 1;
5138 }
5139 if (mode_lib->vba.RequiresDSC[i][k] == true) {
5140 if (mode_lib->vba.ODMCombineEnablePerState[i][k]
5141 == true) {
5142 if (mode_lib->vba.PixelClockBackEnd[k] / 6.0
5143 / mode_lib->vba.DSCFormatFactor
5144 > (1.0
5145 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
5146 / 100.0)
5147 * mode_lib->vba.MaxDSCCLK[i]) {
5148 mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] =
5149 true;
5150 }
5151 } else {
5152 if (mode_lib->vba.PixelClockBackEnd[k] / 3.0
5153 / mode_lib->vba.DSCFormatFactor
5154 > (1.0
5155 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
5156 / 100.0)
5157 * mode_lib->vba.MaxDSCCLK[i]) {
5158 mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] =
5159 true;
5160 }
5161 }
5162 }
5163 }
5164 }
5165 }
5166 }
5167 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
5168 mode_lib->vba.NotEnoughDSCUnits[i] = false;
5169 mode_lib->vba.TotalDSCUnitsRequired = 0.0;
5170 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5171 if (mode_lib->vba.RequiresDSC[i][k] == true) {
5172 if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
5173 mode_lib->vba.TotalDSCUnitsRequired =
5174 mode_lib->vba.TotalDSCUnitsRequired + 2.0;
5175 } else {
5176 mode_lib->vba.TotalDSCUnitsRequired =
5177 mode_lib->vba.TotalDSCUnitsRequired + 1.0;
5178 }
5179 }
5180 }
5181 if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) {
5182 mode_lib->vba.NotEnoughDSCUnits[i] = true;
5183 }
5184 }
5185 	/*DSC Delay Per State*/
5186
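	/*
	 * For planes that need DSC, pick the slice count from the back-end
	 * pixel clock (1/2/4/8 slices, or a multiple of 4 above 3200), then
	 * add the DSC encoder delay (doubled when ODM combine splits the work
	 * across two engines) and rescale it from the back-end to the
	 * front-end pixel clock.  Planes that share another plane's timing
	 * inherit that plane's delay.
	 */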
5187 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
5188 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5189 if (mode_lib->vba.BlendingAndTiming[k] != k) {
5190 mode_lib->vba.slices = 0;
5191 } else if (mode_lib->vba.RequiresDSC[i][k] == 0
5192 || mode_lib->vba.RequiresDSC[i][k] == false) {
5193 mode_lib->vba.slices = 0;
5194 } else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) {
5195 mode_lib->vba.slices = dml_ceil(
5196 mode_lib->vba.PixelClockBackEnd[k] / 400.0,
5197 4.0);
5198 } else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) {
5199 mode_lib->vba.slices = 8.0;
5200 } else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) {
5201 mode_lib->vba.slices = 4.0;
5202 } else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) {
5203 mode_lib->vba.slices = 2.0;
5204 } else {
5205 mode_lib->vba.slices = 1.0;
5206 }
5207 if (mode_lib->vba.OutputBppPerState[i][k] == BPP_BLENDED_PIPE
5208 || mode_lib->vba.OutputBppPerState[i][k] == BPP_INVALID) {
5209 mode_lib->vba.bpp = 0.0;
5210 } else {
5211 mode_lib->vba.bpp = mode_lib->vba.OutputBppPerState[i][k];
5212 }
5213 if (mode_lib->vba.RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
5214 if (mode_lib->vba.ODMCombineEnablePerState[i][k] == false) {
5215 mode_lib->vba.DSCDelayPerState[i][k] =
5216 dscceComputeDelay(
5217 mode_lib->vba.DSCInputBitPerComponent[k],
5218 mode_lib->vba.bpp,
5219 dml_ceil(
5220 mode_lib->vba.HActive[k]
5221 / mode_lib->vba.slices,
5222 1.0),
5223 mode_lib->vba.slices,
5224 mode_lib->vba.OutputFormat[k])
5225 + dscComputeDelay(
5226 mode_lib->vba.OutputFormat[k]);
5227 } else {
5228 mode_lib->vba.DSCDelayPerState[i][k] =
5229 2.0
5230 * (dscceComputeDelay(
5231 mode_lib->vba.DSCInputBitPerComponent[k],
5232 mode_lib->vba.bpp,
5233 dml_ceil(
5234 mode_lib->vba.HActive[k]
5235 / mode_lib->vba.slices,
5236 1.0),
5237 mode_lib->vba.slices
5238 / 2,
5239 mode_lib->vba.OutputFormat[k])
5240 + dscComputeDelay(
5241 mode_lib->vba.OutputFormat[k]));
5242 }
5243 mode_lib->vba.DSCDelayPerState[i][k] =
5244 mode_lib->vba.DSCDelayPerState[i][k]
5245 * mode_lib->vba.PixelClock[k]
5246 / mode_lib->vba.PixelClockBackEnd[k];
5247 } else {
5248 mode_lib->vba.DSCDelayPerState[i][k] = 0.0;
5249 }
5250 }
5251 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5252 for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
5253 if (mode_lib->vba.BlendingAndTiming[k] == j
5254 && mode_lib->vba.RequiresDSC[i][j] == true) {
5255 mode_lib->vba.DSCDelayPerState[i][k] =
5256 mode_lib->vba.DSCDelayPerState[i][j];
5257 }
5258 }
5259 }
5260 }
5261 /*Urgent Latency Support Check*/
5262
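	/*
	 * Per state and plane, derive the swath width and height actually
	 * used (accounting for ODM combine and the DPP split), the number of
	 * lines held in the DET and line buffer, and from those the time the
	 * pipe can hide an urgent request.  A state passes only if that time
	 * is at least the urgent latency for every plane.
	 */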
5263 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5264 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
5265 if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
5266 mode_lib->vba.SwathWidthYPerState[i][k] =
5267 dml_min(
5268 mode_lib->vba.SwathWidthYSingleDPP[k],
5269 dml_round(
5270 mode_lib->vba.HActive[k]
5271 / 2.0
5272 * mode_lib->vba.HRatio[k]));
5273 } else {
5274 mode_lib->vba.SwathWidthYPerState[i][k] =
5275 mode_lib->vba.SwathWidthYSingleDPP[k]
5276 / mode_lib->vba.NoOfDPP[i][k];
5277 }
5278 mode_lib->vba.SwathWidthGranularityY = 256.0
5279 / dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0)
5280 / mode_lib->vba.MaxSwathHeightY[k];
5281 mode_lib->vba.RoundedUpMaxSwathSizeBytesY = (dml_ceil(
5282 mode_lib->vba.SwathWidthYPerState[i][k] - 1.0,
5283 mode_lib->vba.SwathWidthGranularityY)
5284 + mode_lib->vba.SwathWidthGranularityY)
5285 * mode_lib->vba.BytePerPixelInDETY[k]
5286 * mode_lib->vba.MaxSwathHeightY[k];
5287 if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
5288 mode_lib->vba.RoundedUpMaxSwathSizeBytesY = dml_ceil(
5289 mode_lib->vba.RoundedUpMaxSwathSizeBytesY,
5290 256.0) + 256;
5291 }
5292 if (mode_lib->vba.MaxSwathHeightC[k] > 0.0) {
5293 mode_lib->vba.SwathWidthGranularityC = 256.0
5294 / dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0)
5295 / mode_lib->vba.MaxSwathHeightC[k];
5296 mode_lib->vba.RoundedUpMaxSwathSizeBytesC = (dml_ceil(
5297 mode_lib->vba.SwathWidthYPerState[i][k] / 2.0 - 1.0,
5298 mode_lib->vba.SwathWidthGranularityC)
5299 + mode_lib->vba.SwathWidthGranularityC)
5300 * mode_lib->vba.BytePerPixelInDETC[k]
5301 * mode_lib->vba.MaxSwathHeightC[k];
5302 if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
5303 mode_lib->vba.RoundedUpMaxSwathSizeBytesC = dml_ceil(
5304 mode_lib->vba.RoundedUpMaxSwathSizeBytesC,
5305 256.0) + 256;
5306 }
5307 } else {
5308 mode_lib->vba.RoundedUpMaxSwathSizeBytesC = 0.0;
5309 }
5310 if (mode_lib->vba.RoundedUpMaxSwathSizeBytesY
5311 + mode_lib->vba.RoundedUpMaxSwathSizeBytesC
5312 <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
5313 mode_lib->vba.SwathHeightYPerState[i][k] =
5314 mode_lib->vba.MaxSwathHeightY[k];
5315 mode_lib->vba.SwathHeightCPerState[i][k] =
5316 mode_lib->vba.MaxSwathHeightC[k];
5317 } else {
5318 mode_lib->vba.SwathHeightYPerState[i][k] =
5319 mode_lib->vba.MinSwathHeightY[k];
5320 mode_lib->vba.SwathHeightCPerState[i][k] =
5321 mode_lib->vba.MinSwathHeightC[k];
5322 }
5323 if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
5324 mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
5325 * 1024.0 / mode_lib->vba.BytePerPixelInDETY[k]
5326 / mode_lib->vba.SwathWidthYPerState[i][k];
5327 mode_lib->vba.LinesInDETChroma = 0.0;
5328 } else if (mode_lib->vba.SwathHeightYPerState[i][k]
5329 <= mode_lib->vba.SwathHeightCPerState[i][k]) {
5330 mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
5331 * 1024.0 / 2.0 / mode_lib->vba.BytePerPixelInDETY[k]
5332 / mode_lib->vba.SwathWidthYPerState[i][k];
5333 mode_lib->vba.LinesInDETChroma = mode_lib->vba.DETBufferSizeInKByte
5334 * 1024.0 / 2.0 / mode_lib->vba.BytePerPixelInDETC[k]
5335 / (mode_lib->vba.SwathWidthYPerState[i][k] / 2.0);
5336 } else {
5337 mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
5338 * 1024.0 * 2.0 / 3.0
5339 / mode_lib->vba.BytePerPixelInDETY[k]
5340 / mode_lib->vba.SwathWidthYPerState[i][k];
5341 mode_lib->vba.LinesInDETChroma = mode_lib->vba.DETBufferSizeInKByte
5342 * 1024.0 / 3.0 / mode_lib->vba.BytePerPixelInDETY[k]
5343 / (mode_lib->vba.SwathWidthYPerState[i][k] / 2.0);
5344 }
5345 mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma =
5346 dml_min(
5347 mode_lib->vba.MaxLineBufferLines,
5348 dml_floor(
5349 mode_lib->vba.LineBufferSize
5350 / mode_lib->vba.LBBitPerPixel[k]
5351 / (mode_lib->vba.SwathWidthYPerState[i][k]
5352 / dml_max(
5353 mode_lib->vba.HRatio[k],
5354 1.0)),
5355 1.0))
5356 - (mode_lib->vba.vtaps[k] - 1.0);
5357 mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma =
5358 dml_min(
5359 mode_lib->vba.MaxLineBufferLines,
5360 dml_floor(
5361 mode_lib->vba.LineBufferSize
5362 / mode_lib->vba.LBBitPerPixel[k]
5363 / (mode_lib->vba.SwathWidthYPerState[i][k]
5364 / 2.0
5365 / dml_max(
5366 mode_lib->vba.HRatio[k]
5367 / 2.0,
5368 1.0)),
5369 1.0))
5370 - (mode_lib->vba.VTAPsChroma[k] - 1.0);
5371 mode_lib->vba.EffectiveDETLBLinesLuma =
5372 dml_floor(
5373 mode_lib->vba.LinesInDETLuma
5374 + dml_min(
5375 mode_lib->vba.LinesInDETLuma
5376 * mode_lib->vba.RequiredDISPCLK[i]
5377 * mode_lib->vba.BytePerPixelInDETY[k]
5378 * mode_lib->vba.PSCL_FACTOR[k]
5379 / mode_lib->vba.ReturnBWPerState[i],
5380 mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
5381 mode_lib->vba.SwathHeightYPerState[i][k]);
5382 mode_lib->vba.EffectiveDETLBLinesChroma =
5383 dml_floor(
5384 mode_lib->vba.LinesInDETChroma
5385 + dml_min(
5386 mode_lib->vba.LinesInDETChroma
5387 * mode_lib->vba.RequiredDISPCLK[i]
5388 * mode_lib->vba.BytePerPixelInDETC[k]
5389 * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
5390 / mode_lib->vba.ReturnBWPerState[i],
5391 mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
5392 mode_lib->vba.SwathHeightCPerState[i][k]);
5393 if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
5394 mode_lib->vba.UrgentLatencySupportUsPerState[i][k] =
5395 mode_lib->vba.EffectiveDETLBLinesLuma
5396 * (mode_lib->vba.HTotal[k]
5397 / mode_lib->vba.PixelClock[k])
5398 / mode_lib->vba.VRatio[k]
5399 - mode_lib->vba.EffectiveDETLBLinesLuma
5400 * mode_lib->vba.SwathWidthYPerState[i][k]
5401 * dml_ceil(
5402 mode_lib->vba.BytePerPixelInDETY[k],
5403 1.0)
5404 / (mode_lib->vba.ReturnBWPerState[i]
5405 / mode_lib->vba.NoOfDPP[i][k]);
5406 } else {
5407 mode_lib->vba.UrgentLatencySupportUsPerState[i][k] =
5408 dml_min(
5409 mode_lib->vba.EffectiveDETLBLinesLuma
5410 * (mode_lib->vba.HTotal[k]
5411 / mode_lib->vba.PixelClock[k])
5412 / mode_lib->vba.VRatio[k]
5413 - mode_lib->vba.EffectiveDETLBLinesLuma
5414 * mode_lib->vba.SwathWidthYPerState[i][k]
5415 * dml_ceil(
5416 mode_lib->vba.BytePerPixelInDETY[k],
5417 1.0)
5418 / (mode_lib->vba.ReturnBWPerState[i]
5419 / mode_lib->vba.NoOfDPP[i][k]),
5420 mode_lib->vba.EffectiveDETLBLinesChroma
5421 * (mode_lib->vba.HTotal[k]
5422 / mode_lib->vba.PixelClock[k])
5423 / (mode_lib->vba.VRatio[k]
5424 / 2.0)
5425 - mode_lib->vba.EffectiveDETLBLinesChroma
5426 * mode_lib->vba.SwathWidthYPerState[i][k]
5427 / 2.0
5428 * dml_ceil(
5429 mode_lib->vba.BytePerPixelInDETC[k],
5430 2.0)
5431 / (mode_lib->vba.ReturnBWPerState[i]
5432 / mode_lib->vba.NoOfDPP[i][k]));
5433 }
5434 }
5435 }
5436 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
5437 mode_lib->vba.UrgentLatencySupport[i] = true;
5438 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5439 if (mode_lib->vba.UrgentLatencySupportUsPerState[i][k]
5440						< mode_lib->vba.UrgentLatency) {
5441 mode_lib->vba.UrgentLatencySupport[i] = false;
5442 }
5443 }
5444 }
5445 /*Prefetch Check*/
5446
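	/*
	 * The prefetch check gathers, per state, the DCC-active DPP count, a
	 * projected deep-sleep DCFCLK, the VM/PTE and meta row bytes and
	 * prefetch source lines per plane, the extra latency from chunk
	 * transfers, writeback delays, the maximum VSTARTUP, TWait and any
	 * XFC remote-flip delay, and feeds them into
	 * CalculatePrefetchSchedule() to decide whether the prefetch schedule
	 * fits for each plane.
	 */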
5447 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
5448 mode_lib->vba.TotalNumberOfDCCActiveDPP[i] = 0.0;
5449 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5450 if (mode_lib->vba.DCCEnable[k] == true) {
5451 mode_lib->vba.TotalNumberOfDCCActiveDPP[i] =
5452 mode_lib->vba.TotalNumberOfDCCActiveDPP[i]
5453 + mode_lib->vba.NoOfDPP[i][k];
5454 }
5455 }
5456 }
5457 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
5458 mode_lib->vba.ProjectedDCFCLKDeepSleep = 8.0;
5459 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5460 mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
5461 mode_lib->vba.ProjectedDCFCLKDeepSleep,
5462 mode_lib->vba.PixelClock[k] / 16.0);
5463 if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
5464 if (mode_lib->vba.VRatio[k] <= 1.0) {
5465 mode_lib->vba.ProjectedDCFCLKDeepSleep =
5466 dml_max(
5467 mode_lib->vba.ProjectedDCFCLKDeepSleep,
5468 1.1
5469 * dml_ceil(
5470 mode_lib->vba.BytePerPixelInDETY[k],
5471 1.0)
5472 / 64.0
5473 * mode_lib->vba.HRatio[k]
5474 * mode_lib->vba.PixelClock[k]
5475 / mode_lib->vba.NoOfDPP[i][k]);
5476 } else {
5477 mode_lib->vba.ProjectedDCFCLKDeepSleep =
5478 dml_max(
5479 mode_lib->vba.ProjectedDCFCLKDeepSleep,
5480 1.1
5481 * dml_ceil(
5482 mode_lib->vba.BytePerPixelInDETY[k],
5483 1.0)
5484 / 64.0
5485 * mode_lib->vba.PSCL_FACTOR[k]
5486 * mode_lib->vba.RequiredDPPCLK[i][k]);
5487 }
5488 } else {
5489 if (mode_lib->vba.VRatio[k] <= 1.0) {
5490 mode_lib->vba.ProjectedDCFCLKDeepSleep =
5491 dml_max(
5492 mode_lib->vba.ProjectedDCFCLKDeepSleep,
5493 1.1
5494 * dml_ceil(
5495 mode_lib->vba.BytePerPixelInDETY[k],
5496 1.0)
5497 / 32.0
5498 * mode_lib->vba.HRatio[k]
5499 * mode_lib->vba.PixelClock[k]
5500 / mode_lib->vba.NoOfDPP[i][k]);
5501 } else {
5502 mode_lib->vba.ProjectedDCFCLKDeepSleep =
5503 dml_max(
5504 mode_lib->vba.ProjectedDCFCLKDeepSleep,
5505 1.1
5506 * dml_ceil(
5507 mode_lib->vba.BytePerPixelInDETY[k],
5508 1.0)
5509 / 32.0
5510 * mode_lib->vba.PSCL_FACTOR[k]
5511 * mode_lib->vba.RequiredDPPCLK[i][k]);
5512 }
5513 if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
5514 mode_lib->vba.ProjectedDCFCLKDeepSleep =
5515 dml_max(
5516 mode_lib->vba.ProjectedDCFCLKDeepSleep,
5517 1.1
5518 * dml_ceil(
5519 mode_lib->vba.BytePerPixelInDETC[k],
5520 2.0)
5521 / 32.0
5522 * mode_lib->vba.HRatio[k]
5523 / 2.0
5524 * mode_lib->vba.PixelClock[k]
5525 / mode_lib->vba.NoOfDPP[i][k]);
5526 } else {
5527 mode_lib->vba.ProjectedDCFCLKDeepSleep =
5528 dml_max(
5529 mode_lib->vba.ProjectedDCFCLKDeepSleep,
5530 1.1
5531 * dml_ceil(
5532 mode_lib->vba.BytePerPixelInDETC[k],
5533 2.0)
5534 / 32.0
5535 * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
5536 * mode_lib->vba.RequiredDPPCLK[i][k]);
5537 }
5538 }
5539 }
5540 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5541 mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
5542 mode_lib,
5543 mode_lib->vba.DCCEnable[k],
5544 mode_lib->vba.Read256BlockHeightY[k],
5545 mode_lib->vba.Read256BlockWidthY[k],
5546 mode_lib->vba.SourcePixelFormat[k],
5547 mode_lib->vba.SurfaceTiling[k],
5548 dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
5549 mode_lib->vba.SourceScan[k],
5550 mode_lib->vba.ViewportWidth[k],
5551 mode_lib->vba.ViewportHeight[k],
5552 mode_lib->vba.SwathWidthYPerState[i][k],
5553 mode_lib->vba.VirtualMemoryEnable,
5554 mode_lib->vba.VMMPageSize,
5555 mode_lib->vba.PTEBufferSizeInRequests,
5556 mode_lib->vba.PDEProcessingBufIn64KBReqs,
5557 mode_lib->vba.PitchY[k],
5558 mode_lib->vba.DCCMetaPitchY[k],
5559 &mode_lib->vba.MacroTileWidthY[k],
5560 &mode_lib->vba.MetaRowBytesY,
5561 &mode_lib->vba.DPTEBytesPerRowY,
5562 &mode_lib->vba.PTEBufferSizeNotExceededY[i][k],
5563 &mode_lib->vba.dpte_row_height[k],
5564 &mode_lib->vba.meta_row_height[k]);
5565 mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
5566 mode_lib,
5567 mode_lib->vba.VRatio[k],
5568 mode_lib->vba.vtaps[k],
5569 mode_lib->vba.Interlace[k],
5570 mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
5571 mode_lib->vba.SwathHeightYPerState[i][k],
5572 mode_lib->vba.ViewportYStartY[k],
5573 &mode_lib->vba.PrefillY[k],
5574 &mode_lib->vba.MaxNumSwY[k]);
5575 if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
5576 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
5577 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
5578 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
5579 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) {
5580 mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
5581 mode_lib,
5582 mode_lib->vba.DCCEnable[k],
5583 mode_lib->vba.Read256BlockHeightY[k],
5584 mode_lib->vba.Read256BlockWidthY[k],
5585 mode_lib->vba.SourcePixelFormat[k],
5586 mode_lib->vba.SurfaceTiling[k],
5587 dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
5588 mode_lib->vba.SourceScan[k],
5589 mode_lib->vba.ViewportWidth[k] / 2.0,
5590 mode_lib->vba.ViewportHeight[k] / 2.0,
5591 mode_lib->vba.SwathWidthYPerState[i][k] / 2.0,
5592 mode_lib->vba.VirtualMemoryEnable,
5593 mode_lib->vba.VMMPageSize,
5594 mode_lib->vba.PTEBufferSizeInRequests,
5595 mode_lib->vba.PDEProcessingBufIn64KBReqs,
5596 mode_lib->vba.PitchC[k],
5597 0.0,
5598 &mode_lib->vba.MacroTileWidthC[k],
5599 &mode_lib->vba.MetaRowBytesC,
5600 &mode_lib->vba.DPTEBytesPerRowC,
5601 &mode_lib->vba.PTEBufferSizeNotExceededC[i][k],
5602 &mode_lib->vba.dpte_row_height_chroma[k],
5603 &mode_lib->vba.meta_row_height_chroma[k]);
5604 mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
5605 mode_lib,
5606 mode_lib->vba.VRatio[k] / 2.0,
5607 mode_lib->vba.VTAPsChroma[k],
5608 mode_lib->vba.Interlace[k],
5609 mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
5610 mode_lib->vba.SwathHeightCPerState[i][k],
5611 mode_lib->vba.ViewportYStartC[k],
5612 &mode_lib->vba.PrefillC[k],
5613 &mode_lib->vba.MaxNumSwC[k]);
5614 } else {
5615 mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
5616 mode_lib->vba.MetaRowBytesC = 0.0;
5617 mode_lib->vba.DPTEBytesPerRowC = 0.0;
5618 mode_lib->vba.PrefetchLinesC[k] = 0.0;
5619 mode_lib->vba.PTEBufferSizeNotExceededC[i][k] = true;
5620 }
5621 mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] =
5622 mode_lib->vba.PDEAndMetaPTEBytesPerFrameY
5623 + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
5624 mode_lib->vba.MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY
5625 + mode_lib->vba.MetaRowBytesC;
5626 mode_lib->vba.DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY
5627 + mode_lib->vba.DPTEBytesPerRowC;
5628 }
5629 mode_lib->vba.ExtraLatency =
5630 mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]
5631 + (mode_lib->vba.TotalNumberOfActiveDPP[i]
5632 * mode_lib->vba.PixelChunkSizeInKByte
5633 + mode_lib->vba.TotalNumberOfDCCActiveDPP[i]
5634 * mode_lib->vba.MetaChunkSize)
5635 * 1024.0
5636 / mode_lib->vba.ReturnBWPerState[i];
5637 if (mode_lib->vba.VirtualMemoryEnable == true) {
5638 mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
5639 + mode_lib->vba.TotalNumberOfActiveDPP[i]
5640 * mode_lib->vba.PTEChunkSize * 1024.0
5641 / mode_lib->vba.ReturnBWPerState[i];
5642 }
5643 mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
5644 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5645 if (mode_lib->vba.BlendingAndTiming[k] == k) {
5646 if (mode_lib->vba.WritebackEnable[k] == true) {
5647 mode_lib->vba.WritebackDelay[i][k] =
5648 mode_lib->vba.WritebackLatency
5649 + CalculateWriteBackDelay(
5650 mode_lib->vba.WritebackPixelFormat[k],
5651 mode_lib->vba.WritebackHRatio[k],
5652 mode_lib->vba.WritebackVRatio[k],
5653 mode_lib->vba.WritebackLumaHTaps[k],
5654 mode_lib->vba.WritebackLumaVTaps[k],
5655 mode_lib->vba.WritebackChromaHTaps[k],
5656 mode_lib->vba.WritebackChromaVTaps[k],
5657 mode_lib->vba.WritebackDestinationWidth[k])
5658 / mode_lib->vba.RequiredDISPCLK[i];
5659 } else {
5660 mode_lib->vba.WritebackDelay[i][k] = 0.0;
5661 }
5662 for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
5663 if (mode_lib->vba.BlendingAndTiming[j] == k
5664 && mode_lib->vba.WritebackEnable[j]
5665 == true) {
5666 mode_lib->vba.WritebackDelay[i][k] =
5667 dml_max(
5668 mode_lib->vba.WritebackDelay[i][k],
5669 mode_lib->vba.WritebackLatency
5670 + CalculateWriteBackDelay(
5671 mode_lib->vba.WritebackPixelFormat[j],
5672 mode_lib->vba.WritebackHRatio[j],
5673 mode_lib->vba.WritebackVRatio[j],
5674 mode_lib->vba.WritebackLumaHTaps[j],
5675 mode_lib->vba.WritebackLumaVTaps[j],
5676 mode_lib->vba.WritebackChromaHTaps[j],
5677 mode_lib->vba.WritebackChromaVTaps[j],
5678 mode_lib->vba.WritebackDestinationWidth[j])
5679 / mode_lib->vba.RequiredDISPCLK[i]);
5680 }
5681 }
5682 }
5683 }
5684 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5685 for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
5686 if (mode_lib->vba.BlendingAndTiming[k] == j) {
5687 mode_lib->vba.WritebackDelay[i][k] =
5688 mode_lib->vba.WritebackDelay[i][j];
5689 }
5690 }
5691 }
5692 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5693 mode_lib->vba.MaximumVStartup[k] =
5694 mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
5695 - dml_max(
5696 1.0,
5697 dml_ceil(
5698 mode_lib->vba.WritebackDelay[i][k]
5699 / (mode_lib->vba.HTotal[k]
5700 / mode_lib->vba.PixelClock[k]),
5701 1.0));
5702 }
5703 mode_lib->vba.TWait = CalculateTWait(
5704 mode_lib->vba.PrefetchMode,
5705 mode_lib->vba.DRAMClockChangeLatency,
5706 mode_lib->vba.UrgentLatency,
5707 mode_lib->vba.SREnterPlusExitTime);
5708 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5709 if (mode_lib->vba.XFCEnabled[k] == true) {
5710 mode_lib->vba.XFCRemoteSurfaceFlipDelay =
5711 CalculateRemoteSurfaceFlipDelay(
5712 mode_lib,
5713 mode_lib->vba.VRatio[k],
5714 mode_lib->vba.SwathWidthYPerState[i][k],
5715 dml_ceil(
5716 mode_lib->vba.BytePerPixelInDETY[k],
5717 1.0),
5718 mode_lib->vba.HTotal[k]
5719 / mode_lib->vba.PixelClock[k],
5720 mode_lib->vba.XFCTSlvVupdateOffset,
5721 mode_lib->vba.XFCTSlvVupdateWidth,
5722 mode_lib->vba.XFCTSlvVreadyOffset,
5723 mode_lib->vba.XFCXBUFLatencyTolerance,
5724 mode_lib->vba.XFCFillBWOverhead,
5725 mode_lib->vba.XFCSlvChunkSize,
5726 mode_lib->vba.XFCBusTransportTime,
5727 mode_lib->vba.TimeCalc,
5728 mode_lib->vba.TWait,
5729 &mode_lib->vba.SrcActiveDrainRate,
5730 &mode_lib->vba.TInitXFill,
5731 &mode_lib->vba.TslvChk);
5732 } else {
5733 mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
5734 }
5735 mode_lib->vba.IsErrorResult[i][k] =
5736 CalculatePrefetchSchedule(
5737 mode_lib,
5738 mode_lib->vba.RequiredDPPCLK[i][k],
5739 mode_lib->vba.RequiredDISPCLK[i],
5740 mode_lib->vba.PixelClock[k],
5741 mode_lib->vba.ProjectedDCFCLKDeepSleep,
5742 mode_lib->vba.DSCDelayPerState[i][k],
5743 mode_lib->vba.NoOfDPP[i][k],
5744 mode_lib->vba.ScalerEnabled[k],
5745 mode_lib->vba.NumberOfCursors[k],
5746 mode_lib->vba.DPPCLKDelaySubtotal,
5747 mode_lib->vba.DPPCLKDelaySCL,
5748 mode_lib->vba.DPPCLKDelaySCLLBOnly,
5749 mode_lib->vba.DPPCLKDelayCNVCFormater,
5750 mode_lib->vba.DPPCLKDelayCNVCCursor,
5751 mode_lib->vba.DISPCLKDelaySubtotal,
5752 mode_lib->vba.SwathWidthYPerState[i][k]
5753 / mode_lib->vba.HRatio[k],
5754 mode_lib->vba.OutputFormat[k],
5755 mode_lib->vba.VTotal[k]
5756 - mode_lib->vba.VActive[k],
5757 mode_lib->vba.HTotal[k],
5758 mode_lib->vba.MaxInterDCNTileRepeaters,
5759 mode_lib->vba.MaximumVStartup[k],
5760 mode_lib->vba.MaxPageTableLevels,
5761 mode_lib->vba.VirtualMemoryEnable,
5762 mode_lib->vba.DynamicMetadataEnable[k],
5763 mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
5764 mode_lib->vba.DynamicMetadataTransmittedBytes[k],
5765 mode_lib->vba.DCCEnable[k],
5766 mode_lib->vba.UrgentLatency,
5767 mode_lib->vba.ExtraLatency,
5768 mode_lib->vba.TimeCalc,
5769 mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
5770 mode_lib->vba.MetaRowBytes[k],
5771 mode_lib->vba.DPTEBytesPerRow[k],
5772 mode_lib->vba.PrefetchLinesY[k],
5773 mode_lib->vba.SwathWidthYPerState[i][k],
5774 mode_lib->vba.BytePerPixelInDETY[k],
5775 mode_lib->vba.PrefillY[k],
5776 mode_lib->vba.MaxNumSwY[k],
5777 mode_lib->vba.PrefetchLinesC[k],
5778 mode_lib->vba.BytePerPixelInDETC[k],
5779 mode_lib->vba.PrefillC[k],
5780 mode_lib->vba.MaxNumSwC[k],
5781 mode_lib->vba.SwathHeightYPerState[i][k],
5782 mode_lib->vba.SwathHeightCPerState[i][k],
5783 mode_lib->vba.TWait,
5784 mode_lib->vba.XFCEnabled[k],
5785 mode_lib->vba.XFCRemoteSurfaceFlipDelay,
5786 mode_lib->vba.Interlace[k],
5787 mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
5788 mode_lib->vba.DSTXAfterScaler,
5789 mode_lib->vba.DSTYAfterScaler,
5790 &mode_lib->vba.LineTimesForPrefetch[k],
5791 &mode_lib->vba.PrefetchBW[k],
5792 &mode_lib->vba.LinesForMetaPTE[k],
5793 &mode_lib->vba.LinesForMetaAndDPTERow[k],
5794 &mode_lib->vba.VRatioPreY[i][k],
5795 &mode_lib->vba.VRatioPreC[i][k],
5796 &mode_lib->vba.RequiredPrefetchPixelDataBW[i][k],
5797 &mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
5798 &mode_lib->vba.Tno_bw[k],
5799 &mode_lib->vba.VUpdateOffsetPix[k],
5800 &mode_lib->vba.VUpdateWidthPix[k],
5801 &mode_lib->vba.VReadyOffsetPix[k]);
5802 }
5803 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5804 mode_lib->vba.cursor_bw[k] = mode_lib->vba.NumberOfCursors[k]
5805 * mode_lib->vba.CursorWidth[k][0]
5806 * mode_lib->vba.CursorBPP[k][0] / 8.0
5807 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
5808 * mode_lib->vba.VRatio[k];
5809 }
5810 mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
5811 mode_lib->vba.prefetch_vm_bw_valid = true;
5812 mode_lib->vba.prefetch_row_bw_valid = true;
5813 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5814 if (mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] == 0.0) {
5815 mode_lib->vba.prefetch_vm_bw[k] = 0.0;
5816 } else if (mode_lib->vba.LinesForMetaPTE[k] > 0.0) {
5817 mode_lib->vba.prefetch_vm_bw[k] =
5818 mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
5819 / (mode_lib->vba.LinesForMetaPTE[k]
5820 * mode_lib->vba.HTotal[k]
5821 / mode_lib->vba.PixelClock[k]);
5822 } else {
5823 mode_lib->vba.prefetch_vm_bw[k] = 0.0;
5824 mode_lib->vba.prefetch_vm_bw_valid = false;
5825 }
5826 if (mode_lib->vba.MetaRowBytes[k] + mode_lib->vba.DPTEBytesPerRow[k]
5827 == 0.0) {
5828 mode_lib->vba.prefetch_row_bw[k] = 0.0;
5829 } else if (mode_lib->vba.LinesForMetaAndDPTERow[k] > 0.0) {
5830 mode_lib->vba.prefetch_row_bw[k] = (mode_lib->vba.MetaRowBytes[k]
5831 + mode_lib->vba.DPTEBytesPerRow[k])
5832 / (mode_lib->vba.LinesForMetaAndDPTERow[k]
5833 * mode_lib->vba.HTotal[k]
5834 / mode_lib->vba.PixelClock[k]);
5835 } else {
5836 mode_lib->vba.prefetch_row_bw[k] = 0.0;
5837 mode_lib->vba.prefetch_row_bw_valid = false;
5838 }
5839 mode_lib->vba.MaximumReadBandwidthWithPrefetch =
5840 mode_lib->vba.MaximumReadBandwidthWithPrefetch
5841 + mode_lib->vba.cursor_bw[k]
5842 + dml_max4(
5843 mode_lib->vba.prefetch_vm_bw[k],
5844 mode_lib->vba.prefetch_row_bw[k],
5845 mode_lib->vba.ReadBandwidth[k],
5846 mode_lib->vba.RequiredPrefetchPixelDataBW[i][k]);
5847 }
5848 mode_lib->vba.PrefetchSupported[i] = true;
5849 if (mode_lib->vba.MaximumReadBandwidthWithPrefetch
5850 > mode_lib->vba.ReturnBWPerState[i]
5851 || mode_lib->vba.prefetch_vm_bw_valid == false
5852 || mode_lib->vba.prefetch_row_bw_valid == false) {
5853 mode_lib->vba.PrefetchSupported[i] = false;
5854 }
5855 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5856 if (mode_lib->vba.LineTimesForPrefetch[k] < 2.0
5857 || mode_lib->vba.LinesForMetaPTE[k] >= 8.0
5858 || mode_lib->vba.LinesForMetaAndDPTERow[k] >= 16.0
5859 || mode_lib->vba.IsErrorResult[i][k] == true) {
5860 mode_lib->vba.PrefetchSupported[i] = false;
5861 }
5862 }
5863 mode_lib->vba.VRatioInPrefetchSupported[i] = true;
5864 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5865 if (mode_lib->vba.VRatioPreY[i][k] > 4.0
5866 || mode_lib->vba.VRatioPreC[i][k] > 4.0
5867 || mode_lib->vba.IsErrorResult[i][k] == true) {
5868 mode_lib->vba.VRatioInPrefetchSupported[i] = false;
5869 }
5870 }
5871 if (mode_lib->vba.PrefetchSupported[i] == true
5872 && mode_lib->vba.VRatioInPrefetchSupported[i] == true) {
5873 mode_lib->vba.BandwidthAvailableForImmediateFlip =
5874 mode_lib->vba.ReturnBWPerState[i];
5875 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5876 mode_lib->vba.BandwidthAvailableForImmediateFlip =
5877 mode_lib->vba.BandwidthAvailableForImmediateFlip
5878 - mode_lib->vba.cursor_bw[k]
5879 - dml_max(
5880 mode_lib->vba.ReadBandwidth[k],
5881 mode_lib->vba.PrefetchBW[k]);
5882 }
5883 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5884 mode_lib->vba.ImmediateFlipBytes[k] = 0.0;
5885 if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
5886 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
5887 mode_lib->vba.ImmediateFlipBytes[k] =
5888 mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
5889 + mode_lib->vba.MetaRowBytes[k]
5890 + mode_lib->vba.DPTEBytesPerRow[k];
5891 }
5892 }
5893 mode_lib->vba.TotImmediateFlipBytes = 0.0;
5894 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5895 if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
5896 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
5897 mode_lib->vba.TotImmediateFlipBytes =
5898 mode_lib->vba.TotImmediateFlipBytes
5899 + mode_lib->vba.ImmediateFlipBytes[k];
5900 }
5901 }
5902 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5903 CalculateFlipSchedule(
5904 mode_lib,
5905 mode_lib->vba.ExtraLatency,
5906 mode_lib->vba.UrgentLatency,
5907 mode_lib->vba.MaxPageTableLevels,
5908 mode_lib->vba.VirtualMemoryEnable,
5909 mode_lib->vba.BandwidthAvailableForImmediateFlip,
5910 mode_lib->vba.TotImmediateFlipBytes,
5911 mode_lib->vba.SourcePixelFormat[k],
5912 mode_lib->vba.ImmediateFlipBytes[k],
5913 mode_lib->vba.HTotal[k]
5914 / mode_lib->vba.PixelClock[k],
5915 mode_lib->vba.VRatio[k],
5916 mode_lib->vba.Tno_bw[k],
5917 mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
5918 mode_lib->vba.MetaRowBytes[k],
5919 mode_lib->vba.DPTEBytesPerRow[k],
5920 mode_lib->vba.DCCEnable[k],
5921 mode_lib->vba.dpte_row_height[k],
5922 mode_lib->vba.meta_row_height[k],
5923 mode_lib->vba.qual_row_bw[k],
5924 &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
5925 &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
5926 &mode_lib->vba.final_flip_bw[k],
5927 &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
5928 }
5929 mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
5930 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5931 mode_lib->vba.total_dcn_read_bw_with_flip =
5932 mode_lib->vba.total_dcn_read_bw_with_flip
5933 + mode_lib->vba.cursor_bw[k]
5934 + dml_max3(
5935 mode_lib->vba.prefetch_vm_bw[k],
5936 mode_lib->vba.prefetch_row_bw[k],
5937 mode_lib->vba.final_flip_bw[k]
5938 + dml_max(
5939 mode_lib->vba.ReadBandwidth[k],
5940 mode_lib->vba.RequiredPrefetchPixelDataBW[i][k]));
5941 }
5942 mode_lib->vba.ImmediateFlipSupportedForState[i] = true;
5943 if (mode_lib->vba.total_dcn_read_bw_with_flip
5944 > mode_lib->vba.ReturnBWPerState[i]) {
5945 mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
5946 }
5947 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5948 if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
5949 mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
5950 }
5951 }
5952 } else {
5953 mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
5954 }
5955 }
5956 /*PTE Buffer Size Check*/
5957
5958 for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
5959 mode_lib->vba.PTEBufferSizeNotExceeded[i] = true;
5960 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5961 if (mode_lib->vba.PTEBufferSizeNotExceededY[i][k] == false
5962 || mode_lib->vba.PTEBufferSizeNotExceededC[i][k] == false) {
5963 mode_lib->vba.PTEBufferSizeNotExceeded[i] = false;
5964 }
5965 }
5966 }
5967 /*Cursor Support Check*/
5968
5969 mode_lib->vba.CursorSupport = true;
5970 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5971 if (mode_lib->vba.CursorWidth[k][0] > 0.0) {
5972 if (dml_floor(
5973 dml_floor(
5974 mode_lib->vba.CursorBufferSize
5975 - mode_lib->vba.CursorChunkSize,
5976 mode_lib->vba.CursorChunkSize) * 1024.0
5977 / (mode_lib->vba.CursorWidth[k][0]
5978 * mode_lib->vba.CursorBPP[k][0]
5979 / 8.0),
5980 1.0)
5981 * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
5982 / mode_lib->vba.VRatio[k] < mode_lib->vba.UrgentLatency
5983 || (mode_lib->vba.CursorBPP[k][0] == 64.0
5984 && mode_lib->vba.Cursor64BppSupport == false)) {
5985 mode_lib->vba.CursorSupport = false;
5986 }
5987 }
5988 }
5989 /*Valid Pitch Check*/
5990
5991 mode_lib->vba.PitchSupport = true;
5992 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
5993 mode_lib->vba.AlignedYPitch[k] = dml_ceil(
5994 dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]),
5995 mode_lib->vba.MacroTileWidthY[k]);
5996 if (mode_lib->vba.AlignedYPitch[k] > mode_lib->vba.PitchY[k]) {
5997 mode_lib->vba.PitchSupport = false;
5998 }
5999 if (mode_lib->vba.DCCEnable[k] == true) {
6000 mode_lib->vba.AlignedDCCMetaPitch[k] = dml_ceil(
6001 dml_max(
6002 mode_lib->vba.DCCMetaPitchY[k],
6003 mode_lib->vba.ViewportWidth[k]),
6004 64.0 * mode_lib->vba.Read256BlockWidthY[k]);
6005 } else {
6006 mode_lib->vba.AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k];
6007 }
6008 if (mode_lib->vba.AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) {
6009 mode_lib->vba.PitchSupport = false;
6010 }
6011 if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
6012 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
6013 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
6014 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
6015 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
6016 mode_lib->vba.AlignedCPitch[k] = dml_ceil(
6017 dml_max(
6018 mode_lib->vba.PitchC[k],
6019 mode_lib->vba.ViewportWidth[k] / 2.0),
6020 mode_lib->vba.MacroTileWidthC[k]);
6021 } else {
6022 mode_lib->vba.AlignedCPitch[k] = mode_lib->vba.PitchC[k];
6023 }
6024 if (mode_lib->vba.AlignedCPitch[k] > mode_lib->vba.PitchC[k]) {
6025 mode_lib->vba.PitchSupport = false;
6026 }
6027 }
6028 /*Mode Support, Voltage State and SOC Configuration*/
6029
6030 for (i = DC__VOLTAGE_STATES; i >= 0; i--) {
6031 if (mode_lib->vba.ScaleRatioAndTapsSupport == true
6032 && mode_lib->vba.SourceFormatPixelAndScanSupport == true
6033 && mode_lib->vba.ViewportSizeSupport[i] == true
6034 && mode_lib->vba.BandwidthSupport[i] == true
6035 && mode_lib->vba.DIOSupport[i] == true
6036 && mode_lib->vba.NotEnoughDSCUnits[i] == false
6037 && mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] == false
6038 && mode_lib->vba.UrgentLatencySupport[i] == true
6039 && mode_lib->vba.ROBSupport[i] == true
6040 && mode_lib->vba.DISPCLK_DPPCLK_Support[i] == true
6041 && mode_lib->vba.TotalAvailablePipesSupport[i] == true
6042 && mode_lib->vba.NumberOfOTGSupport == true
6043 && mode_lib->vba.WritebackModeSupport == true
6044 && mode_lib->vba.WritebackLatencySupport == true
6045 && mode_lib->vba.WritebackScaleRatioAndTapsSupport == true
6046 && mode_lib->vba.CursorSupport == true
6047 && mode_lib->vba.PitchSupport == true
6048 && mode_lib->vba.PrefetchSupported[i] == true
6049 && mode_lib->vba.VRatioInPrefetchSupported[i] == true
6050 && mode_lib->vba.PTEBufferSizeNotExceeded[i] == true
6051 && mode_lib->vba.NonsupportedDSCInputBPC == false) {
6052 mode_lib->vba.ModeSupport[i] = true;
6053 } else {
6054 mode_lib->vba.ModeSupport[i] = false;
6055 }
6056 }
6057 for (i = DC__VOLTAGE_STATES; i >= 0; i--) {
6058 if (i == DC__VOLTAGE_STATES || mode_lib->vba.ModeSupport[i] == true) {
6059 mode_lib->vba.VoltageLevel = i;
6060 }
6061 }
6062 mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel];
6063 mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
6064 mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
6065 mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
6066 mode_lib->vba.FabricAndDRAMBandwidth =
6067 mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
6068 mode_lib->vba.ImmediateFlipSupport =
6069 mode_lib->vba.ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel];
6070 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
6071 mode_lib->vba.DPPPerPlane[k] = mode_lib->vba.NoOfDPP[mode_lib->vba.VoltageLevel][k];
6072 }
6073 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
6074 if (mode_lib->vba.BlendingAndTiming[k] == k) {
6075 mode_lib->vba.ODMCombineEnabled[k] =
6076 mode_lib->vba.ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
6077 } else {
6078 mode_lib->vba.ODMCombineEnabled[k] = 0;
6079 }
6080 mode_lib->vba.DSCEnabled[k] =
6081 mode_lib->vba.RequiresDSC[mode_lib->vba.VoltageLevel][k];
6082 mode_lib->vba.OutputBpp[k] =
6083 mode_lib->vba.OutputBppPerState[mode_lib->vba.VoltageLevel][k];
6084 }
6085}
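The final pair of loops above walk the voltage states from the highest index down: the first records ModeSupport[i] for every state from the individual support checks, and the second keeps overwriting VoltageLevel for each state that passed (with the top state as a fallback), so the function lands on the lowest supported voltage state before latching the per-state clocks for it. A minimal standalone sketch of that selection pattern, with an invented NUM_STATES and support table standing in for DC__VOLTAGE_STATES and the real checks:

#include <stdbool.h>
#include <stdio.h>

#define NUM_STATES 4	/* stand-in for DC__VOLTAGE_STATES */

/* Returns the lowest index whose support flag is set, else NUM_STATES. */
static int pick_voltage_level(const bool mode_support[NUM_STATES + 1])
{
	int level = NUM_STATES;	/* fallback: highest state */
	int i;

	for (i = NUM_STATES; i >= 0; i--) {
		if (i == NUM_STATES || mode_support[i])
			level = i;	/* keeps dropping to the lowest supported state */
	}
	return level;
}

int main(void)
{
	/* hypothetical result of the mode-support checks: states 2..4 pass */
	const bool mode_support[NUM_STATES + 1] = { false, false, true, true, true };

	printf("selected voltage level: %d\n", pick_voltage_level(mode_support));
	return 0;
}

Run as-is, the sketch prints level 2, the lowest state whose checks passed.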
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
deleted file mode 100644
index 4112409cd974..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ /dev/null
@@ -1,598 +0,0 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DML2_DISPLAY_MODE_VBA_H__
27#define __DML2_DISPLAY_MODE_VBA_H__
28
29#include "dml_common_defs.h"
30
31struct display_mode_lib;
32
33void set_prefetch_mode(struct display_mode_lib *mode_lib,
34 bool cstate_en,
35 bool pstate_en,
36 bool ignore_viewport_pos,
37 bool immediate_flip_support);
38
39#define dml_get_attr_decl(attr) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes)
40
41dml_get_attr_decl(clk_dcf_deepsleep);
42dml_get_attr_decl(wm_urgent);
43dml_get_attr_decl(wm_memory_trip);
44dml_get_attr_decl(wm_writeback_urgent);
45dml_get_attr_decl(wm_stutter_exit);
46dml_get_attr_decl(wm_stutter_enter_exit);
47dml_get_attr_decl(wm_dram_clock_change);
48dml_get_attr_decl(wm_writeback_dram_clock_change);
49dml_get_attr_decl(wm_xfc_underflow);
50dml_get_attr_decl(stutter_efficiency_no_vblank);
51dml_get_attr_decl(stutter_efficiency);
52dml_get_attr_decl(urgent_latency);
53dml_get_attr_decl(urgent_extra_latency);
54dml_get_attr_decl(nonurgent_latency);
55dml_get_attr_decl(dram_clock_change_latency);
56dml_get_attr_decl(dispclk_calculated);
57dml_get_attr_decl(total_data_read_bw);
58dml_get_attr_decl(return_bw);
59dml_get_attr_decl(tcalc);
60
61#define dml_get_pipe_attr_decl(attr) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe)
62
63dml_get_pipe_attr_decl(dsc_delay);
64dml_get_pipe_attr_decl(dppclk_calculated);
65dml_get_pipe_attr_decl(dscclk_calculated);
66dml_get_pipe_attr_decl(min_ttu_vblank);
67dml_get_pipe_attr_decl(vratio_prefetch_l);
68dml_get_pipe_attr_decl(vratio_prefetch_c);
69dml_get_pipe_attr_decl(dst_x_after_scaler);
70dml_get_pipe_attr_decl(dst_y_after_scaler);
71dml_get_pipe_attr_decl(dst_y_per_vm_vblank);
72dml_get_pipe_attr_decl(dst_y_per_row_vblank);
73dml_get_pipe_attr_decl(dst_y_prefetch);
74dml_get_pipe_attr_decl(dst_y_per_vm_flip);
75dml_get_pipe_attr_decl(dst_y_per_row_flip);
76dml_get_pipe_attr_decl(xfc_transfer_delay);
77dml_get_pipe_attr_decl(xfc_precharge_delay);
78dml_get_pipe_attr_decl(xfc_remote_surface_flip_latency);
79dml_get_pipe_attr_decl(xfc_prefetch_margin);
80
81unsigned int get_vstartup_calculated(
82 struct display_mode_lib *mode_lib,
83 const display_e2e_pipe_params_st *pipes,
84 unsigned int num_pipes,
85 unsigned int which_pipe);
86
87double get_total_immediate_flip_bytes(
88 struct display_mode_lib *mode_lib,
89 const display_e2e_pipe_params_st *pipes,
90 unsigned int num_pipes);
91double get_total_immediate_flip_bw(
92 struct display_mode_lib *mode_lib,
93 const display_e2e_pipe_params_st *pipes,
94 unsigned int num_pipes);
95double get_total_prefetch_bw(
96 struct display_mode_lib *mode_lib,
97 const display_e2e_pipe_params_st *pipes,
98 unsigned int num_pipes);
99
100unsigned int dml_get_voltage_level(
101 struct display_mode_lib *mode_lib,
102 const display_e2e_pipe_params_st *pipes,
103 unsigned int num_pipes);
104
105bool Calculate256BBlockSizes(
106 enum source_format_class SourcePixelFormat,
107 enum dm_swizzle_mode SurfaceTiling,
108 unsigned int BytePerPixelY,
109 unsigned int BytePerPixelC,
110 unsigned int *BlockHeight256BytesY,
111 unsigned int *BlockHeight256BytesC,
112 unsigned int *BlockWidth256BytesY,
113 unsigned int *BlockWidth256BytesC);
114
115
116struct vba_vars_st {
117 ip_params_st ip;
118 soc_bounding_box_st soc;
119
120 unsigned int MaximumMaxVStartupLines;
121 double cursor_bw[DC__NUM_DPP__MAX];
122 double meta_row_bw[DC__NUM_DPP__MAX];
123 double dpte_row_bw[DC__NUM_DPP__MAX];
124 double qual_row_bw[DC__NUM_DPP__MAX];
125 double WritebackDISPCLK;
126 double PSCL_THROUGHPUT_LUMA[DC__NUM_DPP__MAX];
127 double PSCL_THROUGHPUT_CHROMA[DC__NUM_DPP__MAX];
128 double DPPCLKUsingSingleDPPLuma;
129 double DPPCLKUsingSingleDPPChroma;
130 double DPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
131 double DISPCLKWithRamping;
132 double DISPCLKWithoutRamping;
133 double GlobalDPPCLK;
134 double DISPCLKWithRampingRoundedToDFSGranularity;
135 double DISPCLKWithoutRampingRoundedToDFSGranularity;
136 double MaxDispclkRoundedToDFSGranularity;
137 bool DCCEnabledAnyPlane;
138 double ReturnBandwidthToDCN;
139 unsigned int SwathWidthY[DC__NUM_DPP__MAX];
140 unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
141 double BytePerPixelDETY[DC__NUM_DPP__MAX];
142 double BytePerPixelDETC[DC__NUM_DPP__MAX];
143 double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX];
144 double ReadBandwidthPlaneChroma[DC__NUM_DPP__MAX];
145 unsigned int TotalActiveDPP;
146 unsigned int TotalDCCActiveDPP;
147 double UrgentRoundTripAndOutOfOrderLatency;
148 double DisplayPipeLineDeliveryTimeLuma[DC__NUM_DPP__MAX]; // WM
149 double DisplayPipeLineDeliveryTimeChroma[DC__NUM_DPP__MAX]; // WM
150 double LinesInDETY[DC__NUM_DPP__MAX]; // WM
151 double LinesInDETC[DC__NUM_DPP__MAX]; // WM
152 unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX]; // WM
153 unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX]; // WM
154 double FullDETBufferingTimeY[DC__NUM_DPP__MAX]; // WM
155 double FullDETBufferingTimeC[DC__NUM_DPP__MAX]; // WM
156 double MinFullDETBufferingTime;
157 double FrameTimeForMinFullDETBufferingTime;
158 double AverageReadBandwidthGBytePerSecond;
159 double PartOfBurstThatFitsInROB;
160 double StutterBurstTime;
161 //unsigned int NextPrefetchMode;
162 double VBlankTime;
163 double SmallestVBlank;
164 double DCFCLKDeepSleepPerPlane;
165 double EffectiveDETPlusLBLinesLuma;
166 double EffectiveDETPlusLBLinesChroma;
167 double UrgentLatencySupportUsLuma;
168 double UrgentLatencySupportUsChroma;
169 double UrgentLatencySupportUs[DC__NUM_DPP__MAX];
170 unsigned int DSCFormatFactor;
171 unsigned int BlockHeight256BytesY[DC__NUM_DPP__MAX];
172 unsigned int BlockHeight256BytesC[DC__NUM_DPP__MAX];
173 unsigned int BlockWidth256BytesY[DC__NUM_DPP__MAX];
174 unsigned int BlockWidth256BytesC[DC__NUM_DPP__MAX];
175 double VInitPreFillY[DC__NUM_DPP__MAX];
176 double VInitPreFillC[DC__NUM_DPP__MAX];
177 unsigned int MaxNumSwathY[DC__NUM_DPP__MAX];
178 unsigned int MaxNumSwathC[DC__NUM_DPP__MAX];
179 double PrefetchSourceLinesY[DC__NUM_DPP__MAX];
180 double PrefetchSourceLinesC[DC__NUM_DPP__MAX];
181 double PixelPTEBytesPerRow[DC__NUM_DPP__MAX];
182 double MetaRowByte[DC__NUM_DPP__MAX];
183 unsigned int dpte_row_height[DC__NUM_DPP__MAX];
184 unsigned int dpte_row_height_chroma[DC__NUM_DPP__MAX];
185 unsigned int meta_row_height[DC__NUM_DPP__MAX];
186 unsigned int meta_row_height_chroma[DC__NUM_DPP__MAX];
187
188 unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
189 unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
190 unsigned int MaxVStartupLines[DC__NUM_DPP__MAX];
191 double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
192 bool PrefetchModeSupported;
193 bool AllowDRAMClockChangeDuringVBlank[DC__NUM_DPP__MAX];
194 bool AllowDRAMSelfRefreshDuringVBlank[DC__NUM_DPP__MAX];
195 double RequiredPrefetchPixDataBW[DC__NUM_DPP__MAX];
196 double XFCRemoteSurfaceFlipDelay;
197 double TInitXFill;
198 double TslvChk;
199 double SrcActiveDrainRate;
200 double Tno_bw[DC__NUM_DPP__MAX];
201 bool ImmediateFlipSupported;
202
203 double prefetch_vm_bw[DC__NUM_DPP__MAX];
204 double prefetch_row_bw[DC__NUM_DPP__MAX];
205 bool ImmediateFlipSupportedForPipe[DC__NUM_DPP__MAX];
206 unsigned int VStartupLines;
207 double DisplayPipeLineDeliveryTimeLumaPrefetch[DC__NUM_DPP__MAX];
208 double DisplayPipeLineDeliveryTimeChromaPrefetch[DC__NUM_DPP__MAX];
209 unsigned int ActiveDPPs;
210 unsigned int LBLatencyHidingSourceLinesY;
211 unsigned int LBLatencyHidingSourceLinesC;
212 double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX];
213 double MinActiveDRAMClockChangeMargin;
214 double XFCSlaveVUpdateOffset[DC__NUM_DPP__MAX];
215 double XFCSlaveVupdateWidth[DC__NUM_DPP__MAX];
216 double XFCSlaveVReadyOffset[DC__NUM_DPP__MAX];
217 double InitFillLevel;
218 double FinalFillMargin;
219 double FinalFillLevel;
220 double RemainingFillLevel;
221 double TFinalxFill;
222
223
224 //
225 // SOC Bounding Box Parameters
226 //
227 double SRExitTime;
228 double SREnterPlusExitTime;
229 double UrgentLatency;
230 double WritebackLatency;
231 double PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency;
232 double NumberOfChannels;
233 double DRAMChannelWidth;
234 double FabricDatapathToDCNDataReturn;
235 double ReturnBusWidth;
236 double Downspreading;
237 double DISPCLKDPPCLKDSCCLKDownSpreading;
238 double DISPCLKDPPCLKVCOSpeed;
239 double RoundTripPingLatencyCycles;
240 double UrgentOutOfOrderReturnPerChannel;
241 unsigned int VMMPageSize;
242 double DRAMClockChangeLatency;
243 double XFCBusTransportTime;
244 double XFCXBUFLatencyTolerance;
245
246 //
247 // IP Parameters
248 //
249 unsigned int ROBBufferSizeInKByte;
250 double DETBufferSizeInKByte;
251 unsigned int DPPOutputBufferPixels;
252 unsigned int OPPOutputBufferLines;
253 unsigned int PixelChunkSizeInKByte;
254 double ReturnBW;
255 bool VirtualMemoryEnable;
256 unsigned int MaxPageTableLevels;
257 unsigned int OverridePageTableLevels;
258 unsigned int PTEChunkSize;
259 unsigned int MetaChunkSize;
260 unsigned int WritebackChunkSize;
261 bool ODMCapability;
262 unsigned int NumberOfDSC;
263 unsigned int LineBufferSize;
264 unsigned int MaxLineBufferLines;
265 unsigned int WritebackInterfaceLumaBufferSize;
266 unsigned int WritebackInterfaceChromaBufferSize;
267 unsigned int WritebackChromaLineBufferWidth;
268 double MaxDCHUBToPSCLThroughput;
269 double MaxPSCLToLBThroughput;
270 unsigned int PTEBufferSizeInRequests;
271 double DISPCLKRampingMargin;
272 unsigned int MaxInterDCNTileRepeaters;
273 bool XFCSupported;
274 double XFCSlvChunkSize;
275 double XFCFillBWOverhead;
276 double XFCFillConstant;
277 double XFCTSlvVupdateOffset;
278 double XFCTSlvVupdateWidth;
279 double XFCTSlvVreadyOffset;
280 double DPPCLKDelaySubtotal;
281 double DPPCLKDelaySCL;
282 double DPPCLKDelaySCLLBOnly;
283 double DPPCLKDelayCNVCFormater;
284 double DPPCLKDelayCNVCCursor;
285 double DISPCLKDelaySubtotal;
286 bool ProgressiveToInterlaceUnitInOPP;
287 unsigned int PDEProcessingBufIn64KBReqs;
288
289 // Pipe/Plane Parameters
290 int VoltageLevel;
291 double FabricAndDRAMBandwidth;
292 double FabricClock;
293 double DRAMSpeed;
294 double DISPCLK;
295 double SOCCLK;
296 double DCFCLK;
297
298 unsigned int NumberOfActivePlanes;
299 unsigned int ViewportWidth[DC__NUM_DPP__MAX];
300 unsigned int ViewportHeight[DC__NUM_DPP__MAX];
301 unsigned int ViewportYStartY[DC__NUM_DPP__MAX];
302 unsigned int ViewportYStartC[DC__NUM_DPP__MAX];
303 unsigned int PitchY[DC__NUM_DPP__MAX];
304 unsigned int PitchC[DC__NUM_DPP__MAX];
305 double HRatio[DC__NUM_DPP__MAX];
306 double VRatio[DC__NUM_DPP__MAX];
307 unsigned int htaps[DC__NUM_DPP__MAX];
308 unsigned int vtaps[DC__NUM_DPP__MAX];
309 unsigned int HTAPsChroma[DC__NUM_DPP__MAX];
310 unsigned int VTAPsChroma[DC__NUM_DPP__MAX];
311 unsigned int HTotal[DC__NUM_DPP__MAX];
312 unsigned int VTotal[DC__NUM_DPP__MAX];
313 unsigned int DPPPerPlane[DC__NUM_DPP__MAX];
314 double PixelClock[DC__NUM_DPP__MAX];
315 double PixelClockBackEnd[DC__NUM_DPP__MAX];
316 double DPPCLK[DC__NUM_DPP__MAX];
317 bool DCCEnable[DC__NUM_DPP__MAX];
318 unsigned int DCCMetaPitchY[DC__NUM_DPP__MAX];
319 enum scan_direction_class SourceScan[DC__NUM_DPP__MAX];
320 enum source_format_class SourcePixelFormat[DC__NUM_DPP__MAX];
321 bool WritebackEnable[DC__NUM_DPP__MAX];
322 double WritebackDestinationWidth[DC__NUM_DPP__MAX];
323 double WritebackDestinationHeight[DC__NUM_DPP__MAX];
324 double WritebackSourceHeight[DC__NUM_DPP__MAX];
325 enum source_format_class WritebackPixelFormat[DC__NUM_DPP__MAX];
326 unsigned int WritebackLumaHTaps[DC__NUM_DPP__MAX];
327 unsigned int WritebackLumaVTaps[DC__NUM_DPP__MAX];
328 unsigned int WritebackChromaHTaps[DC__NUM_DPP__MAX];
329 unsigned int WritebackChromaVTaps[DC__NUM_DPP__MAX];
330 double WritebackHRatio[DC__NUM_DPP__MAX];
331 double WritebackVRatio[DC__NUM_DPP__MAX];
332 unsigned int HActive[DC__NUM_DPP__MAX];
333 unsigned int VActive[DC__NUM_DPP__MAX];
334 bool Interlace[DC__NUM_DPP__MAX];
335 enum dm_swizzle_mode SurfaceTiling[DC__NUM_DPP__MAX];
336 unsigned int ScalerRecoutWidth[DC__NUM_DPP__MAX];
337 bool DynamicMetadataEnable[DC__NUM_DPP__MAX];
338 unsigned int DynamicMetadataLinesBeforeActiveRequired[DC__NUM_DPP__MAX];
339 unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX];
340 double DCCRate[DC__NUM_DPP__MAX];
341 bool ODMCombineEnabled[DC__NUM_DPP__MAX];
342 double OutputBpp[DC__NUM_DPP__MAX];
343 unsigned int NumberOfDSCSlices[DC__NUM_DPP__MAX];
344 bool DSCEnabled[DC__NUM_DPP__MAX];
345 unsigned int DSCDelay[DC__NUM_DPP__MAX];
346 unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
347 enum output_format_class OutputFormat[DC__NUM_DPP__MAX];
348 enum output_encoder_class Output[DC__NUM_DPP__MAX];
349 unsigned int BlendingAndTiming[DC__NUM_DPP__MAX];
350 bool SynchronizedVBlank;
351 unsigned int NumberOfCursors[DC__NUM_DPP__MAX];
352 unsigned int CursorWidth[DC__NUM_DPP__MAX][DC__NUM_CURSOR__MAX];
353 unsigned int CursorBPP[DC__NUM_DPP__MAX][DC__NUM_CURSOR__MAX];
354 bool XFCEnabled[DC__NUM_DPP__MAX];
355 bool ScalerEnabled[DC__NUM_DPP__MAX];
356
357 // Intermediates/Informational
358 bool ImmediateFlipSupport;
359 unsigned int SwathHeightY[DC__NUM_DPP__MAX];
360 unsigned int SwathHeightC[DC__NUM_DPP__MAX];
361 unsigned int DETBufferSizeY[DC__NUM_DPP__MAX];
362 unsigned int DETBufferSizeC[DC__NUM_DPP__MAX];
363 unsigned int LBBitPerPixel[DC__NUM_DPP__MAX];
364 double LastPixelOfLineExtraWatermark;
365 double TotalDataReadBandwidth;
366 unsigned int TotalActiveWriteback;
367 unsigned int EffectiveLBLatencyHidingSourceLinesLuma;
368 unsigned int EffectiveLBLatencyHidingSourceLinesChroma;
369 double BandwidthAvailableForImmediateFlip;
370 unsigned int PrefetchMode;
371 bool IgnoreViewportPositioning;
372 double PrefetchBandwidth[DC__NUM_DPP__MAX];
373 bool ErrorResult[DC__NUM_DPP__MAX];
374 double PDEAndMetaPTEBytesFrame[DC__NUM_DPP__MAX];
375
376 //
377	// Calculated mode_lib->vba outputs
378 //
379 double DCFClkDeepSleep;
380 double UrgentWatermark;
381 double UrgentExtraLatency;
382 double MemoryTripWatermark;
383 double WritebackUrgentWatermark;
384 double StutterExitWatermark;
385 double StutterEnterPlusExitWatermark;
386 double DRAMClockChangeWatermark;
387 double WritebackDRAMClockChangeWatermark;
388 double StutterEfficiency;
389 double StutterEfficiencyNotIncludingVBlank;
390 double MinUrgentLatencySupportUs;
391 double NonUrgentLatencyTolerance;
392 double MinActiveDRAMClockChangeLatencySupported;
393 enum clock_change_support DRAMClockChangeSupport;
394
395	// These are the clocks calculated by the library but they are not actually
396	// used explicitly. They are fetched by tests and then possibly used. The
397	// ultimate values to use are the ones specified by the parameters to DML.
398 double DISPCLK_calculated;
399 double DSCCLK_calculated[DC__NUM_DPP__MAX];
400 double DPPCLK_calculated[DC__NUM_DPP__MAX];
401
402 unsigned int VStartup[DC__NUM_DPP__MAX];
403 unsigned int VUpdateOffsetPix[DC__NUM_DPP__MAX];
404 unsigned int VUpdateWidthPix[DC__NUM_DPP__MAX];
405 unsigned int VReadyOffsetPix[DC__NUM_DPP__MAX];
406 unsigned int VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
407
408 double ImmediateFlipBW;
409 unsigned int TotImmediateFlipBytes;
410 double TCalc;
411 double MinTTUVBlank[DC__NUM_DPP__MAX];
412 double VRatioPrefetchY[DC__NUM_DPP__MAX];
413 double VRatioPrefetchC[DC__NUM_DPP__MAX];
414 double DSTXAfterScaler[DC__NUM_DPP__MAX];
415 double DSTYAfterScaler[DC__NUM_DPP__MAX];
416
417 double DestinationLinesToRequestVMInVBlank[DC__NUM_DPP__MAX];
418 double DestinationLinesToRequestRowInVBlank[DC__NUM_DPP__MAX];
419 double DestinationLinesForPrefetch[DC__NUM_DPP__MAX];
420 double DestinationLinesToRequestRowInImmediateFlip[DC__NUM_DPP__MAX];
421 double DestinationLinesToRequestVMInImmediateFlip[DC__NUM_DPP__MAX];
422
423 double XFCTransferDelay[DC__NUM_DPP__MAX];
424 double XFCPrechargeDelay[DC__NUM_DPP__MAX];
425 double XFCRemoteSurfaceFlipLatency[DC__NUM_DPP__MAX];
426 double XFCPrefetchMargin[DC__NUM_DPP__MAX];
427
428 display_e2e_pipe_params_st cache_pipes[DC__NUM_DPP__MAX];
429 unsigned int cache_num_pipes;
430 unsigned int pipe_plane[DC__NUM_DPP__MAX];
431
432 /* vba mode support */
433 /*inputs*/
434 bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
435 double MaxHSCLRatio;
436 double MaxVSCLRatio;
437 unsigned int MaxNumWriteback;
438 bool WritebackLumaAndChromaScalingSupported;
439 bool Cursor64BppSupport;
440 double DCFCLKPerState[DC__VOLTAGE_STATES + 1];
441 double FabricClockPerState[DC__VOLTAGE_STATES + 1];
442 double SOCCLKPerState[DC__VOLTAGE_STATES + 1];
443 double PHYCLKPerState[DC__VOLTAGE_STATES + 1];
444 double MaxDppclk[DC__VOLTAGE_STATES + 1];
445 double MaxDSCCLK[DC__VOLTAGE_STATES + 1];
446 double DRAMSpeedPerState[DC__VOLTAGE_STATES + 1];
447 double MaxDispclk[DC__VOLTAGE_STATES + 1];
448
449 /*outputs*/
450 bool ScaleRatioAndTapsSupport;
451 bool SourceFormatPixelAndScanSupport;
452 unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
453 double BytePerPixelInDETY[DC__NUM_DPP__MAX];
454 double BytePerPixelInDETC[DC__NUM_DPP__MAX];
455 double TotalReadBandwidthConsumedGBytePerSecond;
456 double ReadBandwidth[DC__NUM_DPP__MAX];
457 double TotalWriteBandwidthConsumedGBytePerSecond;
458 double WriteBandwidth[DC__NUM_DPP__MAX];
459 double TotalBandwidthConsumedGBytePerSecond;
460 bool DCCEnabledInAnyPlane;
461 bool WritebackLatencySupport;
462 bool WritebackModeSupport;
463 bool Writeback10bpc420Supported;
464 bool BandwidthSupport[DC__VOLTAGE_STATES + 1];
465 unsigned int TotalNumberOfActiveWriteback;
466 double CriticalPoint;
467 double ReturnBWToDCNPerState;
468 double FabricAndDRAMBandwidthPerState[DC__VOLTAGE_STATES + 1];
469 double ReturnBWPerState[DC__VOLTAGE_STATES + 1];
470 double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1];
471 bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
472 bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
473 bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
474 bool PrefetchSupported[DC__VOLTAGE_STATES + 1];
475 bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1];
476 bool DISPCLK_DPPCLK_Support[DC__VOLTAGE_STATES + 1];
477 bool TotalAvailablePipesSupport[DC__VOLTAGE_STATES + 1];
478 bool UrgentLatencySupport[DC__VOLTAGE_STATES + 1];
479 bool ModeSupport[DC__VOLTAGE_STATES + 1];
480 bool DIOSupport[DC__VOLTAGE_STATES + 1];
481 bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];
482 bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
483 bool ROBSupport[DC__VOLTAGE_STATES + 1];
484 bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1];
485 bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
486 bool IsErrorResult[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
487 bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1];
488 bool prefetch_vm_bw_valid;
489 bool prefetch_row_bw_valid;
490 bool NumberOfOTGSupport;
491 bool NonsupportedDSCInputBPC;
492 bool WritebackScaleRatioAndTapsSupport;
493 bool CursorSupport;
494 bool PitchSupport;
495
496 double WritebackLineBufferLumaBufferSize;
497 double WritebackLineBufferChromaBufferSize;
498 double WritebackMinHSCLRatio;
499 double WritebackMinVSCLRatio;
500 double WritebackMaxHSCLRatio;
501 double WritebackMaxVSCLRatio;
502 double WritebackMaxHSCLTaps;
503 double WritebackMaxVSCLTaps;
504 unsigned int MaxNumDPP;
505 unsigned int MaxNumOTG;
506 double CursorBufferSize;
507 double CursorChunkSize;
508 unsigned int Mode;
509 unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
510 double OutputLinkDPLanes[DC__NUM_DPP__MAX];
511 double SwathWidthYPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
512 double SwathHeightYPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
513 double SwathHeightCPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
514 double UrgentLatencySupportUsPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
515 double VRatioPreY[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
516 double VRatioPreC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
517 double RequiredPrefetchPixelDataBW[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
518 double RequiredDPPCLK[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
519 double RequiredDISPCLK[DC__VOLTAGE_STATES + 1];
520 double TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1];
521 double TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1];
522 double PrefetchBW[DC__NUM_DPP__MAX];
523 double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX];
524 double MetaRowBytes[DC__NUM_DPP__MAX];
525 double DPTEBytesPerRow[DC__NUM_DPP__MAX];
526 double PrefetchLinesY[DC__NUM_DPP__MAX];
527 double PrefetchLinesC[DC__NUM_DPP__MAX];
528 unsigned int MaxNumSwY[DC__NUM_DPP__MAX];
529 unsigned int MaxNumSwC[DC__NUM_DPP__MAX];
530 double PrefillY[DC__NUM_DPP__MAX];
531 double PrefillC[DC__NUM_DPP__MAX];
532 double LineTimesForPrefetch[DC__NUM_DPP__MAX];
533 double LinesForMetaPTE[DC__NUM_DPP__MAX];
534 double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX];
535 double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
536 double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
537 unsigned int OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
538 double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
539 unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];
540 unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];
541 unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX];
542 unsigned int Read256BlockWidthC[DC__NUM_DPP__MAX];
543 unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
544 double MaxSwathHeightY[DC__NUM_DPP__MAX];
545 double MaxSwathHeightC[DC__NUM_DPP__MAX];
546 double MinSwathHeightY[DC__NUM_DPP__MAX];
547 double MinSwathHeightC[DC__NUM_DPP__MAX];
548 double PSCL_FACTOR[DC__NUM_DPP__MAX];
549 double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
550 double MaximumVStartup[DC__NUM_DPP__MAX];
551 double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
552 double AlignedYPitch[DC__NUM_DPP__MAX];
553 double AlignedCPitch[DC__NUM_DPP__MAX];
554 double MaximumSwathWidth[DC__NUM_DPP__MAX];
555 double final_flip_bw[DC__NUM_DPP__MAX];
556 double ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1];
557
558 double WritebackLumaVExtra;
559 double WritebackChromaVExtra;
560 double WritebackRequiredDISPCLK;
561 double MaximumSwathWidthSupport;
562 double MaximumSwathWidthInDETBuffer;
563 double MaximumSwathWidthInLineBuffer;
564 double MaxDispclkRoundedDownToDFSGranularity;
565 double MaxDppclkRoundedDownToDFSGranularity;
566 double PlaneRequiredDISPCLKWithoutODMCombine;
567 double PlaneRequiredDISPCLK;
568 double TotalNumberOfActiveOTG;
569 double FECOverhead;
570 double EffectiveFECOverhead;
571 unsigned int Outbpp;
572 unsigned int OutbppDSC;
573 double TotalDSCUnitsRequired;
574 double bpp;
575 unsigned int slices;
576 double SwathWidthGranularityY;
577 double RoundedUpMaxSwathSizeBytesY;
578 double SwathWidthGranularityC;
579 double RoundedUpMaxSwathSizeBytesC;
580 double LinesInDETLuma;
581 double LinesInDETChroma;
582 double EffectiveDETLBLinesLuma;
583 double EffectiveDETLBLinesChroma;
584 double ProjectedDCFCLKDeepSleep;
585 double PDEAndMetaPTEBytesPerFrameY;
586 double PDEAndMetaPTEBytesPerFrameC;
587 unsigned int MetaRowBytesY;
588 unsigned int MetaRowBytesC;
589 unsigned int DPTEBytesPerRowC;
590 unsigned int DPTEBytesPerRowY;
591 double ExtraLatency;
592 double TimeCalc;
593 double TWait;
594 double MaximumReadBandwidthWithPrefetch;
595 double total_dcn_read_bw_with_flip;
596};
597
598#endif /* __DML2_DISPLAY_MODE_VBA_H__ */
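The dml_get_attr_decl() and dml_get_pipe_attr_decl() macros above only stamp out uniform getter prototypes (double get_<attr>(...)); the definitions live in the .c files. A self-contained sketch of how one expansion might be backed, where the struct layout, the urgent_latency_us field and the sample value are invented for illustration and are not the real dml types:

#include <stdio.h>

/* stand-ins for the real dml types from dml_common_defs.h */
struct display_mode_lib { double urgent_latency_us; };
typedef struct { int unused; } display_e2e_pipe_params_st;

#define dml_get_attr_decl(attr) \
	double get_##attr(struct display_mode_lib *mode_lib, \
			  const display_e2e_pipe_params_st *pipes, \
			  unsigned int num_pipes)

/*
 * dml_get_attr_decl(urgent_latency) expands to the prototype below, so
 * callers end up with a uniform get_<attr>() interface.
 */
dml_get_attr_decl(urgent_latency)
{
	(void)pipes;
	(void)num_pipes;
	return mode_lib->urgent_latency_us;	/* invented backing field */
}

int main(void)
{
	struct display_mode_lib lib = { .urgent_latency_us = 4.0 };

	printf("urgent latency: %.1f us\n", get_urgent_latency(&lib, NULL, 0));
	return 0;
}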
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
deleted file mode 100644
index 325dd2b757d6..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
+++ /dev/null
@@ -1,1772 +0,0 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "display_mode_lib.h"
27#include "display_mode_vba.h"
28#include "display_rq_dlg_calc.h"
29
30/*
31 * NOTE:
32 * This file is gcc-parseable HW gospel, coming straight from HW engineers.
33 *
34 * It doesn't adhere to Linux kernel style and sometimes will do things in odd
35 * ways. Unless there is something clearly wrong with it, the code should
36 * remain as-is, as it provides us with a guarantee from HW that it is correct.
37 */
38
39static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
40 double *refcyc_per_req_delivery_pre_cur,
41 double *refcyc_per_req_delivery_cur,
42 double refclk_freq_in_mhz,
43 double ref_freq_to_pix_freq,
44 double hscale_pixel_rate_l,
45 double hscl_ratio,
46 double vratio_pre_l,
47 double vratio_l,
48 unsigned int cur_width,
49 enum cursor_bpp cur_bpp);
50
51#include "dml_inline_defs.h"
52
53static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
54{
55 unsigned int ret_val = 0;
56
57 if (source_format == dm_444_16) {
58 if (!is_chroma)
59 ret_val = 2;
60 } else if (source_format == dm_444_32) {
61 if (!is_chroma)
62 ret_val = 4;
63 } else if (source_format == dm_444_64) {
64 if (!is_chroma)
65 ret_val = 8;
66 } else if (source_format == dm_420_8) {
67 if (is_chroma)
68 ret_val = 2;
69 else
70 ret_val = 1;
71 } else if (source_format == dm_420_10) {
72 if (is_chroma)
73 ret_val = 4;
74 else
75 ret_val = 2;
76 } else if (source_format == dm_444_8) {
77 ret_val = 1;
78 }
79 return ret_val;
80}
81
82static bool is_dual_plane(enum source_format_class source_format)
83{
84 bool ret_val = 0;
85
86 if ((source_format == dm_420_8) || (source_format == dm_420_10))
87 ret_val = 1;
88
89 return ret_val;
90}
91
92static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
93 double refclk_freq_in_mhz,
94 double pclk_freq_in_mhz,
95 bool odm_combine,
96 unsigned int recout_width,
97 unsigned int hactive,
98 double vratio,
99 double hscale_pixel_rate,
100 unsigned int delivery_width,
101 unsigned int req_per_swath_ub)
102{
103 double refcyc_per_delivery = 0.0;
104
105 if (vratio <= 1.0) {
106 if (odm_combine)
107 refcyc_per_delivery = (double) refclk_freq_in_mhz
108 * dml_min((double) recout_width, (double) hactive / 2.0)
109 / pclk_freq_in_mhz / (double) req_per_swath_ub;
110 else
111 refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
112 / pclk_freq_in_mhz / (double) req_per_swath_ub;
113 } else {
114 refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
115 / (double) hscale_pixel_rate / (double) req_per_swath_ub;
116 }
117
118 dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
119 dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
120 dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
121 dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
122 dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
123 dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
124
125 return refcyc_per_delivery;
126
127}
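/*
 * Worked example with made-up numbers for get_refcyc_per_delivery() above:
 * with vratio <= 1 and no ODM combine, refclk_freq_in_mhz = 600,
 * pclk_freq_in_mhz = 300, recout_width = 1920 and req_per_swath_ub = 30,
 * refcyc_per_delivery = 600 * 1920 / 300 / 30 = 128 reference-clock cycles
 * per request delivery; the vratio > 1 branch divides delivery_width by
 * hscale_pixel_rate instead of using the recout width and pixel clock.
 */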
128
129static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
130{
131 if (tile_size == dm_256k_tile)
132 return (256 * 1024);
133 else if (tile_size == dm_64k_tile)
134 return (64 * 1024);
135 else
136 return (4 * 1024);
137}
138
139static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
140 display_data_rq_regs_st *rq_regs,
141 const display_data_rq_sizing_params_st rq_sizing)
142{
143 dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
144 print__data_rq_sizing_params_st(mode_lib, rq_sizing);
145
146 rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
147
148 if (rq_sizing.min_chunk_bytes == 0)
149 rq_regs->min_chunk_size = 0;
150 else
151 rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
152
153 rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
154 if (rq_sizing.min_meta_chunk_bytes == 0)
155 rq_regs->min_meta_chunk_size = 0;
156 else
157 rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
158
159 rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
160 rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
161}
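/*
 * Example encoding with illustrative values for extract_rq_sizing_regs()
 * above: chunk_bytes = 8 KiB gives chunk_size = dml_log2(8192) - 10 = 3,
 * and min_chunk_bytes = 1 KiB gives min_chunk_size = 10 - 8 + 1 = 3; the
 * register fields are log2-encoded offsets rather than raw byte counts.
 */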
162
163static void extract_rq_regs(struct display_mode_lib *mode_lib,
164 display_rq_regs_st *rq_regs,
165 const display_rq_params_st rq_param)
166{
167 unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
168 unsigned int detile_buf_plane1_addr = 0;
169
170 extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
171
172 rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
173 1) - 3;
174
175 if (rq_param.yuv420) {
176 extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
177 rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
178 1) - 3;
179 }
180
181 rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
182 rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
183
184 // FIXME: take the max between luma, chroma chunk size?
185	// okay for now, as we are setting chunk_bytes to 8kb anyway
186 if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
187 rq_regs->drq_expansion_mode = 0;
188 } else {
189 rq_regs->drq_expansion_mode = 2;
190 }
191 rq_regs->prq_expansion_mode = 1;
192 rq_regs->mrq_expansion_mode = 1;
193 rq_regs->crq_expansion_mode = 1;
194
195 if (rq_param.yuv420) {
196 if ((double) rq_param.misc.rq_l.stored_swath_bytes
197 / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
198 detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
199 } else {
200 detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
201 256,
202 0) / 64.0; // 2/3 to chroma
203 }
204 }
205 rq_regs->plane1_base_address = detile_buf_plane1_addr;
206}
207
208static void handle_det_buf_split(struct display_mode_lib *mode_lib,
209 display_rq_params_st *rq_param,
210 const display_pipe_source_params_st pipe_src_param)
211{
212 unsigned int total_swath_bytes = 0;
213 unsigned int swath_bytes_l = 0;
214 unsigned int swath_bytes_c = 0;
215 unsigned int full_swath_bytes_packed_l = 0;
216 unsigned int full_swath_bytes_packed_c = 0;
217 bool req128_l = 0;
218 bool req128_c = 0;
219 bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
220 bool surf_vert = (pipe_src_param.source_scan == dm_vert);
221 unsigned int log2_swath_height_l = 0;
222 unsigned int log2_swath_height_c = 0;
223 unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
224
225 full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
226 full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
227
228 if (rq_param->yuv420_10bpc) {
229 full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
230 256,
231 1) + 256;
232 full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
233 256,
234 1) + 256;
235 }
236
237 if (rq_param->yuv420) {
238 total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
239
240 if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
241 req128_l = 0;
242 req128_c = 0;
243 swath_bytes_l = full_swath_bytes_packed_l;
244 swath_bytes_c = full_swath_bytes_packed_c;
245 } else { //128b request (for luma only for yuv420 8bpc)
246 req128_l = 1;
247 req128_c = 0;
248 swath_bytes_l = full_swath_bytes_packed_l / 2;
249 swath_bytes_c = full_swath_bytes_packed_c;
250 }
251	// Note: assumes the config that is passed in will fit into
252	// the detile buffer.
253 } else {
254 total_swath_bytes = 2 * full_swath_bytes_packed_l;
255
256 if (total_swath_bytes <= detile_buf_size_in_bytes)
257 req128_l = 0;
258 else
259 req128_l = 1;
260
261 swath_bytes_l = total_swath_bytes;
262 swath_bytes_c = 0;
263 }
264 rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
265 rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
266
267 if (surf_linear) {
268 log2_swath_height_l = 0;
269 log2_swath_height_c = 0;
270 } else if (!surf_vert) {
271 log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
272 log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
273 } else {
274 log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
275 log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
276 }
277 rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
278 rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
279
280 dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
281 dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
282 dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
283 __func__,
284 full_swath_bytes_packed_l);
285 dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
286 __func__,
287 full_swath_bytes_packed_c);
288}
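/*
 * Worked example with invented sizes for handle_det_buf_split() above: a
 * single-plane (non-YUV420) surface with full_swath_bytes_packed_l = 96 KiB
 * and a 164 KiB detile buffer gives total_swath_bytes = 192 KiB, which does
 * not fit, so req128_l is set and (for a horizontal scan on a tiled surface)
 * log2_swath_height_l drops by one, halving the stored swath height; had the
 * doubled swath fit, full 256-byte requests would have been kept.
 */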
289
290static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
291 display_data_rq_dlg_params_st *rq_dlg_param,
292 display_data_rq_misc_params_st *rq_misc_param,
293 display_data_rq_sizing_params_st *rq_sizing_param,
294 unsigned int vp_width,
295 unsigned int vp_height,
296 unsigned int data_pitch,
297 unsigned int meta_pitch,
298 unsigned int source_format,
299 unsigned int tiling,
300 unsigned int macro_tile_size,
301 unsigned int source_scan,
302 unsigned int is_chroma)
303{
304 bool surf_linear = (tiling == dm_sw_linear);
305 bool surf_vert = (source_scan == dm_vert);
306
307 unsigned int bytes_per_element;
308 unsigned int bytes_per_element_y = get_bytes_per_element((enum source_format_class)(source_format),
309 false);
310 unsigned int bytes_per_element_c = get_bytes_per_element((enum source_format_class)(source_format),
311 true);
312
313 unsigned int blk256_width = 0;
314 unsigned int blk256_height = 0;
315
316 unsigned int blk256_width_y = 0;
317 unsigned int blk256_height_y = 0;
318 unsigned int blk256_width_c = 0;
319 unsigned int blk256_height_c = 0;
320 unsigned int log2_bytes_per_element;
321 unsigned int log2_blk256_width;
322 unsigned int log2_blk256_height;
323 unsigned int blk_bytes;
324 unsigned int log2_blk_bytes;
325 unsigned int log2_blk_height;
326 unsigned int log2_blk_width;
327 unsigned int log2_meta_req_bytes;
328 unsigned int log2_meta_req_height;
329 unsigned int log2_meta_req_width;
330 unsigned int meta_req_width;
331 unsigned int meta_req_height;
332 unsigned int log2_meta_row_height;
333 unsigned int meta_row_width_ub;
334 unsigned int log2_meta_chunk_bytes;
335 unsigned int log2_meta_chunk_height;
336
337 //full sized meta chunk width in unit of data elements
338 unsigned int log2_meta_chunk_width;
339 unsigned int log2_min_meta_chunk_bytes;
340 unsigned int min_meta_chunk_width;
341 unsigned int meta_chunk_width;
342 unsigned int meta_chunk_per_row_int;
343 unsigned int meta_row_remainder;
344 unsigned int meta_chunk_threshold;
345 unsigned int meta_blk_bytes;
346 unsigned int meta_blk_height;
347 unsigned int meta_blk_width;
348 unsigned int meta_surface_bytes;
349 unsigned int vmpg_bytes;
350 unsigned int meta_pte_req_per_frame_ub;
351 unsigned int meta_pte_bytes_per_frame_ub;
352 const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
353 const unsigned int dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
354 const unsigned int pde_proc_buffer_size_64k_reqs =
355 mode_lib->ip.pde_proc_buffer_size_64k_reqs;
356
357 unsigned int log2_vmpg_height = 0;
358 unsigned int log2_vmpg_width = 0;
359 unsigned int log2_dpte_req_height_ptes = 0;
360 unsigned int log2_dpte_req_height = 0;
361 unsigned int log2_dpte_req_width = 0;
362 unsigned int log2_dpte_row_height_linear = 0;
363 unsigned int log2_dpte_row_height = 0;
364 unsigned int log2_dpte_group_width = 0;
365 unsigned int dpte_row_width_ub = 0;
366 unsigned int dpte_req_height = 0;
367 unsigned int dpte_req_width = 0;
368 unsigned int dpte_group_width = 0;
369 unsigned int log2_dpte_group_bytes = 0;
370 unsigned int log2_dpte_group_length = 0;
371 unsigned int pde_buf_entries;
372 bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10);
373
374 Calculate256BBlockSizes((enum source_format_class)(source_format),
375 (enum dm_swizzle_mode)(tiling),
376 bytes_per_element_y,
377 bytes_per_element_c,
378 &blk256_height_y,
379 &blk256_height_c,
380 &blk256_width_y,
381 &blk256_width_c);
382
383 if (!is_chroma) {
384 blk256_width = blk256_width_y;
385 blk256_height = blk256_height_y;
386 bytes_per_element = bytes_per_element_y;
387 } else {
388 blk256_width = blk256_width_c;
389 blk256_height = blk256_height_c;
390 bytes_per_element = bytes_per_element_c;
391 }
392
393 log2_bytes_per_element = dml_log2(bytes_per_element);
394
395 dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
396 dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
397 dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
398 dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
399
400 log2_blk256_width = dml_log2((double) blk256_width);
401 log2_blk256_height = dml_log2((double) blk256_height);
402 blk_bytes = surf_linear ?
403 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
404 log2_blk_bytes = dml_log2((double) blk_bytes);
405 log2_blk_height = 0;
406 log2_blk_width = 0;
407
408	// remember the log rules:
409	// "+" in log is multiply
410	// "-" in log is divide
411	// "/2" is like square root
412	// blk is vertically biased
413 if (tiling != dm_sw_linear)
414 log2_blk_height = log2_blk256_height
415 + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
416 else
417 log2_blk_height = 0; // blk height of 1
418
419 log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
420
421 if (!surf_vert) {
422 rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
423 + blk256_width;
424 rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
425 } else {
426 rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_height - 1, blk256_height, 1)
427 + blk256_height;
428 rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
429 }
430
431 if (!surf_vert)
432 rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
433 * bytes_per_element;
434 else
435 rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
436 * bytes_per_element;
437
438 rq_misc_param->blk256_height = blk256_height;
439 rq_misc_param->blk256_width = blk256_width;
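	// For illustration (assuming dml_round_to_multiple(x, m, 1) rounds x up to a
	// multiple of m): a horizontally accessed 1920-wide viewport with an 8x8 256B
	// block and 4 bytes per element gives
	//   swath_width_ub   = round_up(1919, 8) + 8 = 1928 elements (241 blocks,
	//                      since an unaligned viewport can straddle one extra block)
	//   req_per_swath_ub = 1928 >> 3 = 241 requests
	//   full_swath_bytes = 1928 * 8 * 4 = 61696 bytes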
440
441 // -------
442 // meta
443 // -------
444 log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
445
446 // each 64b meta request for dcn is 8x8 meta elements and
447	// a meta element covers one 256b block of the data surface.
448 log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
449 log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
450 - log2_meta_req_height;
451 meta_req_width = 1 << log2_meta_req_width;
452 meta_req_height = 1 << log2_meta_req_height;
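	// For illustration, with 4 bytes per element and an 8x8 256B block:
	//   log2_meta_req_height = 3 + 3 = 6          -> meta_req_height = 64
	//   log2_meta_req_width  = 6 + 8 - 2 - 6 = 6  -> meta_req_width  = 64
	// Sanity check: a 64x64 element region holds 8x8 = 64 256B blocks, i.e. 64
	// meta bytes, which is exactly one 64B meta request.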
453 log2_meta_row_height = 0;
454 meta_row_width_ub = 0;
455
456 // the dimensions of a meta row are meta_row_width x meta_row_height in elements.
457 // calculate upper bound of the meta_row_width
458 if (!surf_vert) {
459 log2_meta_row_height = log2_meta_req_height;
460 meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
461 + meta_req_width;
462 rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
463 } else {
464 log2_meta_row_height = log2_meta_req_width;
465 meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
466 + meta_req_height;
467 rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
468 }
469 rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
470
471 rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
472
473 log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
474 log2_meta_chunk_height = log2_meta_row_height;
475
476 //full sized meta chunk width in unit of data elements
477 log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
478 - log2_meta_chunk_height;
479 log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
480 min_meta_chunk_width = 1
481 << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
482 - log2_meta_chunk_height);
483 meta_chunk_width = 1 << log2_meta_chunk_width;
484 meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
485 meta_row_remainder = meta_row_width_ub % meta_chunk_width;
486 meta_chunk_threshold = 0;
487 meta_blk_bytes = 4096;
488 meta_blk_height = blk256_height * 64;
489 meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
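	// For illustration, with 4 bytes per element and an 8x8 256B block:
	//   meta_blk_height = 8 * 64 = 512
	//   meta_blk_width  = 4096 * 256 / 4 / 512 = 512
	// Sanity check: a 4KB meta block covers 4096 256B data blocks, i.e.
	// 4096 * 64 = 262144 elements = 512 * 512.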
490 meta_surface_bytes = meta_pitch
491 * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height)
492 * bytes_per_element / 256;
493 vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
494 meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes,
495 8 * vmpg_bytes,
496 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
497 meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
498 rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
499
500 dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
501 dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
502 dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
503 dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
504 __func__,
505 meta_pte_req_per_frame_ub);
506 dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
507 __func__,
508 meta_pte_bytes_per_frame_ub);
509
510 if (!surf_vert)
511 meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
512 else
513 meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
514
515 if (meta_row_remainder <= meta_chunk_threshold)
516 rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
517 else
518 rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
519
520 // ------
521 // dpte
522 // ------
523 if (surf_linear) {
524 log2_vmpg_height = 0; // one line high
525 } else {
526 log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
527 }
528 log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
529
530 // only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
531 if (surf_linear) { //one 64B PTE request returns 8 PTEs
532 log2_dpte_req_height_ptes = 0;
533 log2_dpte_req_width = log2_vmpg_width + 3;
534 log2_dpte_req_height = 0;
535 } else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
536 //one 64B req gives 8x1 PTEs for 4KB tile
537 log2_dpte_req_height_ptes = 0;
538 log2_dpte_req_width = log2_blk_width + 3;
539 log2_dpte_req_height = log2_blk_height + 0;
540 } else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
541 //two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
542 log2_dpte_req_height_ptes = 4;
543 log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
544 log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
545	} else { //64KB page size, so the tile block must also be 64KB
546 //one 64B req gives 8x1 PTEs for 64KB tile
547 log2_dpte_req_height_ptes = 0;
548 log2_dpte_req_width = log2_blk_width + 3;
549 log2_dpte_req_height = log2_blk_height + 0;
550 }
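	// For illustration, in the "tile block >= 64KB with a 4KB VM page" case above,
	// assuming an 8x8 256B block with 4 bytes per element:
	//   log2_dpte_req_width  = 3 + 4 = 7  -> dpte_req_width  = 128 elements
	//   log2_dpte_req_height = 3 + 4 = 7  -> dpte_req_height = 128 elements
	// Sanity check: 128 * 128 * 4 bytes = 64KB, which is 16 PTEs of 4KB,
	// i.e. the two 64B PTE requests mentioned in the comment.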
551
552	// The dpte request dimensions in data elements are dpte_req_width x dpte_req_height.
553	// log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte request represents.
554 // That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
555 //log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
556 //log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
557 dpte_req_height = 1 << log2_dpte_req_height;
558 dpte_req_width = 1 << log2_dpte_req_width;
559
560 // calculate pitch dpte row buffer can hold
561 // round the result down to a power of two.
562 pde_buf_entries = yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs;
563 if (surf_linear) {
564 unsigned int dpte_row_height;
565
566 log2_dpte_row_height_linear = dml_floor(dml_log2(dml_min(64 * 1024 * pde_buf_entries
567 / bytes_per_element,
568 dpte_buf_in_pte_reqs
569 * dpte_req_width)
570 / data_pitch),
571 1);
572
573 ASSERT(log2_dpte_row_height_linear >= 3);
574
575 if (log2_dpte_row_height_linear > 7)
576 log2_dpte_row_height_linear = 7;
577
578 log2_dpte_row_height = log2_dpte_row_height_linear;
579 // For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
580 // the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
581 dpte_row_height = 1 << log2_dpte_row_height;
582 dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1,
583 dpte_req_width,
584 1) + dpte_req_width;
585 rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
586 } else {
587 // the upper bound of the dpte_row_width without dependency on viewport position follows.
588	// for tiled mode, the row height is the same as the request height and the row stores up to the viewport size upper bound
589 if (!surf_vert) {
590 log2_dpte_row_height = log2_dpte_req_height;
591 dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
592 + dpte_req_width;
593 rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
594 } else {
595 log2_dpte_row_height =
596 (log2_blk_width < log2_dpte_req_width) ?
597 log2_blk_width : log2_dpte_req_width;
598 dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
599 + dpte_req_height;
600 rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
601 }
602 }
603 if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
604 rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
605 else
606 rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
607
608 rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
609
610 // the dpte_group_bytes is reduced for the specific case of vertical
611 // access of a tile surface that has dpte request of 8x1 ptes.
612	if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced; in this case a page fault can occur within a group
613 rq_sizing_param->dpte_group_bytes = 512;
614 else
615 //full size
616 rq_sizing_param->dpte_group_bytes = 2048;
617
618 //since pte request size is 64byte, the number of data pte requests per full sized group is as follows.
619 log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
620 log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
621
622 // full sized data pte group width in elements
623 if (!surf_vert)
624 log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
625 else
626 log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
627
628 //But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
629 if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
630 log2_dpte_group_width = log2_dpte_group_width - 1;
631
632 dpte_group_width = 1 << log2_dpte_group_width;
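	// For illustration, with the full-sized 2048B group and the 128-element-wide
	// dpte request from the 64KB-tile / 4KB-page example above (horizontal access):
	//   log2_dpte_group_length = 11 - 6 = 5      (32 64B requests per group)
	//   log2_dpte_group_width  = 5 + 7 = 12      -> 4096 elements
	//   after the 2*64B adjustment: 12 - 1 = 11  -> dpte_group_width = 2048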
633
634 // since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
635 // the upper bound for the dpte groups per row is as follows.
636 rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width,
637 1);
638}
639
640static void get_surf_rq_param(struct display_mode_lib *mode_lib,
641 display_data_rq_sizing_params_st *rq_sizing_param,
642 display_data_rq_dlg_params_st *rq_dlg_param,
643 display_data_rq_misc_params_st *rq_misc_param,
644 const display_pipe_source_params_st pipe_src_param,
645 bool is_chroma)
646{
647 bool mode_422 = 0;
648 unsigned int vp_width = 0;
649 unsigned int vp_height = 0;
650 unsigned int data_pitch = 0;
651 unsigned int meta_pitch = 0;
652 unsigned int ppe = mode_422 ? 2 : 1;
653
654 // FIXME check if ppe apply for both luma and chroma in 422 case
655 if (is_chroma) {
656 vp_width = pipe_src_param.viewport_width_c / ppe;
657 vp_height = pipe_src_param.viewport_height_c;
658 data_pitch = pipe_src_param.data_pitch_c;
659 meta_pitch = pipe_src_param.meta_pitch_c;
660 } else {
661 vp_width = pipe_src_param.viewport_width / ppe;
662 vp_height = pipe_src_param.viewport_height;
663 data_pitch = pipe_src_param.data_pitch;
664 meta_pitch = pipe_src_param.meta_pitch;
665 }
666
667 rq_sizing_param->chunk_bytes = 8192;
668
669 if (rq_sizing_param->chunk_bytes == 64 * 1024)
670 rq_sizing_param->min_chunk_bytes = 0;
671 else
672 rq_sizing_param->min_chunk_bytes = 1024;
673
674 rq_sizing_param->meta_chunk_bytes = 2048;
675 rq_sizing_param->min_meta_chunk_bytes = 256;
676
677 rq_sizing_param->mpte_group_bytes = 2048;
678
679 get_meta_and_pte_attr(mode_lib,
680 rq_dlg_param,
681 rq_misc_param,
682 rq_sizing_param,
683 vp_width,
684 vp_height,
685 data_pitch,
686 meta_pitch,
687 pipe_src_param.source_format,
688 pipe_src_param.sw_mode,
689 pipe_src_param.macro_tile_size,
690 pipe_src_param.source_scan,
691 is_chroma);
692}
693
694void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
695 display_rq_params_st *rq_param,
696 const display_pipe_source_params_st pipe_src_param)
697{
698 // get param for luma surface
699 rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
700 || pipe_src_param.source_format == dm_420_10;
701 rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
702
703 get_surf_rq_param(mode_lib,
704 &(rq_param->sizing.rq_l),
705 &(rq_param->dlg.rq_l),
706 &(rq_param->misc.rq_l),
707 pipe_src_param,
708 0);
709
710 if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) {
711 // get param for chroma surface
712 get_surf_rq_param(mode_lib,
713 &(rq_param->sizing.rq_c),
714 &(rq_param->dlg.rq_c),
715 &(rq_param->misc.rq_c),
716 pipe_src_param,
717 1);
718 }
719
720 // calculate how to split the det buffer space between luma and chroma
721 handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
722 print__rq_params_st(mode_lib, *rq_param);
723}
724
725void dml_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
726 display_rq_regs_st *rq_regs,
727 const display_pipe_source_params_st pipe_src_param)
728{
729 display_rq_params_st rq_param = {0};
730
731 memset(rq_regs, 0, sizeof(*rq_regs));
732 dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_src_param);
733 extract_rq_regs(mode_lib, rq_regs, rq_param);
734
735 print__rq_regs_st(mode_lib, *rq_regs);
736}
737
738// Note: currently taken in as-is.
739// It would be nice to decouple this code from the hw register implementation and to extract the code that is repeated for luma and chroma.
740void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
741 const display_e2e_pipe_params_st *e2e_pipe_param,
742 const unsigned int num_pipes,
743 const unsigned int pipe_idx,
744 display_dlg_regs_st *disp_dlg_regs,
745 display_ttu_regs_st *disp_ttu_regs,
746 const display_rq_dlg_params_st rq_dlg_param,
747 const display_dlg_sys_params_st dlg_sys_param,
748 const bool cstate_en,
749 const bool pstate_en,
750 const bool vm_en,
751 const bool ignore_viewport_pos,
752 const bool immediate_flip_support)
753{
754 const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
755 const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
756 const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
757 const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
758 const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
759 const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
760
761 // -------------------------
762 // Section 1.15.2.1: OTG dependent Params
763 // -------------------------
764 // Timing
765 unsigned int htotal = dst->htotal;
766// unsigned int hblank_start = dst.hblank_start; // TODO: Remove
767 unsigned int hblank_end = dst->hblank_end;
768 unsigned int vblank_start = dst->vblank_start;
769 unsigned int vblank_end = dst->vblank_end;
770 unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
771
772 double dppclk_freq_in_mhz = clks->dppclk_mhz;
773 double dispclk_freq_in_mhz = clks->dispclk_mhz;
774 double refclk_freq_in_mhz = clks->refclk_mhz;
775 double pclk_freq_in_mhz = dst->pixel_rate_mhz;
776 bool interlaced = dst->interlaced;
777
778 double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
779
780 double min_dcfclk_mhz;
781 double t_calc_us;
782 double min_ttu_vblank;
783
784 double min_dst_y_ttu_vblank;
785 unsigned int dlg_vblank_start;
786 bool dual_plane;
787 bool mode_422;
788 unsigned int access_dir;
789 unsigned int vp_height_l;
790 unsigned int vp_width_l;
791 unsigned int vp_height_c;
792 unsigned int vp_width_c;
793
794 // Scaling
795 unsigned int htaps_l;
796 unsigned int htaps_c;
797 double hratio_l;
798 double hratio_c;
799 double vratio_l;
800 double vratio_c;
801 bool scl_enable;
802
803 double line_time_in_us;
804 // double vinit_l;
805 // double vinit_c;
806 // double vinit_bot_l;
807 // double vinit_bot_c;
808
809 // unsigned int swath_height_l;
810 unsigned int swath_width_ub_l;
811 // unsigned int dpte_bytes_per_row_ub_l;
812 unsigned int dpte_groups_per_row_ub_l;
813 // unsigned int meta_pte_bytes_per_frame_ub_l;
814 // unsigned int meta_bytes_per_row_ub_l;
815
816 // unsigned int swath_height_c;
817 unsigned int swath_width_ub_c;
818 // unsigned int dpte_bytes_per_row_ub_c;
819 unsigned int dpte_groups_per_row_ub_c;
820
821 unsigned int meta_chunks_per_row_ub_l;
822 unsigned int meta_chunks_per_row_ub_c;
823 unsigned int vupdate_offset;
824 unsigned int vupdate_width;
825 unsigned int vready_offset;
826
827 unsigned int dppclk_delay_subtotal;
828 unsigned int dispclk_delay_subtotal;
829 unsigned int pixel_rate_delay_subtotal;
830
831 unsigned int vstartup_start;
832 unsigned int dst_x_after_scaler;
833 unsigned int dst_y_after_scaler;
834 double line_wait;
835 double dst_y_prefetch;
836 double dst_y_per_vm_vblank;
837 double dst_y_per_row_vblank;
838 double dst_y_per_vm_flip;
839 double dst_y_per_row_flip;
840 double min_dst_y_per_vm_vblank;
841 double min_dst_y_per_row_vblank;
842 double lsw;
843 double vratio_pre_l;
844 double vratio_pre_c;
845 unsigned int req_per_swath_ub_l;
846 unsigned int req_per_swath_ub_c;
847 unsigned int meta_row_height_l;
848 unsigned int meta_row_height_c;
849 unsigned int swath_width_pixels_ub_l;
850 unsigned int swath_width_pixels_ub_c;
851 unsigned int scaler_rec_in_width_l;
852 unsigned int scaler_rec_in_width_c;
853 unsigned int dpte_row_height_l;
854 unsigned int dpte_row_height_c;
855 double hscale_pixel_rate_l;
856 double hscale_pixel_rate_c;
857 double min_hratio_fact_l;
858 double min_hratio_fact_c;
859 double refcyc_per_line_delivery_pre_l;
860 double refcyc_per_line_delivery_pre_c;
861 double refcyc_per_line_delivery_l;
862 double refcyc_per_line_delivery_c;
863
864 double refcyc_per_req_delivery_pre_l;
865 double refcyc_per_req_delivery_pre_c;
866 double refcyc_per_req_delivery_l;
867 double refcyc_per_req_delivery_c;
868
869 unsigned int full_recout_width;
870 double xfc_transfer_delay;
871 double xfc_precharge_delay;
872 double xfc_remote_surface_flip_latency;
873 double xfc_dst_y_delta_drq_limit;
874 double xfc_prefetch_margin;
875 double refcyc_per_req_delivery_pre_cur0;
876 double refcyc_per_req_delivery_cur0;
877 double refcyc_per_req_delivery_pre_cur1;
878 double refcyc_per_req_delivery_cur1;
879
880 memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
881 memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
882
883 dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
884 dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
885 dml_print("DML_DLG: %s: vm_en = %d\n", __func__, vm_en);
886 dml_print("DML_DLG: %s: ignore_viewport_pos = %d\n", __func__, ignore_viewport_pos);
887 dml_print("DML_DLG: %s: immediate_flip_support = %d\n", __func__, immediate_flip_support);
888
889 dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
890 dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
891 dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
892 dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
893 dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
894 ASSERT(ref_freq_to_pix_freq < 4.0);
895
896 disp_dlg_regs->ref_freq_to_pix_freq =
897 (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
898 disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
899 * dml_pow(2, 8));
900 disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
901 disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
902 * (double) ref_freq_to_pix_freq);
903 ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
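	// For illustration (assumed clocks, not taken from any particular config):
	// with refclk = 600 MHz and pclk = 400 MHz, ref_freq_to_pix_freq = 1.5, so the
	// register value is 1.5 * 2^19 = 786432, i.e. a fixed-point number with 19
	// fractional bits (the ASSERT(ref_freq_to_pix_freq < 4.0) above suggests only
	// two integer bits are available). Likewise, with htotal = 2200,
	// refcyc_per_htotal = 1.5 * 2200 * 2^8 = 844800, i.e. 3300 refclk cycles
	// stored with 8 fractional bits.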
904
905 min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
906 set_prefetch_mode(mode_lib, cstate_en, pstate_en, ignore_viewport_pos, immediate_flip_support);
907 t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
908 min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
909
910 min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
911 dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
912
913 disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
914 + min_dst_y_ttu_vblank) * dml_pow(2, 2));
915 ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
916
917 dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
918 __func__,
919 min_dcfclk_mhz);
920 dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n",
921 __func__,
922 min_ttu_vblank);
923 dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
924 __func__,
925 min_dst_y_ttu_vblank);
926 dml_print("DML_DLG: %s: t_calc_us = %3.2f\n",
927 __func__,
928 t_calc_us);
929 dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
930 __func__,
931 disp_dlg_regs->min_dst_y_next_start);
932 dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
933 __func__,
934 ref_freq_to_pix_freq);
935
936 // -------------------------
937 // Section 1.15.2.2: Prefetch, Active and TTU
938 // -------------------------
939 // Prefetch Calc
940 // Source
941// dcc_en = src.dcc;
942 dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
943 mode_422 = 0; // FIXME
944	access_dir = (src->source_scan == dm_vert); // vp access direction: 0 = horizontal, 1 = vertical
945// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
946// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
947 vp_height_l = src->viewport_height;
948 vp_width_l = src->viewport_width;
949 vp_height_c = src->viewport_height_c;
950 vp_width_c = src->viewport_width_c;
951
952 // Scaling
953 htaps_l = taps->htaps;
954 htaps_c = taps->htaps_c;
955 hratio_l = scl->hscl_ratio;
956 hratio_c = scl->hscl_ratio_c;
957 vratio_l = scl->vscl_ratio;
958 vratio_c = scl->vscl_ratio_c;
959 scl_enable = scl->scl_enable;
960
961 line_time_in_us = (htotal / pclk_freq_in_mhz);
962// vinit_l = scl.vinit;
963// vinit_c = scl.vinit_c;
964// vinit_bot_l = scl.vinit_bot;
965// vinit_bot_c = scl.vinit_bot_c;
966
967// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height;
968 swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
969// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
970 dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
971// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
972// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
973
974// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height;
975 swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
976 // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
977 dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
978
979 meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
980 meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
981 vupdate_offset = dst->vupdate_offset;
982 vupdate_width = dst->vupdate_width;
983 vready_offset = dst->vready_offset;
984
985 dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
986 dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
987
988 if (scl_enable)
989 dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
990 else
991 dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
992
993 dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
994 + src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
995
996 if (dout->dsc_enable) {
997 double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
998
999 dispclk_delay_subtotal += dsc_delay;
1000 }
1001
1002 pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
1003 + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
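	// For illustration (assumed values): 60 dppclk cycles at dppclk = 600 MHz and
	// 40 dispclk cycles at dispclk = 1200 MHz, with pclk = 300 MHz, become
	//   60 * 300/600 + 40 * 300/1200 = 30 + 10 = 40 pixel-clock cycles,
	// i.e. each delay is rescaled from its own clock domain into pixels.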
1004
1005 vstartup_start = dst->vstartup_start;
1006 if (interlaced) {
1007 if (vstartup_start / 2.0
1008 - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
1009 <= vblank_end / 2.0)
1010 disp_dlg_regs->vready_after_vcount0 = 1;
1011 else
1012 disp_dlg_regs->vready_after_vcount0 = 0;
1013 } else {
1014 if (vstartup_start
1015 - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
1016 <= vblank_end)
1017 disp_dlg_regs->vready_after_vcount0 = 1;
1018 else
1019 disp_dlg_regs->vready_after_vcount0 = 0;
1020 }
1021
1022 // TODO: Where is this coming from?
1023 if (interlaced)
1024 vstartup_start = vstartup_start / 2;
1025
1026 // TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
1027 if (vstartup_start >= min_vblank) {
1028 dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
1029 __func__,
1030 vblank_start,
1031 vblank_end);
1032 dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
1033 __func__,
1034 vstartup_start,
1035 min_vblank);
1036 min_vblank = vstartup_start + 1;
1037 dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
1038 __func__,
1039 vstartup_start,
1040 min_vblank);
1041 }
1042
1043 dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
1044 dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
1045
1046 dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
1047 dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
1048 __func__,
1049 pixel_rate_delay_subtotal);
1050 dml_print("DML_DLG: %s: dst_x_after_scaler = %d\n",
1051 __func__,
1052 dst_x_after_scaler);
1053 dml_print("DML_DLG: %s: dst_y_after_scaler = %d\n",
1054 __func__,
1055 dst_y_after_scaler);
1056
1057 // Lwait
1058 line_wait = mode_lib->soc.urgent_latency_us;
1059 if (cstate_en)
1060 line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
1061 if (pstate_en)
1062 line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us
1063 + mode_lib->soc.urgent_latency_us,
1064 line_wait);
1065 line_wait = line_wait / line_time_in_us;
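	// For illustration (assumed latencies): urgent = 4us, sr_enter_plus_exit = 9us,
	// dram_clock_change = 17us and line_time = 7.4us. With cstate_en and pstate_en
	// both set, line_wait = max(4, 9, 17 + 4) / 7.4 = 21 / 7.4 ~= 2.84 lines.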
1066
1067 dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
1068 dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
1069
1070 dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib,
1071 e2e_pipe_param,
1072 num_pipes,
1073 pipe_idx);
1074 dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib,
1075 e2e_pipe_param,
1076 num_pipes,
1077 pipe_idx);
1078 dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
1079 dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
1080
1081 min_dst_y_per_vm_vblank = 8.0;
1082 min_dst_y_per_row_vblank = 16.0;
1083
1084 // magic!
1085 if (htotal <= 75) {
1086 min_vblank = 300;
1087 min_dst_y_per_vm_vblank = 100.0;
1088 min_dst_y_per_row_vblank = 100.0;
1089 }
1090
1091 dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
1092 dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
1093
1094 ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
1095 ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
1096
1097 ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
1098 lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
1099
1100 dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
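	// For illustration: if dst_y_prefetch = 40 lines, dst_y_per_vm_vblank = 8 and
	// dst_y_per_row_vblank = 16, then lsw = 40 - (8 + 16) = 16 lines remain for
	// the surface data prefetch after the vm and row (pte/meta) fetch phases.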
1101
1102 vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
1103 vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
1104
1105 dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
1106 dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
1107
1108 // Active
1109 req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
1110 req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
1111 meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
1112 meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
1113 swath_width_pixels_ub_l = 0;
1114 swath_width_pixels_ub_c = 0;
1115 scaler_rec_in_width_l = 0;
1116 scaler_rec_in_width_c = 0;
1117 dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
1118 dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
1119
1120 if (mode_422) {
1121 swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
1122 swath_width_pixels_ub_c = swath_width_ub_c * 2;
1123 } else {
1124 swath_width_pixels_ub_l = swath_width_ub_l * 1;
1125 swath_width_pixels_ub_c = swath_width_ub_c * 1;
1126 }
1127
1128 hscale_pixel_rate_l = 0.;
1129 hscale_pixel_rate_c = 0.;
1130 min_hratio_fact_l = 1.0;
1131 min_hratio_fact_c = 1.0;
1132
1133 if (htaps_l <= 1)
1134 min_hratio_fact_l = 2.0;
1135 else if (htaps_l <= 6) {
1136 if ((hratio_l * 2.0) > 4.0)
1137 min_hratio_fact_l = 4.0;
1138 else
1139 min_hratio_fact_l = hratio_l * 2.0;
1140 } else {
1141 if (hratio_l > 4.0)
1142 min_hratio_fact_l = 4.0;
1143 else
1144 min_hratio_fact_l = hratio_l;
1145 }
1146
1147 hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
1148
1149 if (htaps_c <= 1)
1150 min_hratio_fact_c = 2.0;
1151 else if (htaps_c <= 6) {
1152 if ((hratio_c * 2.0) > 4.0)
1153 min_hratio_fact_c = 4.0;
1154 else
1155 min_hratio_fact_c = hratio_c * 2.0;
1156 } else {
1157 if (hratio_c > 4.0)
1158 min_hratio_fact_c = 4.0;
1159 else
1160 min_hratio_fact_c = hratio_c;
1161 }
1162
1163 hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
1164
1165 refcyc_per_line_delivery_pre_l = 0.;
1166 refcyc_per_line_delivery_pre_c = 0.;
1167 refcyc_per_line_delivery_l = 0.;
1168 refcyc_per_line_delivery_c = 0.;
1169
1170 refcyc_per_req_delivery_pre_l = 0.;
1171 refcyc_per_req_delivery_pre_c = 0.;
1172 refcyc_per_req_delivery_l = 0.;
1173 refcyc_per_req_delivery_c = 0.;
1174
1175 full_recout_width = 0;
1176 // In ODM
1177 if (src->is_hsplit) {
1178 // This "hack" is only allowed (and valid) for MPC combine. In ODM
1179 // combine, you MUST specify the full_recout_width...according to Oswin
1180 if (dst->full_recout_width == 0 && !dst->odm_combine) {
1181 dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
1182 __func__);
1183 full_recout_width = dst->recout_width * 2; // assume half split for dcn1
1184 } else
1185 full_recout_width = dst->full_recout_width;
1186 } else
1187 full_recout_width = dst->recout_width;
1188
1189 // mpc_combine and odm_combine are mutually exclusive
1190 refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
1191 refclk_freq_in_mhz,
1192 pclk_freq_in_mhz,
1193 dst->odm_combine,
1194 full_recout_width,
1195 dst->hactive,
1196 vratio_pre_l,
1197 hscale_pixel_rate_l,
1198 swath_width_pixels_ub_l,
1199 1); // per line
1200
1201 refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib,
1202 refclk_freq_in_mhz,
1203 pclk_freq_in_mhz,
1204 dst->odm_combine,
1205 full_recout_width,
1206 dst->hactive,
1207 vratio_l,
1208 hscale_pixel_rate_l,
1209 swath_width_pixels_ub_l,
1210 1); // per line
1211
1212 dml_print("DML_DLG: %s: full_recout_width = %d\n",
1213 __func__,
1214 full_recout_width);
1215 dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
1216 __func__,
1217 hscale_pixel_rate_l);
1218 dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
1219 __func__,
1220 refcyc_per_line_delivery_pre_l);
1221 dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
1222 __func__,
1223 refcyc_per_line_delivery_l);
1224
1225 if (dual_plane) {
1226 refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
1227 refclk_freq_in_mhz,
1228 pclk_freq_in_mhz,
1229 dst->odm_combine,
1230 full_recout_width,
1231 dst->hactive,
1232 vratio_pre_c,
1233 hscale_pixel_rate_c,
1234 swath_width_pixels_ub_c,
1235 1); // per line
1236
1237 refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib,
1238 refclk_freq_in_mhz,
1239 pclk_freq_in_mhz,
1240 dst->odm_combine,
1241 full_recout_width,
1242 dst->hactive,
1243 vratio_c,
1244 hscale_pixel_rate_c,
1245 swath_width_pixels_ub_c,
1246 1); // per line
1247
1248 dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
1249 __func__,
1250 refcyc_per_line_delivery_pre_c);
1251 dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
1252 __func__,
1253 refcyc_per_line_delivery_c);
1254 }
1255
1256 // TTU - Luma / Chroma
1257 if (access_dir) { // vertical access
1258 scaler_rec_in_width_l = vp_height_l;
1259 scaler_rec_in_width_c = vp_height_c;
1260 } else {
1261 scaler_rec_in_width_l = vp_width_l;
1262 scaler_rec_in_width_c = vp_width_c;
1263 }
1264
1265 refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
1266 refclk_freq_in_mhz,
1267 pclk_freq_in_mhz,
1268 dst->odm_combine,
1269 full_recout_width,
1270 dst->hactive,
1271 vratio_pre_l,
1272 hscale_pixel_rate_l,
1273 scaler_rec_in_width_l,
1274 req_per_swath_ub_l); // per req
1275 refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib,
1276 refclk_freq_in_mhz,
1277 pclk_freq_in_mhz,
1278 dst->odm_combine,
1279 full_recout_width,
1280 dst->hactive,
1281 vratio_l,
1282 hscale_pixel_rate_l,
1283 scaler_rec_in_width_l,
1284 req_per_swath_ub_l); // per req
1285
1286 dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
1287 __func__,
1288 refcyc_per_req_delivery_pre_l);
1289 dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
1290 __func__,
1291 refcyc_per_req_delivery_l);
1292
1293 ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
1294 ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
1295
1296 if (dual_plane) {
1297 refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
1298 refclk_freq_in_mhz,
1299 pclk_freq_in_mhz,
1300 dst->odm_combine,
1301 full_recout_width,
1302 dst->hactive,
1303 vratio_pre_c,
1304 hscale_pixel_rate_c,
1305 scaler_rec_in_width_c,
1306 req_per_swath_ub_c); // per req
1307 refcyc_per_req_delivery_c = get_refcyc_per_delivery(mode_lib,
1308 refclk_freq_in_mhz,
1309 pclk_freq_in_mhz,
1310 dst->odm_combine,
1311 full_recout_width,
1312 dst->hactive,
1313 vratio_c,
1314 hscale_pixel_rate_c,
1315 scaler_rec_in_width_c,
1316 req_per_swath_ub_c); // per req
1317
1318 dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
1319 __func__,
1320 refcyc_per_req_delivery_pre_c);
1321 dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
1322 __func__,
1323 refcyc_per_req_delivery_c);
1324
1325 ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
1326 ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
1327 }
1328
1329 // XFC
1330 xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
1331 xfc_precharge_delay = get_xfc_precharge_delay(mode_lib,
1332 e2e_pipe_param,
1333 num_pipes,
1334 pipe_idx);
1335 xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(mode_lib,
1336 e2e_pipe_param,
1337 num_pipes,
1338 pipe_idx);
1339 xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency;
1340 xfc_prefetch_margin = get_xfc_prefetch_margin(mode_lib,
1341 e2e_pipe_param,
1342 num_pipes,
1343 pipe_idx);
1344
1345 // TTU - Cursor
1346 refcyc_per_req_delivery_pre_cur0 = 0.0;
1347 refcyc_per_req_delivery_cur0 = 0.0;
1348 if (src->num_cursors > 0) {
1349 calculate_ttu_cursor(mode_lib,
1350 &refcyc_per_req_delivery_pre_cur0,
1351 &refcyc_per_req_delivery_cur0,
1352 refclk_freq_in_mhz,
1353 ref_freq_to_pix_freq,
1354 hscale_pixel_rate_l,
1355 scl->hscl_ratio,
1356 vratio_pre_l,
1357 vratio_l,
1358 src->cur0_src_width,
1359 (enum cursor_bpp)(src->cur0_bpp));
1360 }
1361
1362 refcyc_per_req_delivery_pre_cur1 = 0.0;
1363 refcyc_per_req_delivery_cur1 = 0.0;
1364 if (src->num_cursors > 1) {
1365 calculate_ttu_cursor(mode_lib,
1366 &refcyc_per_req_delivery_pre_cur1,
1367 &refcyc_per_req_delivery_cur1,
1368 refclk_freq_in_mhz,
1369 ref_freq_to_pix_freq,
1370 hscale_pixel_rate_l,
1371 scl->hscl_ratio,
1372 vratio_pre_l,
1373 vratio_l,
1374 src->cur1_src_width,
1375 (enum cursor_bpp)(src->cur1_bpp));
1376 }
1377
1378 // TTU - Misc
1379 // all hard-coded
1380
1381 // Assignment to register structures
1382 disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
1383 disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
1384 ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
1385 disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
1386 disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
1387 disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
1388 disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
1389 disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
1390
1391 disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
1392 disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
1393
1394 disp_dlg_regs->refcyc_per_pte_group_vblank_l =
1395 (unsigned int) (dst_y_per_row_vblank * (double) htotal
1396 * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
1397 ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
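	// For illustration (assumed values): dst_y_per_row_vblank = 16 lines,
	// htotal = 2200, ref_freq_to_pix_freq = 0.5 and dpte_groups_per_row_ub_l = 8
	// give 16 * 2200 * 0.5 / 8 = 2200 refclk cycles per PTE group, i.e. the
	// row's PTE group fetches are spaced evenly across dst_y_per_row_vblank.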
1398
1399 if (dual_plane) {
1400 disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
1401 * (double) htotal * ref_freq_to_pix_freq
1402 / (double) dpte_groups_per_row_ub_c);
1403 ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
1404 < (unsigned int) dml_pow(2, 13));
1405 }
1406
1407 disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
1408 (unsigned int) (dst_y_per_row_vblank * (double) htotal
1409 * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
1410 ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
1411
1412 disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
1413 disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
1414
1415 disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
1416 * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
1417 disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
1418 * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
1419
1420 if (dual_plane) {
1421 disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip
1422 * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
1423 disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip
1424 * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
1425 }
1426
1427 disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
1428 / (double) vratio_l * dml_pow(2, 2));
1429 ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
1430
1431 if (dual_plane) {
1432 disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
1433 / (double) vratio_c * dml_pow(2, 2));
1434 if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
1435 dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
1436 __func__,
1437 disp_dlg_regs->dst_y_per_pte_row_nom_c,
1438 (unsigned int) dml_pow(2, 17) - 1);
1439 }
1440 }
1441
1442 disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
1443 / (double) vratio_l * dml_pow(2, 2));
1444 ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
1445
1446 disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
1447
1448 disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
1449 / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
1450 / (double) dpte_groups_per_row_ub_l);
1451 if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
1452 disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
1453 disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
1454 / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
1455 / (double) meta_chunks_per_row_ub_l);
1456 if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
1457 disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
1458
1459 if (dual_plane) {
1460 disp_dlg_regs->refcyc_per_pte_group_nom_c =
1461 (unsigned int) ((double) dpte_row_height_c / (double) vratio_c
1462 * (double) htotal * ref_freq_to_pix_freq
1463 / (double) dpte_groups_per_row_ub_c);
1464 if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
1465 disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
1466
1467 // TODO: Is this the right calculation? Does htotal need to be halved?
1468 disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
1469 (unsigned int) ((double) meta_row_height_c / (double) vratio_c
1470 * (double) htotal * ref_freq_to_pix_freq
1471 / (double) meta_chunks_per_row_ub_c);
1472 if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
1473 disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
1474 }
1475
1476 disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l,
1477 1);
1478 disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l,
1479 1);
1480 ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
1481 ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
1482
1483 disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c,
1484 1);
1485 disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c,
1486 1);
1487 ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
1488 ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
1489
1490 disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
1491 disp_dlg_regs->dst_y_offset_cur0 = 0;
1492 disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
1493 disp_dlg_regs->dst_y_offset_cur1 = 0;
1494
1495 disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay;
1496 disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay;
1497 disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency;
1498 disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(xfc_prefetch_margin * refclk_freq_in_mhz,
1499 1);
1500
1501	// the slave must also have this value set to off
1502 if (src->xfc_enable && !src->xfc_slave)
1503 disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1);
1504 else
1505 disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
1506
1507 disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
1508 * dml_pow(2, 10));
1509 disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
1510 * dml_pow(2, 10));
1511 disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c
1512 * dml_pow(2, 10));
1513 disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
1514 * dml_pow(2, 10));
1515 disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
1516 (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
1517 disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0
1518 * dml_pow(2, 10));
1519 disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
1520 (unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
1521 disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1
1522 * dml_pow(2, 10));
1523 disp_ttu_regs->qos_level_low_wm = 0;
1524 ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
1525 disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
1526 * ref_freq_to_pix_freq);
1527 ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
1528
1529 disp_ttu_regs->qos_level_flip = 14;
1530 disp_ttu_regs->qos_level_fixed_l = 8;
1531 disp_ttu_regs->qos_level_fixed_c = 8;
1532 disp_ttu_regs->qos_level_fixed_cur0 = 8;
1533 disp_ttu_regs->qos_ramp_disable_l = 0;
1534 disp_ttu_regs->qos_ramp_disable_c = 0;
1535 disp_ttu_regs->qos_ramp_disable_cur0 = 0;
1536
1537 disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
1538 ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
1539
1540 print__ttu_regs_st(mode_lib, *disp_ttu_regs);
1541 print__dlg_regs_st(mode_lib, *disp_dlg_regs);
1542}
1543
1544void dml_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
1545 display_dlg_regs_st *dlg_regs,
1546 display_ttu_regs_st *ttu_regs,
1547 display_e2e_pipe_params_st *e2e_pipe_param,
1548 const unsigned int num_pipes,
1549 const unsigned int pipe_idx,
1550 const bool cstate_en,
1551 const bool pstate_en,
1552 const bool vm_en,
1553 const bool ignore_viewport_pos,
1554 const bool immediate_flip_support)
1555{
1556 display_rq_params_st rq_param = {0};
1557 display_dlg_sys_params_st dlg_sys_param = {0};
1558
1559 // Get watermark and Tex.
1560 dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
1561 dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib,
1562 e2e_pipe_param,
1563 num_pipes);
1564 dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
1565 dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
1566 dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
1567 dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
1568 dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib,
1569 e2e_pipe_param,
1570 num_pipes);
1571 dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
1572 e2e_pipe_param,
1573 num_pipes);
1574 dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
1575 / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
1576
1577 print__dlg_sys_params_st(mode_lib, dlg_sys_param);
1578
1579 // system parameter calculation done
1580
1581 dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
1582 dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src);
1583 dml_rq_dlg_get_dlg_params(mode_lib,
1584 e2e_pipe_param,
1585 num_pipes,
1586 pipe_idx,
1587 dlg_regs,
1588 ttu_regs,
1589 rq_param.dlg,
1590 dlg_sys_param,
1591 cstate_en,
1592 pstate_en,
1593 vm_en,
1594 ignore_viewport_pos,
1595 immediate_flip_support);
1596 dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
1597}
1598
1599void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param)
1600{
1601 memset(arb_param, 0, sizeof(*arb_param));
1602 arb_param->max_req_outstanding = 256;
1603 arb_param->min_req_outstanding = 68;
1604 arb_param->sat_level_us = 60;
1605}
1606
1607void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
1608 double *refcyc_per_req_delivery_pre_cur,
1609 double *refcyc_per_req_delivery_cur,
1610 double refclk_freq_in_mhz,
1611 double ref_freq_to_pix_freq,
1612 double hscale_pixel_rate_l,
1613 double hscl_ratio,
1614 double vratio_pre_l,
1615 double vratio_l,
1616 unsigned int cur_width,
1617 enum cursor_bpp cur_bpp)
1618{
1619 unsigned int cur_src_width = cur_width;
1620 unsigned int cur_req_size = 0;
1621 unsigned int cur_req_width = 0;
1622 double cur_width_ub = 0.0;
1623 double cur_req_per_width = 0.0;
1624 double hactive_cur = 0.0;
1625
1626 ASSERT(cur_src_width <= 256);
1627
1628 *refcyc_per_req_delivery_pre_cur = 0.0;
1629 *refcyc_per_req_delivery_cur = 0.0;
1630 if (cur_src_width > 0) {
1631 unsigned int cur_bit_per_pixel = 0;
1632
1633 if (cur_bpp == dm_cur_2bit) {
1634 cur_req_size = 64; // byte
1635 cur_bit_per_pixel = 2;
1636 } else { // 32bit
1637 cur_bit_per_pixel = 32;
1638 if (cur_src_width >= 1 && cur_src_width <= 16)
1639 cur_req_size = 64;
1640 else if (cur_src_width >= 17 && cur_src_width <= 31)
1641 cur_req_size = 128;
1642 else
1643 cur_req_size = 256;
1644 }
1645
1646 cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
1647 cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
1648 * (double) cur_req_width;
1649 cur_req_per_width = cur_width_ub / (double) cur_req_width;
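		// For illustration: a 100-pixel-wide 32bpp cursor falls in the 256-byte
		// request bucket above, so cur_req_width = 256 / (32 / 8) = 64 pixels,
		// cur_width_ub = ceil(100 / 64) * 64 = 128 and cur_req_per_width = 2.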
1650 hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
1651
1652 if (vratio_pre_l <= 1.0) {
1653 *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
1654 / (double) cur_req_per_width;
1655 } else {
1656 *refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz
1657 * (double) cur_src_width / hscale_pixel_rate_l
1658 / (double) cur_req_per_width;
1659 }
1660
1661 ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
1662
1663 if (vratio_l <= 1.0) {
1664 *refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
1665 / (double) cur_req_per_width;
1666 } else {
1667 *refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz
1668 * (double) cur_src_width / hscale_pixel_rate_l
1669 / (double) cur_req_per_width;
1670 }
1671
1672 dml_print("DML_DLG: %s: cur_req_width = %d\n",
1673 __func__,
1674 cur_req_width);
1675 dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n",
1676 __func__,
1677 cur_width_ub);
1678 dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n",
1679 __func__,
1680 cur_req_per_width);
1681 dml_print("DML_DLG: %s: hactive_cur = %3.2f\n",
1682 __func__,
1683 hactive_cur);
1684 dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
1685 __func__,
1686 *refcyc_per_req_delivery_pre_cur);
1687 dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
1688 __func__,
1689 *refcyc_per_req_delivery_cur);
1690
1691 ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
1692 }
1693}
1694
1695unsigned int dml_rq_dlg_get_calculated_vstartup(struct display_mode_lib *mode_lib,
1696 display_e2e_pipe_params_st *e2e_pipe_param,
1697 const unsigned int num_pipes,
1698 const unsigned int pipe_idx)
1699{
1700 unsigned int vstartup_pipe[DC__NUM_PIPES__MAX];
1701 bool visited[DC__NUM_PIPES__MAX];
1702 unsigned int pipe_inst = 0;
1703 unsigned int i, j, k;
1704
1705 for (k = 0; k < num_pipes; ++k)
1706 visited[k] = false;
1707
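	// For illustration: with four pipes where pipes 0 and 1 share hsplit_grp 3 and
	// pipes 2 and 3 are independent, the loop below assigns pipe_inst 0 to both
	// pipes 0 and 1, pipe_inst 1 to pipe 2 and pipe_inst 2 to pipe 3, so split
	// pipes report the same calculated vstartup.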
1708 for (i = 0; i < num_pipes; i++) {
1709 if (e2e_pipe_param[i].pipe.src.is_hsplit && !visited[i]) {
1710 unsigned int grp = e2e_pipe_param[i].pipe.src.hsplit_grp;
1711
1712 for (j = i; j < num_pipes; j++) {
1713 if (e2e_pipe_param[j].pipe.src.hsplit_grp == grp
1714 && e2e_pipe_param[j].pipe.src.is_hsplit
1715 && !visited[j]) {
1716 vstartup_pipe[j] = get_vstartup_calculated(mode_lib,
1717 e2e_pipe_param,
1718 num_pipes,
1719 pipe_inst);
1720 visited[j] = true;
1721 }
1722 }
1723
1724 pipe_inst++;
1725 }
1726
1727 if (!visited[i]) {
1728 vstartup_pipe[i] = get_vstartup_calculated(mode_lib,
1729 e2e_pipe_param,
1730 num_pipes,
1731 pipe_inst);
1732 visited[i] = true;
1733 pipe_inst++;
1734 }
1735 }
1736
1737 return vstartup_pipe[pipe_idx];
1738
1739}
1740
1741void dml_rq_dlg_get_row_heights(struct display_mode_lib *mode_lib,
1742 unsigned int *o_dpte_row_height,
1743 unsigned int *o_meta_row_height,
1744 unsigned int vp_width,
1745 unsigned int data_pitch,
1746 int source_format,
1747 int tiling,
1748 int macro_tile_size,
1749 int source_scan,
1750 int is_chroma)
1751{
1752 display_data_rq_dlg_params_st rq_dlg_param;
1753 display_data_rq_misc_params_st rq_misc_param;
1754 display_data_rq_sizing_params_st rq_sizing_param;
1755
1756 get_meta_and_pte_attr(mode_lib,
1757 &rq_dlg_param,
1758 &rq_misc_param,
1759 &rq_sizing_param,
1760 vp_width,
1761 0, // dummy
1762 data_pitch,
1763 0, // dummy
1764 source_format,
1765 tiling,
1766 macro_tile_size,
1767 source_scan,
1768 is_chroma);
1769
1770 *o_dpte_row_height = rq_dlg_param.dpte_row_height;
1771 *o_meta_row_height = rq_dlg_param.meta_row_height;
1772}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h
deleted file mode 100644
index efdd4c73d8f3..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h
+++ /dev/null
@@ -1,148 +0,0 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DML2_DISPLAY_RQ_DLG_CALC_H__
27#define __DML2_DISPLAY_RQ_DLG_CALC_H__
28
29#include "dml_common_defs.h"
30#include "display_rq_dlg_helpers.h"
31
32struct display_mode_lib;
33
34// Function: dml_rq_dlg_get_rq_params
35// Calculate requestor-related parameters that are register-definition agnostic
36// (i.e. this layer tries to separate real values from the register definition)
37// Input:
38// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
39// Output:
40// rq_param - values that can be used to setup RQ (e.g. swath_height, plane1_addr, etc.)
41//
42void dml_rq_dlg_get_rq_params(
43 struct display_mode_lib *mode_lib,
44 display_rq_params_st *rq_param,
45 const display_pipe_source_params_st pipe_src_param);
46
47// Function: dml_rq_dlg_get_rq_reg
48// Main entry point for tests to get the register values out of this DML class.
49// This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
50// and then populate the rq_regs struct
51// Input:
52// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
53// Output:
54// rq_regs - struct that holds all the RQ registers field value.
55// See also: <display_rq_regs_st>
56void dml_rq_dlg_get_rq_reg(
57 struct display_mode_lib *mode_lib,
58 display_rq_regs_st *rq_regs,
59 const display_pipe_source_params_st pipe_src_param);
60
61// Function: dml_rq_dlg_get_dlg_params
62// Calculate deadline related parameters
63//
64void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
65 const display_e2e_pipe_params_st *e2e_pipe_param,
66 const unsigned int num_pipes,
67 const unsigned int pipe_idx,
68 display_dlg_regs_st *disp_dlg_regs,
69 display_ttu_regs_st *disp_ttu_regs,
70 const display_rq_dlg_params_st rq_dlg_param,
71 const display_dlg_sys_params_st dlg_sys_param,
72 const bool cstate_en,
73 const bool pstate_en,
74 const bool vm_en,
75 const bool ignore_viewport_pos,
76 const bool immediate_flip_support);
77
78// Function: dml_rq_dlg_get_dlg_params_prefetch
79// For the flip_bw programming guide change, dml now needs to calculate the flip_bytes and prefetch_bw
80// for ALL pipes and use this info to calculate the prefetch programming.
81// Output: prefetch_param.prefetch_bw and flip_bytes
82void dml_rq_dlg_get_dlg_params_prefetch(
83 struct display_mode_lib *mode_lib,
84 display_dlg_prefetch_param_st *prefetch_param,
85 display_rq_dlg_params_st rq_dlg_param,
86 display_dlg_sys_params_st dlg_sys_param,
87 display_e2e_pipe_params_st e2e_pipe_param,
88 const bool cstate_en,
89 const bool pstate_en,
90 const bool vm_en);
91
92// Function: dml_rq_dlg_get_dlg_reg
93// Calculate and return DLG and TTU register struct given the system setting
94// Output:
95// dlg_regs - output DLG register struct
96// ttu_regs - output DLG TTU register struct
97// Input:
98// e2e_pipe_param - "compacted" array of e2e pipe param struct
99// num_pipes - num of active "pipe" or "route"
100// pipe_idx - index that identifies the e2e_pipe_param corresponding to this dlg
101// cstate - 0: when calculating min_ttu_vblank it is assumed cstate is not required. 1: Normal mode, cstate is considered.
102// Added for legacy or unrealistic timing tests.
103void dml_rq_dlg_get_dlg_reg(
104 struct display_mode_lib *mode_lib,
105 display_dlg_regs_st *dlg_regs,
106 display_ttu_regs_st *ttu_regs,
107 display_e2e_pipe_params_st *e2e_pipe_param,
108 const unsigned int num_pipes,
109 const unsigned int pipe_idx,
110 const bool cstate_en,
111 const bool pstate_en,
112 const bool vm_en,
113 const bool ignore_viewport_pos,
114 const bool immediate_flip_support);
115
116// Function: dml_rq_dlg_get_calculated_vstartup
117// Calculate and return vstartup
118// Output:
119// unsigned int vstartup
120// Input:
121// e2e_pipe_param - "compacted" array of e2e pipe param struct
122// num_pipes - num of active "pipe" or "route"
123// pipe_idx - index that identifies the e2e_pipe_param corresponding to this dlg
124// NOTE: this MUST be called after setting the prefetch mode!
125unsigned int dml_rq_dlg_get_calculated_vstartup(
126 struct display_mode_lib *mode_lib,
127 display_e2e_pipe_params_st *e2e_pipe_param,
128 const unsigned int num_pipes,
129 const unsigned int pipe_idx);
130
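A calling-order sketch for dml_rq_dlg_get_calculated_vstartup and dml_rq_dlg_get_dlg_reg, assuming the prefetch mode has already been set on mode_lib as the NOTE above requires; the boolean arguments are arbitrary example values and the helper name is hypothetical.

/* Hypothetical helper, for illustration only. */
static void dml_example_get_dlg(struct display_mode_lib *mode_lib,
		display_e2e_pipe_params_st *pipes,
		const unsigned int num_pipes,
		const unsigned int pipe_idx,
		display_dlg_regs_st *dlg_regs,
		display_ttu_regs_st *ttu_regs)
{
	/* Prefetch mode is assumed to have been set on mode_lib already. */
	unsigned int vstartup = dml_rq_dlg_get_calculated_vstartup(
			mode_lib, pipes, num_pipes, pipe_idx);

	dml_rq_dlg_get_dlg_reg(mode_lib, dlg_regs, ttu_regs, pipes,
			num_pipes, pipe_idx,
			true,   /* cstate_en */
			true,   /* pstate_en */
			true,   /* vm_en */
			false,  /* ignore_viewport_pos */
			false); /* immediate_flip_support */

	/* vstartup is consumed by the caller's OTG/timing programming. */
	(void)vstartup;
}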
131// Function: dml_rq_dlg_get_row_heights
132// Calculate dpte and meta row heights
133void dml_rq_dlg_get_row_heights(
134 struct display_mode_lib *mode_lib,
135 unsigned int *o_dpte_row_height,
136 unsigned int *o_meta_row_height,
137 unsigned int vp_width,
138 unsigned int data_pitch,
139 int source_format,
140 int tiling,
141 int macro_tile_size,
142 int source_scan,
143 int is_chroma);
144
145// Function: dml_rq_dlg_get_arb_params
146void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param);
147
148#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 189052e911fc..48400d642610 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include "display_rq_dlg_helpers.h" 26#include "display_rq_dlg_helpers.h"
27#include "dml_logger.h"
27 28
28void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param) 29void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
29{ 30{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
index 987d7671cd0f..304164986bd8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
@@ -27,10 +27,11 @@
27#define __DISPLAY_RQ_DLG_CALC_H__ 27#define __DISPLAY_RQ_DLG_CALC_H__
28 28
29#include "dml_common_defs.h" 29#include "dml_common_defs.h"
30#include "display_rq_dlg_helpers.h"
31 30
32struct display_mode_lib; 31struct display_mode_lib;
33 32
33#include "display_rq_dlg_helpers.h"
34
34void dml1_extract_rq_regs( 35void dml1_extract_rq_regs(
35 struct display_mode_lib *mode_lib, 36 struct display_mode_lib *mode_lib,
36 struct _vcs_dpi_display_rq_regs_st *rq_regs, 37 struct _vcs_dpi_display_rq_regs_st *rq_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
index b2847bc469fe..f78cbae9db88 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
@@ -31,8 +31,6 @@
31#include "display_mode_structs.h" 31#include "display_mode_structs.h"
32#include "display_mode_enums.h" 32#include "display_mode_enums.h"
33 33
34#define dml_print(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
35#define DTRACE(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
36 34
37double dml_round(double a); 35double dml_round(double a);
38 36
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index e68086b8a22f..f9cf08357989 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -28,6 +28,7 @@
28 28
29#include "dml_common_defs.h" 29#include "dml_common_defs.h"
30#include "../calcs/dcn_calc_math.h" 30#include "../calcs/dcn_calc_math.h"
31#include "dml_logger.h"
31 32
32static inline double dml_min(double a, double b) 33static inline double dml_min(double a, double b)
33{ 34{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h
index c0c4bfdcdb14..465859b77248 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -23,11 +23,16 @@
23 * 23 *
24 */ 24 */
25 25
26#ifndef __AMDGPU_POWERPLAY_H__
27#define __AMDGPU_POWERPLAY_H__
28 26
29#include "amd_shared.h" 27#ifndef __DML_LOGGER_H_
28#define __DML_LOGGER_H_
29
30#define DC_LOGGER \
31 mode_lib->logger
32
33#define dml_print(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
34#define DTRACE(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
35
36#endif
30 37
31extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
32 38
33#endif /* __AMDGPU_POWERPLAY_H__ */
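The new dml_logger.h above follows the DC_LOGGER convention this series introduces: each file defines DC_LOGGER to name whichever dal_logger it has in scope, and the DC_LOG_* wrappers (added to logger_types.h further down in this diff) expand dm_logger_write() against it. A minimal sketch of the pattern; struct my_thing and example_warn() are hypothetical.

#define DC_LOGGER \
	thing->ctx->logger

static void example_warn(struct my_thing *thing)
{
	/* Expands to dm_logger_write(thing->ctx->logger, LOG_WARNING, ...). */
	DC_LOG_WARNING("%s: unexpected state\n", __func__);
}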
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
index 0c2314efb47e..ea3f888e5c65 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
@@ -36,7 +36,8 @@
36 36
37#include "dce/dce_12_0_offset.h" 37#include "dce/dce_12_0_offset.h"
38#include "dce/dce_12_0_sh_mask.h" 38#include "dce/dce_12_0_sh_mask.h"
39#include "soc15ip.h" 39#include "soc15_hw_ip.h"
40#include "vega10_ip_offset.h"
40 41
41#define block HPD 42#define block HPD
42#define reg_num 0 43#define reg_num 0
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
index a225b02cc779..39ef5c7dad97 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
@@ -35,7 +35,8 @@
35 35
36#include "dce/dce_12_0_offset.h" 36#include "dce/dce_12_0_offset.h"
37#include "dce/dce_12_0_sh_mask.h" 37#include "dce/dce_12_0_sh_mask.h"
38#include "soc15ip.h" 38#include "soc15_hw_ip.h"
39#include "vega10_ip_offset.h"
39 40
40/* begin ********************* 41/* begin *********************
41 * macros to expend register list macro defined in HW object header file */ 42 * macros to expend register list macro defined in HW object header file */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
index 5235f69f0602..32aa47a04a0d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
@@ -36,7 +36,8 @@
36 36
37#include "dcn/dcn_1_0_offset.h" 37#include "dcn/dcn_1_0_offset.h"
38#include "dcn/dcn_1_0_sh_mask.h" 38#include "dcn/dcn_1_0_sh_mask.h"
39#include "soc15ip.h" 39#include "soc15_hw_ip.h"
40#include "vega10_ip_offset.h"
40 41
41#define block HPD 42#define block HPD
42#define reg_num 0 43#define reg_num 0
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
index 347864810d01..fecc8688048d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
@@ -35,7 +35,8 @@
35 35
36#include "dcn/dcn_1_0_offset.h" 36#include "dcn/dcn_1_0_offset.h"
37#include "dcn/dcn_1_0_sh_mask.h" 37#include "dcn/dcn_1_0_sh_mask.h"
38#include "soc15ip.h" 38#include "soc15_hw_ip.h"
39#include "vega10_ip_offset.h"
39 40
40/* begin ********************* 41/* begin *********************
41 * macros to expend register list macro defined in HW object header file */ 42 * macros to expend register list macro defined in HW object header file */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
index fc7a7d4ebca5..bb526ad326e5 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
@@ -55,6 +55,8 @@ enum {
55 55
56#define FROM_ENGINE(ptr) \ 56#define FROM_ENGINE(ptr) \
57 container_of((ptr), struct aux_engine, base) 57 container_of((ptr), struct aux_engine, base)
58#define DC_LOGGER \
59 engine->base.ctx->logger
58 60
59enum i2caux_engine_type dal_aux_engine_get_engine_type( 61enum i2caux_engine_type dal_aux_engine_get_engine_type(
60 const struct engine *engine) 62 const struct engine *engine)
@@ -126,20 +128,8 @@ static void process_read_reply(
126 ctx->status = 128 ctx->status =
127 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; 129 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
128 ctx->operation_succeeded = false; 130 ctx->operation_succeeded = false;
129 } else if (ctx->returned_byte < ctx->current_read_length) {
130 ctx->current_read_length -= ctx->returned_byte;
131
132 ctx->offset += ctx->returned_byte;
133
134 ++ctx->invalid_reply_retry_aux_on_ack;
135
136 if (ctx->invalid_reply_retry_aux_on_ack >
137 AUX_INVALID_REPLY_RETRY_COUNTER) {
138 ctx->status =
139 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
140 ctx->operation_succeeded = false;
141 }
142 } else { 131 } else {
132 ctx->current_read_length = ctx->returned_byte;
143 ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED; 133 ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
144 ctx->transaction_complete = true; 134 ctx->transaction_complete = true;
145 ctx->operation_succeeded = true; 135 ctx->operation_succeeded = true;
@@ -284,6 +274,15 @@ static bool read_command(
284 msleep(engine->delay); 274 msleep(engine->delay);
285 } while (ctx.operation_succeeded && !ctx.transaction_complete); 275 } while (ctx.operation_succeeded && !ctx.transaction_complete);
286 276
277 if (request->payload.address_space ==
278 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
279 DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
280 request->payload.address,
281 request->payload.data[0],
282 ctx.operation_succeeded);
283 }
284
285 request->payload.length = ctx.reply.length;
287 return ctx.operation_succeeded; 286 return ctx.operation_succeeded;
288} 287}
289 288
@@ -484,6 +483,14 @@ static bool write_command(
484 msleep(engine->delay); 483 msleep(engine->delay);
485 } while (ctx.operation_succeeded && !ctx.transaction_complete); 484 } while (ctx.operation_succeeded && !ctx.transaction_complete);
486 485
486 if (request->payload.address_space ==
487 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
488 DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
489 request->payload.address,
490 request->payload.data[0],
491 ctx.operation_succeeded);
492 }
493
487 return ctx.operation_succeeded; 494 return ctx.operation_succeeded;
488} 495}
489 496
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
index 81f9f3e34c10..5f47f6c007ac 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -441,10 +441,6 @@ static void construct(
441static void destruct( 441static void destruct(
442 struct aux_engine_dce110 *engine) 442 struct aux_engine_dce110 *engine)
443{ 443{
444 struct aux_engine_dce110 *aux110 = engine;
445/*temp w/a, to do*/
446 REG_UPDATE(AUX_ARB_CONTROL, AUX_DMCU_DONE_USING_AUX_REG, 1);
447 REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
448 dal_aux_engine_destruct(&engine->base); 444 dal_aux_engine_destruct(&engine->base);
449} 445}
450 446
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
index 56e25b3d65fd..abd0095ced30 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -48,6 +48,8 @@
48/* 48/*
49 * This unit 49 * This unit
50 */ 50 */
51#define DC_LOGGER \
52 hw_engine->base.base.base.ctx->logger
51 53
52enum dc_i2c_status { 54enum dc_i2c_status {
53 DC_I2C_STATUS__DC_I2C_STATUS_IDLE, 55 DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
@@ -525,9 +527,7 @@ static void construct(
525 REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); 527 REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
526 528
527 if (xtal_ref_div == 0) { 529 if (xtal_ref_div == 0) {
528 dm_logger_write( 530 DC_LOG_WARNING("Invalid base timer divider\n",
529 hw_engine->base.base.base.ctx->logger, LOG_WARNING,
530 "Invalid base timer divider\n",
531 __func__); 531 __func__);
532 xtal_ref_div = 2; 532 xtal_ref_div = 2;
533 } 533 }
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
index a401636bf3f8..0e7b18260027 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
@@ -38,7 +38,8 @@
38 38
39#include "dce/dce_12_0_offset.h" 39#include "dce/dce_12_0_offset.h"
40#include "dce/dce_12_0_sh_mask.h" 40#include "dce/dce_12_0_sh_mask.h"
41#include "soc15ip.h" 41#include "soc15_hw_ip.h"
42#include "vega10_ip_offset.h"
42 43
43/* begin ********************* 44/* begin *********************
44 * macros to expend register list macro defined in HW object header file */ 45 * macros to expend register list macro defined in HW object header file */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
index bed7cc3e77de..e44a8901f38b 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
@@ -38,7 +38,8 @@
38 38
39#include "dcn/dcn_1_0_offset.h" 39#include "dcn/dcn_1_0_offset.h"
40#include "dcn/dcn_1_0_sh_mask.h" 40#include "dcn/dcn_1_0_sh_mask.h"
41#include "soc15ip.h" 41#include "soc15_hw_ip.h"
42#include "vega10_ip_offset.h"
42 43
43/* begin ********************* 44/* begin *********************
44 * macros to expend register list macro defined in HW object header file */ 45 * macros to expend register list macro defined in HW object header file */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index e1593ffe5a2b..5cbf6626b8d4 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -253,6 +253,7 @@ bool dal_i2caux_submit_aux_command(
253 break; 253 break;
254 } 254 }
255 255
256 cmd->payloads->length = request.payload.length;
256 ++index_of_payload; 257 ++index_of_payload;
257 } 258 }
258 259
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d6971054ec07..8c51ad70cace 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -119,6 +119,11 @@ struct resource_funcs {
119 struct dc *dc, 119 struct dc *dc,
120 struct dc_state *new_ctx, 120 struct dc_state *new_ctx,
121 struct dc_stream_state *dc_stream); 121 struct dc_stream_state *dc_stream);
122
123 enum dc_status (*remove_stream_from_ctx)(
124 struct dc *dc,
125 struct dc_state *new_ctx,
126 struct dc_stream_state *stream);
122}; 127};
123 128
124struct audio_support{ 129struct audio_support{
@@ -148,6 +153,7 @@ struct resource_pool {
148 unsigned int underlay_pipe_index; 153 unsigned int underlay_pipe_index;
149 unsigned int stream_enc_count; 154 unsigned int stream_enc_count;
150 unsigned int ref_clock_inKhz; 155 unsigned int ref_clock_inKhz;
156 unsigned int timing_generator_count;
151 157
152 /* 158 /*
153 * reserved clock source for DP 159 * reserved clock source for DP
@@ -171,6 +177,15 @@ struct resource_pool {
171 const struct resource_caps *res_cap; 177 const struct resource_caps *res_cap;
172}; 178};
173 179
180struct dcn_fe_clocks {
181 int dppclk_khz;
182};
183
184struct dcn_fe_bandwidth {
185 struct dcn_fe_clocks calc;
186 struct dcn_fe_clocks cur;
187};
188
174struct stream_resource { 189struct stream_resource {
175 struct output_pixel_processor *opp; 190 struct output_pixel_processor *opp;
176 struct timing_generator *tg; 191 struct timing_generator *tg;
@@ -179,6 +194,8 @@ struct stream_resource {
179 194
180 struct pixel_clk_params pix_clk_params; 195 struct pixel_clk_params pix_clk_params;
181 struct encoder_info_frame encoder_info_frame; 196 struct encoder_info_frame encoder_info_frame;
197
198 struct abm *abm;
182}; 199};
183 200
184struct plane_resource { 201struct plane_resource {
@@ -188,6 +205,9 @@ struct plane_resource {
188 struct input_pixel_processor *ipp; 205 struct input_pixel_processor *ipp;
189 struct transform *xfm; 206 struct transform *xfm;
190 struct dpp *dpp; 207 struct dpp *dpp;
208 uint8_t mpcc_inst;
209
210 struct dcn_fe_bandwidth bw;
191}; 211};
192 212
193struct pipe_ctx { 213struct pipe_ctx {
@@ -238,20 +258,9 @@ struct dce_bw_output {
238 int blackout_recovery_time_us; 258 int blackout_recovery_time_us;
239}; 259};
240 260
241struct dcn_bw_clocks {
242 int dispclk_khz;
243 int dppclk_khz;
244 bool dppclk_div;
245 int dcfclk_khz;
246 int dcfclk_deep_sleep_khz;
247 int fclk_khz;
248 int dram_ccm_us;
249 int min_active_dram_ccm_us;
250};
251
252struct dcn_bw_output { 261struct dcn_bw_output {
253 struct dcn_bw_clocks cur_clk; 262 struct dc_clocks cur_clk;
254 struct dcn_bw_clocks calc_clk; 263 struct dc_clocks calc_clk;
255 struct dcn_watermark_set watermarks; 264 struct dcn_watermark_set watermarks;
256}; 265};
257 266
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 0bf73b742f1f..090b7a8dd67b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,7 +102,7 @@ bool dal_ddc_service_query_ddc_data(
102 uint8_t *read_buf, 102 uint8_t *read_buf,
103 uint32_t read_size); 103 uint32_t read_size);
104 104
105enum ddc_result dal_ddc_service_read_dpcd_data( 105ssize_t dal_ddc_service_read_dpcd_data(
106 struct ddc_service *ddc, 106 struct ddc_service *ddc,
107 bool i2c, 107 bool i2c,
108 enum i2c_mot_mode mot, 108 enum i2c_mot_mode mot,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 616c73e2b0bd..2f783c650084 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -53,7 +53,7 @@ bool perform_link_training_with_retries(
53 53
54bool is_mst_supported(struct dc_link *link); 54bool is_mst_supported(struct dc_link *link);
55 55
56void detect_dp_sink_caps(struct dc_link *link); 56bool detect_dp_sink_caps(struct dc_link *link);
57 57
58void detect_edp_sink_caps(struct dc_link *link); 58void detect_edp_sink_caps(struct dc_link *link);
59 59
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
index ae2399f16d1c..a9bfe9ff8ce6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -130,6 +130,9 @@ enum bw_defines {
130 130
131struct bw_calcs_dceip { 131struct bw_calcs_dceip {
132 enum bw_calcs_version version; 132 enum bw_calcs_version version;
133 uint32_t percent_of_ideal_port_bw_received_after_urgent_latency;
134 uint32_t max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation;
135 uint32_t max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation;
133 bool large_cursor; 136 bool large_cursor;
134 uint32_t cursor_max_outstanding_group_num; 137 uint32_t cursor_max_outstanding_group_num;
135 bool dmif_pipe_en_fbc_chunk_tracker; 138 bool dmif_pipe_en_fbc_chunk_tracker;
@@ -230,6 +233,7 @@ struct bw_calcs_vbios {
230 233
231struct bw_calcs_data { 234struct bw_calcs_data {
232 /* data for all displays */ 235 /* data for all displays */
236 bool display_synchronization_enabled;
233 uint32_t number_of_displays; 237 uint32_t number_of_displays;
234 enum bw_defines underlay_surface_type; 238 enum bw_defines underlay_surface_type;
235 enum bw_defines panning_and_bezel_adjustment; 239 enum bw_defines panning_and_bezel_adjustment;
@@ -241,6 +245,7 @@ struct bw_calcs_data {
241 bool d1_display_write_back_dwb_enable; 245 bool d1_display_write_back_dwb_enable;
242 enum bw_defines d1_underlay_mode; 246 enum bw_defines d1_underlay_mode;
243 247
248 bool increase_voltage_to_support_mclk_switch;
244 bool cpup_state_change_enable; 249 bool cpup_state_change_enable;
245 bool cpuc_state_change_enable; 250 bool cpuc_state_change_enable;
246 bool nbp_state_change_enable; 251 bool nbp_state_change_enable;
@@ -449,6 +454,7 @@ struct bw_calcs_data {
449 struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8]; 454 struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8];
450 struct bw_fixed min_dram_speed_change_margin[3][8]; 455 struct bw_fixed min_dram_speed_change_margin[3][8];
451 struct bw_fixed dispclk_required_for_dram_speed_change[3][8]; 456 struct bw_fixed dispclk_required_for_dram_speed_change[3][8];
457 struct bw_fixed dispclk_required_for_dram_speed_change_pipe[3][8];
452 struct bw_fixed blackout_duration_margin[3][8]; 458 struct bw_fixed blackout_duration_margin[3][8];
453 struct bw_fixed dispclk_required_for_blackout_duration[3][8]; 459 struct bw_fixed dispclk_required_for_blackout_duration[3][8];
454 struct bw_fixed dispclk_required_for_blackout_recovery[3][8]; 460 struct bw_fixed dispclk_required_for_blackout_recovery[3][8];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index ce206355461b..de60f940030d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -32,13 +32,6 @@ enum dmcu_state {
32 DMCU_RUNNING = 1 32 DMCU_RUNNING = 1
33}; 33};
34 34
35struct dmcu_version {
36 unsigned int day;
37 unsigned int month;
38 unsigned int year;
39 unsigned int interface_version;
40};
41
42struct dmcu { 35struct dmcu {
43 struct dc_context *ctx; 36 struct dc_context *ctx;
44 const struct dmcu_funcs *funcs; 37 const struct dmcu_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 25edbde6163e..99995608b620 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -35,6 +35,8 @@ struct dpp {
35 int inst; 35 int inst;
36 struct dpp_caps *caps; 36 struct dpp_caps *caps;
37 struct pwl_params regamma_params; 37 struct pwl_params regamma_params;
38 struct pwl_params degamma_params;
39
38}; 40};
39 41
40struct dpp_grph_csc_adjustment { 42struct dpp_grph_csc_adjustment {
@@ -130,6 +132,14 @@ struct dpp_funcs {
130 const struct dc_cursor_mi_param *param, 132 const struct dc_cursor_mi_param *param,
131 uint32_t width 133 uint32_t width
132 ); 134 );
135 void (*dpp_set_hdr_multiplier)(
136 struct dpp *dpp_base,
137 uint32_t multiplier);
138
139 void (*dpp_dppclk_control)(
140 struct dpp *dpp_base,
141 bool dppclk_div,
142 bool enable);
133 143
134}; 144};
135 145
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index b7c7e70022e4..9ced254e652c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -119,6 +119,9 @@ struct hubp_funcs {
119 119
120 void (*hubp_disconnect)(struct hubp *hubp); 120 void (*hubp_disconnect)(struct hubp *hubp);
121 121
122 void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
123 void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
124
122}; 125};
123 126
124#endif 127#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index e3f0b4056318..b22158190262 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -136,7 +136,7 @@ struct out_csc_color_matrix {
136enum opp_regamma { 136enum opp_regamma {
137 OPP_REGAMMA_BYPASS = 0, 137 OPP_REGAMMA_BYPASS = 0,
138 OPP_REGAMMA_SRGB, 138 OPP_REGAMMA_SRGB,
139 OPP_REGAMMA_3_6, 139 OPP_REGAMMA_XVYCC,
140 OPP_REGAMMA_USER 140 OPP_REGAMMA_USER
141}; 141};
142 142
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 0fd329deacd8..54d8a1386142 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -123,8 +123,7 @@ struct link_encoder_funcs {
123 void (*enable_tmds_output)(struct link_encoder *enc, 123 void (*enable_tmds_output)(struct link_encoder *enc,
124 enum clock_source_id clock_source, 124 enum clock_source_id clock_source,
125 enum dc_color_depth color_depth, 125 enum dc_color_depth color_depth,
126 bool hdmi, 126 enum signal_type signal,
127 bool dual_link,
128 uint32_t pixel_clock); 127 uint32_t pixel_clock);
129 void (*enable_dp_output)(struct link_encoder *enc, 128 void (*enable_dp_output)(struct link_encoder *enc,
130 const struct dc_link_settings *link_settings, 129 const struct dc_link_settings *link_settings,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index ab8fb77f1ae5..d974d9e18612 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -297,6 +297,10 @@ struct opp_funcs {
297 bool enable, 297 bool enable,
298 const struct dc_crtc_timing *timing); 298 const struct dc_crtc_timing *timing);
299 299
300 void (*opp_pipe_clock_control)(
301 struct output_pixel_processor *opp,
302 bool enable);
303
300}; 304};
301 305
302#endif 306#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index ec312f1a3e55..3217b5bf6c7a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -92,6 +92,36 @@ struct crtc_stereo_flags {
92 uint8_t DISABLE_STEREO_DP_SYNC : 1; 92 uint8_t DISABLE_STEREO_DP_SYNC : 1;
93}; 93};
94 94
95enum crc_selection {
96 /* Order must match values expected by hardware */
97 UNION_WINDOW_A_B = 0,
98 UNION_WINDOW_A_NOT_B,
99 UNION_WINDOW_NOT_A_B,
100 UNION_WINDOW_NOT_A_NOT_B,
101 INTERSECT_WINDOW_A_B,
102 INTERSECT_WINDOW_A_NOT_B,
103 INTERSECT_WINDOW_NOT_A_B,
104 INTERSECT_WINDOW_NOT_A_NOT_B,
105};
106
107struct crc_params {
108 /* Regions used to calculate CRC*/
109 uint16_t windowa_x_start;
110 uint16_t windowa_x_end;
111 uint16_t windowa_y_start;
112 uint16_t windowa_y_end;
113
114 uint16_t windowb_x_start;
115 uint16_t windowb_x_end;
116 uint16_t windowb_y_start;
117 uint16_t windowb_y_end;
118
119 enum crc_selection selection;
120
121 bool continuous_mode;
122 bool enable;
123};
124
95struct timing_generator { 125struct timing_generator {
96 const struct timing_generator_funcs *funcs; 126 const struct timing_generator_funcs *funcs;
97 struct dc_bios *bp; 127 struct dc_bios *bp;
@@ -173,6 +203,21 @@ struct timing_generator_funcs {
173 bool (*is_tg_enabled)(struct timing_generator *tg); 203 bool (*is_tg_enabled)(struct timing_generator *tg);
174 bool (*is_optc_underflow_occurred)(struct timing_generator *tg); 204 bool (*is_optc_underflow_occurred)(struct timing_generator *tg);
175 void (*clear_optc_underflow)(struct timing_generator *tg); 205 void (*clear_optc_underflow)(struct timing_generator *tg);
206
207 /**
208 * Configure CRCs for the given timing generator. Return false if TG is
209 * not on.
210 */
211 bool (*configure_crc)(struct timing_generator *tg,
212 const struct crc_params *params);
213
214 /**
215 * Get CRCs for the given timing generator. Return false if CRCs are
216 * not enabled (via configure_crc).
217 */
218 bool (*get_crc)(struct timing_generator *tg,
219 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
220
176}; 221};
177 222
178#endif 223#endif
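A usage sketch for the CRC hooks added above, assuming a struct timing_generator *tg whose funcs implement them; the CRC window here covers the full frame via window A only, and the helper name is hypothetical.

/* Hypothetical helper, for illustration only. */
static bool example_read_otg_crc(struct timing_generator *tg,
		uint16_t width, uint16_t height,
		uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	struct crc_params params = {
		.windowa_x_start = 0,
		.windowa_x_end = width,
		.windowa_y_start = 0,
		.windowa_y_end = height,
		/* Window B left zeroed; the union then reduces to window A. */
		.selection = UNION_WINDOW_A_B,
		.continuous_mode = true,
		.enable = true,
	};

	if (!tg->funcs->configure_crc || !tg->funcs->configure_crc(tg, &params))
		return false; /* TG is off or CRC is not implemented */

	return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
}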
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 6f6c02b89f90..c5b3623bcbd9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -30,7 +30,7 @@
30#include "dc_hw_types.h" 30#include "dc_hw_types.h"
31#include "fixed31_32.h" 31#include "fixed31_32.h"
32 32
33#define CSC_TEMPERATURE_MATRIX_SIZE 9 33#define CSC_TEMPERATURE_MATRIX_SIZE 12
34 34
35struct bit_depth_reduction_params; 35struct bit_depth_reduction_params;
36 36
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 4c0aa56f7bae..e764cbad881b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -114,7 +114,7 @@ struct hw_sequencer_funcs {
114 114
115 void (*power_down)(struct dc *dc); 115 void (*power_down)(struct dc *dc);
116 116
117 void (*enable_accelerated_mode)(struct dc *dc); 117 void (*enable_accelerated_mode)(struct dc *dc, struct dc_state *context);
118 118
119 void (*enable_timing_synchronization)( 119 void (*enable_timing_synchronization)(
120 struct dc *dc, 120 struct dc *dc,
@@ -149,6 +149,7 @@ struct hw_sequencer_funcs {
149 void (*unblank_stream)(struct pipe_ctx *pipe_ctx, 149 void (*unblank_stream)(struct pipe_ctx *pipe_ctx,
150 struct dc_link_settings *link_settings); 150 struct dc_link_settings *link_settings);
151 151
152 void (*blank_stream)(struct pipe_ctx *pipe_ctx);
152 void (*pipe_control_lock)( 153 void (*pipe_control_lock)(
153 struct dc *dc, 154 struct dc *dc,
154 struct pipe_ctx *pipe, 155 struct pipe_ctx *pipe,
@@ -198,6 +199,8 @@ struct hw_sequencer_funcs {
198 bool enable); 199 bool enable);
199 void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); 200 void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
200 201
202 void (*set_cursor_position)(struct pipe_ctx *pipe);
203 void (*set_cursor_attribute)(struct pipe_ctx *pipe);
201}; 204};
202 205
203void color_space_to_black_color( 206void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index f2b8c9a376d5..30be7bb4a01a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -51,6 +51,8 @@ void dp_enable_link_phy(
51 const struct dc_link_settings *link_settings); 51 const struct dc_link_settings *link_settings);
52 52
53void dp_receiver_power_ctrl(struct dc_link *link, bool on); 53void dp_receiver_power_ctrl(struct dc_link *link, bool on);
54bool edp_receiver_ready_T9(struct dc_link *link);
55bool edp_receiver_ready_T7(struct dc_link *link);
54 56
55void dp_disable_link_phy(struct dc_link *link, enum signal_type signal); 57void dp_disable_link_phy(struct dc_link *link, enum signal_type signal);
56 58
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index f7e40b292dfb..afe0876fe6f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -36,27 +36,25 @@
36 36
37#include "dc.h" 37#include "dc.h"
38#include "core_types.h" 38#include "core_types.h"
39static bool hpd_ack( 39#define DC_LOGGER \
40 struct irq_service *irq_service, 40 irq_service->ctx->logger
41 const struct irq_source_info *info) 41
42static bool hpd_ack(struct irq_service *irq_service,
43 const struct irq_source_info *info)
42{ 44{
43 uint32_t addr = info->status_reg; 45 uint32_t addr = info->status_reg;
44 uint32_t value = dm_read_reg(irq_service->ctx, addr); 46 uint32_t value = dm_read_reg(irq_service->ctx, addr);
45 uint32_t current_status = 47 uint32_t current_status = get_reg_field_value(value,
46 get_reg_field_value( 48 DC_HPD_INT_STATUS,
47 value, 49 DC_HPD_SENSE_DELAYED);
48 DC_HPD_INT_STATUS,
49 DC_HPD_SENSE_DELAYED);
50 50
51 dal_irq_service_ack_generic(irq_service, info); 51 dal_irq_service_ack_generic(irq_service, info);
52 52
53 value = dm_read_reg(irq_service->ctx, info->enable_reg); 53 value = dm_read_reg(irq_service->ctx, info->enable_reg);
54 54
55 set_reg_field_value( 55 set_reg_field_value(value, current_status ? 0 : 1,
56 value, 56 DC_HPD_INT_CONTROL,
57 current_status ? 0 : 1, 57 DC_HPD_INT_POLARITY);
58 DC_HPD_INT_CONTROL,
59 DC_HPD_INT_POLARITY);
60 58
61 dm_write_reg(irq_service->ctx, info->enable_reg, value); 59 dm_write_reg(irq_service->ctx, info->enable_reg, value);
62 60
@@ -176,48 +174,41 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
176#define dc_underflow_int_entry(reg_num) \ 174#define dc_underflow_int_entry(reg_num) \
177 [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry() 175 [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
178 176
179bool dal_irq_service_dummy_set( 177bool dal_irq_service_dummy_set(struct irq_service *irq_service,
180 struct irq_service *irq_service, 178 const struct irq_source_info *info,
181 const struct irq_source_info *info, 179 bool enable)
182 bool enable)
183{ 180{
184 dm_logger_write( 181 DC_LOG_ERROR("%s: called for non-implemented irq source\n",
185 irq_service->ctx->logger, LOG_ERROR, 182 __func__);
186 "%s: called for non-implemented irq source\n",
187 __func__);
188 return false; 183 return false;
189} 184}
190 185
191bool dal_irq_service_dummy_ack( 186bool dal_irq_service_dummy_ack(struct irq_service *irq_service,
192 struct irq_service *irq_service, 187 const struct irq_source_info *info)
193 const struct irq_source_info *info)
194{ 188{
195 dm_logger_write( 189 DC_LOG_ERROR("%s: called for non-implemented irq source\n",
196 irq_service->ctx->logger, LOG_ERROR, 190 __func__);
197 "%s: called for non-implemented irq source\n",
198 __func__);
199 return false; 191 return false;
200} 192}
201 193
202 194
203bool dce110_vblank_set( 195bool dce110_vblank_set(struct irq_service *irq_service,
204 struct irq_service *irq_service, 196 const struct irq_source_info *info,
205 const struct irq_source_info *info, 197 bool enable)
206 bool enable)
207{ 198{
208 struct dc_context *dc_ctx = irq_service->ctx; 199 struct dc_context *dc_ctx = irq_service->ctx;
209 struct dc *core_dc = irq_service->ctx->dc; 200 struct dc *core_dc = irq_service->ctx->dc;
210 enum dc_irq_source dal_irq_src = dc_interrupt_to_irq_source( 201 enum dc_irq_source dal_irq_src =
211 irq_service->ctx->dc, 202 dc_interrupt_to_irq_source(irq_service->ctx->dc,
212 info->src_id, 203 info->src_id,
213 info->ext_id); 204 info->ext_id);
214 uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK; 205 uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
215 206
216 struct timing_generator *tg = 207 struct timing_generator *tg =
217 core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; 208 core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
218 209
219 if (enable) { 210 if (enable) {
220 if (!tg->funcs->arm_vert_intr(tg, 2)) { 211 if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
221 DC_ERROR("Failed to get VBLANK!\n"); 212 DC_ERROR("Failed to get VBLANK!\n");
222 return false; 213 return false;
223 } 214 }
@@ -225,7 +216,6 @@ bool dce110_vblank_set(
225 216
226 dal_irq_service_set_generic(irq_service, info, enable); 217 dal_irq_service_set_generic(irq_service, info, enable);
227 return true; 218 return true;
228
229} 219}
230 220
231static const struct irq_source_info_funcs dummy_irq_info_funcs = { 221static const struct irq_source_info_funcs dummy_irq_info_funcs = {
@@ -406,9 +396,8 @@ static const struct irq_service_funcs irq_service_funcs_dce110 = {
406 .to_dal_irq_source = to_dal_irq_source_dce110 396 .to_dal_irq_source = to_dal_irq_source_dce110
407}; 397};
408 398
409static void construct( 399static void construct(struct irq_service *irq_service,
410 struct irq_service *irq_service, 400 struct irq_service_init_data *init_data)
411 struct irq_service_init_data *init_data)
412{ 401{
413 dal_irq_service_construct(irq_service, init_data); 402 dal_irq_service_construct(irq_service, init_data);
414 403
@@ -416,8 +405,8 @@ static void construct(
416 irq_service->funcs = &irq_service_funcs_dce110; 405 irq_service->funcs = &irq_service_funcs_dce110;
417} 406}
418 407
419struct irq_service *dal_irq_service_dce110_create( 408struct irq_service *
420 struct irq_service_init_data *init_data) 409dal_irq_service_dce110_create(struct irq_service_init_data *init_data)
421{ 410{
422 struct irq_service *irq_service = kzalloc(sizeof(*irq_service), 411 struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
423 GFP_KERNEL); 412 GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 66d52580e29f..1ea7256ec89b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -32,7 +32,8 @@
32 32
33#include "dce/dce_12_0_offset.h" 33#include "dce/dce_12_0_offset.h"
34#include "dce/dce_12_0_sh_mask.h" 34#include "dce/dce_12_0_sh_mask.h"
35#include "soc15ip.h" 35#include "soc15_hw_ip.h"
36#include "vega10_ip_offset.h"
36 37
37#include "ivsrcid/ivsrcid_vislands30.h" 38#include "ivsrcid/ivsrcid_vislands30.h"
38 39
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index 7f7db66c48b0..e04ae49243f6 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -31,7 +31,8 @@
31 31
32#include "dcn/dcn_1_0_offset.h" 32#include "dcn/dcn_1_0_offset.h"
33#include "dcn/dcn_1_0_sh_mask.h" 33#include "dcn/dcn_1_0_sh_mask.h"
34#include "soc15ip.h" 34#include "soc15_hw_ip.h"
35#include "vega10_ip_offset.h"
35 36
36#include "irq_service_dcn10.h" 37#include "irq_service_dcn10.h"
37 38
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index b106513fc2dc..dcdfa0f01551 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -47,6 +47,8 @@
47 47
48#define CTX \ 48#define CTX \
49 irq_service->ctx 49 irq_service->ctx
50#define DC_LOGGER \
51 irq_service->ctx->logger
50 52
51void dal_irq_service_construct( 53void dal_irq_service_construct(
52 struct irq_service *irq_service, 54 struct irq_service *irq_service,
@@ -104,9 +106,7 @@ bool dal_irq_service_set(
104 find_irq_source_info(irq_service, source); 106 find_irq_source_info(irq_service, source);
105 107
106 if (!info) { 108 if (!info) {
107 dm_logger_write( 109 DC_LOG_ERROR("%s: cannot find irq info table entry for %d\n",
108 irq_service->ctx->logger, LOG_ERROR,
109 "%s: cannot find irq info table entry for %d\n",
110 __func__, 110 __func__,
111 source); 111 source);
112 return false; 112 return false;
@@ -142,9 +142,7 @@ bool dal_irq_service_ack(
142 find_irq_source_info(irq_service, source); 142 find_irq_source_info(irq_service, source);
143 143
144 if (!info) { 144 if (!info) {
145 dm_logger_write( 145 DC_LOG_ERROR("%s: cannot find irq info table entry for %d\n",
146 irq_service->ctx->logger, LOG_ERROR,
147 "%s: cannot find irq info table entry for %d\n",
148 __func__, 146 __func__,
149 source); 147 source);
150 return false; 148 return false;
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
index 57a54a7b89e5..1c079ba37c30 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -42,8 +42,7 @@ static void virtual_link_encoder_enable_tmds_output(
42 struct link_encoder *enc, 42 struct link_encoder *enc,
43 enum clock_source_id clock_source, 43 enum clock_source_id clock_source,
44 enum dc_color_depth color_depth, 44 enum dc_color_depth color_depth,
45 bool hdmi, 45 enum signal_type signal,
46 bool dual_link,
47 uint32_t pixel_clock) {} 46 uint32_t pixel_clock) {}
48 47
49static void virtual_link_encoder_enable_dp_output( 48static void virtual_link_encoder_enable_dp_output(
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 7abe663ecc6e..9831cb5eaa7c 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -109,6 +109,14 @@
109#define ASIC_REV_IS_STONEY(rev) \ 109#define ASIC_REV_IS_STONEY(rev) \
110 ((rev >= STONEY_A0) && (rev < CZ_UNKNOWN)) 110 ((rev >= STONEY_A0) && (rev < CZ_UNKNOWN))
111 111
112/* DCE12 */
113
114#define AI_GREENLAND_P_A0 1
115#define AI_GREENLAND_P_A1 2
116
117#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_UNKNOWN)
118#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_UNKNOWN)
119
112/* DCN1_0 */ 120/* DCN1_0 */
113#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */ 121#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
114#define RAVEN_A0 0x01 122#define RAVEN_A0 0x01
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 4badaedbaadd..0de258622c12 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -470,4 +470,7 @@ uint32_t dal_fixed31_32_clamp_u0d14(
470uint32_t dal_fixed31_32_clamp_u0d10( 470uint32_t dal_fixed31_32_clamp_u0d10(
471 struct fixed31_32 arg); 471 struct fixed31_32 arg);
472 472
473int32_t dal_fixed31_32_s4d19(
474 struct fixed31_32 arg);
475
473#endif 476#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 7a9b43f84a31..36bbad594267 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -419,11 +419,6 @@ struct bios_event_info {
419 bool backlight_changed; 419 bool backlight_changed;
420}; 420};
421 421
422enum {
423 HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
424 TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
425};
426
427/* 422/*
428 * DFS-bypass flag 423 * DFS-bypass flag
429 */ 424 */
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index adea1a59f620..80f0d93cfd94 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -58,11 +58,14 @@ enum {
58 58
59enum link_training_result { 59enum link_training_result {
60 LINK_TRAINING_SUCCESS, 60 LINK_TRAINING_SUCCESS,
61 LINK_TRAINING_CR_FAIL, 61 LINK_TRAINING_CR_FAIL_LANE0,
62 LINK_TRAINING_CR_FAIL_LANE1,
63 LINK_TRAINING_CR_FAIL_LANE23,
62 /* CR DONE bit is cleared during EQ step */ 64 /* CR DONE bit is cleared during EQ step */
63 LINK_TRAINING_EQ_FAIL_CR, 65 LINK_TRAINING_EQ_FAIL_CR,
64 /* other failure during EQ step */ 66 /* other failure during EQ step */
65 LINK_TRAINING_EQ_FAIL_EQ, 67 LINK_TRAINING_EQ_FAIL_EQ,
68 LINK_TRAINING_LQA_FAIL,
66}; 69};
67 70
68struct link_training_settings { 71struct link_training_settings {
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index e2ff8cd423d6..427796bdc14a 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -29,6 +29,39 @@
29#include "os_types.h" 29#include "os_types.h"
30 30
31#define MAX_NAME_LEN 32 31#define MAX_NAME_LEN 32
32#define DC_LOG_ERROR(a, ...) dm_logger_write(DC_LOGGER, LOG_ERROR, a, ## __VA_ARGS__)
33#define DC_LOG_WARNING(a, ...) dm_logger_write(DC_LOGGER, LOG_WARNING, a, ## __VA_ARGS__)
34#define DC_LOG_DEBUG(a, ...) dm_logger_write(DC_LOGGER, LOG_DEBUG, a, ## __VA_ARGS__)
35#define DC_LOG_DC(a, ...) dm_logger_write(DC_LOGGER, LOG_DC, a, ## __VA_ARGS__)
36#define DC_LOG_DTN(a, ...) dm_logger_write(DC_LOGGER, LOG_DTN, a, ## __VA_ARGS__)
37#define DC_LOG_SURFACE(a, ...) dm_logger_write(DC_LOGGER, LOG_SURFACE, a, ## __VA_ARGS__)
38#define DC_LOG_HW_HOTPLUG(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HOTPLUG, a, ## __VA_ARGS__)
39#define DC_LOG_HW_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_LINK_TRAINING, a, ## __VA_ARGS__)
40#define DC_LOG_HW_SET_MODE(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_SET_MODE, a, ## __VA_ARGS__)
41#define DC_LOG_HW_RESUME_S3(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_RESUME_S3, a, ## __VA_ARGS__)
42#define DC_LOG_HW_AUDIO(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_AUDIO, a, ## __VA_ARGS__)
43#define DC_LOG_HW_HPD_IRQ(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HPD_IRQ, a, ## __VA_ARGS__)
44#define DC_LOG_MST(a, ...) dm_logger_write(DC_LOGGER, LOG_MST, a, ## __VA_ARGS__)
45#define DC_LOG_SCALER(a, ...) dm_logger_write(DC_LOGGER, LOG_SCALER, a, ## __VA_ARGS__)
46#define DC_LOG_BIOS(a, ...) dm_logger_write(DC_LOGGER, LOG_BIOS, a, ## __VA_ARGS__)
47#define DC_LOG_BANDWIDTH_CALCS(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_CALCS, a, ## __VA_ARGS__)
48#define DC_LOG_BANDWIDTH_VALIDATION(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_VALIDATION, a, ## __VA_ARGS__)
49#define DC_LOG_I2C_AUX(a, ...) dm_logger_write(DC_LOGGER, LOG_I2C_AUX, a, ## __VA_ARGS__)
50#define DC_LOG_SYNC(a, ...) dm_logger_write(DC_LOGGER, LOG_SYNC, a, ## __VA_ARGS__)
51#define DC_LOG_BACKLIGHT(a, ...) dm_logger_write(DC_LOGGER, LOG_BACKLIGHT, a, ## __VA_ARGS__)
52#define DC_LOG_FEATURE_OVERRIDE(a, ...) dm_logger_write(DC_LOGGER, LOG_FEATURE_OVERRIDE, a, ## __VA_ARGS__)
53#define DC_LOG_DETECTION_EDID_PARSER(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_EDID_PARSER, a, ## __VA_ARGS__)
54#define DC_LOG_DETECTION_DP_CAPS(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_DP_CAPS, a, ## __VA_ARGS__)
55#define DC_LOG_RESOURCE(a, ...) dm_logger_write(DC_LOGGER, LOG_RESOURCE, a, ## __VA_ARGS__)
56#define DC_LOG_DML(a, ...) dm_logger_write(DC_LOGGER, LOG_DML, a, ## __VA_ARGS__)
57#define DC_LOG_EVENT_MODE_SET(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_MODE_SET, a, ## __VA_ARGS__)
58#define DC_LOG_EVENT_DETECTION(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_DETECTION, a, ## __VA_ARGS__)
59#define DC_LOG_EVENT_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_TRAINING, a, ## __VA_ARGS__)
60#define DC_LOG_EVENT_LINK_LOSS(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_LOSS, a, ## __VA_ARGS__)
61#define DC_LOG_EVENT_UNDERFLOW(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_UNDERFLOW, a, ## __VA_ARGS__)
62#define DC_LOG_IF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_IF_TRACE, a, ## __VA_ARGS__)
63#define DC_LOG_PERF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_PERF_TRACE, a, ## __VA_ARGS__)
64
32 65
33struct dal_logger; 66struct dal_logger;
34 67
@@ -65,6 +98,7 @@ enum dc_log_type {
65 LOG_EVENT_UNDERFLOW, 98 LOG_EVENT_UNDERFLOW,
66 LOG_IF_TRACE, 99 LOG_IF_TRACE,
67 LOG_PERF_TRACE, 100 LOG_PERF_TRACE,
101 LOG_PROFILING,
68 102
69 LOG_SECTION_TOTAL_COUNT 103 LOG_SECTION_TOTAL_COUNT
70}; 104};
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index b5ebde642207..199c5db67cbc 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -26,6 +26,11 @@
26#ifndef __DC_SIGNAL_TYPES_H__ 26#ifndef __DC_SIGNAL_TYPES_H__
27#define __DC_SIGNAL_TYPES_H__ 27#define __DC_SIGNAL_TYPES_H__
28 28
29/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
30#define TMDS_MIN_PIXEL_CLOCK 25000
31/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
32#define TMDS_MAX_PIXEL_CLOCK 165000
33
29enum signal_type { 34enum signal_type {
30 SIGNAL_TYPE_NONE = 0L, /* no signal */ 35 SIGNAL_TYPE_NONE = 0L, /* no signal */
31 SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0), 36 SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),
diff --git a/drivers/gpu/drm/amd/display/modules/color/Makefile b/drivers/gpu/drm/amd/display/modules/color/Makefile
new file mode 100644
index 000000000000..65c33a76951a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/Makefile
@@ -0,0 +1,31 @@
1#
2# Copyright 2018 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
23# Makefile for the color sub-module of DAL.
24#
25
26MOD_COLOR = color_gamma.o
27
28AMD_DAL_MOD_COLOR = $(addprefix $(AMDDALPATH)/modules/color/,$(MOD_COLOR))
29#$(info ************ DAL COLOR MODULE MAKEFILE ************)
30
31AMD_DISPLAY_FILES += $(AMD_DAL_MOD_COLOR)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
new file mode 100644
index 000000000000..e7e374f56864
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -0,0 +1,1396 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dc.h"
27#include "opp.h"
28#include "color_gamma.h"
29
30
31#define NUM_PTS_IN_REGION 16
32#define NUM_REGIONS 32
33#define MAX_HW_POINTS (NUM_PTS_IN_REGION*NUM_REGIONS)
34
35static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
36
37static struct fixed31_32 pq_table[MAX_HW_POINTS + 2];
38static struct fixed31_32 de_pq_table[MAX_HW_POINTS + 2];
39
40static bool pq_initialized; /* = false; */
41static bool de_pq_initialized; /* = false; */
42
43/* one-time setup of X points */
44void setup_x_points_distribution(void)
45{
46 struct fixed31_32 region_size = dal_fixed31_32_from_int(128);
47 int32_t segment;
48 uint32_t seg_offset;
49 uint32_t index;
50 struct fixed31_32 increment;
51
52 coordinates_x[MAX_HW_POINTS].x = region_size;
53 coordinates_x[MAX_HW_POINTS + 1].x = region_size;
54
55 for (segment = 6; segment > (6 - NUM_REGIONS); segment--) {
56 region_size = dal_fixed31_32_div_int(region_size, 2);
57 increment = dal_fixed31_32_div_int(region_size,
58 NUM_PTS_IN_REGION);
59 seg_offset = (segment + (NUM_REGIONS - 7)) * NUM_PTS_IN_REGION;
60 coordinates_x[seg_offset].x = region_size;
61
62 for (index = seg_offset + 1;
63 index < seg_offset + NUM_PTS_IN_REGION;
64 index++) {
65 coordinates_x[index].x = dal_fixed31_32_add
66 (coordinates_x[index-1].x, increment);
67 }
68 }
69}
70
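For reference, the loop above fills coordinates_x with NUM_REGIONS = 32 power-of-two regions of NUM_PTS_IN_REGION = 16 evenly spaced points each, starting at 2^-25. In formula form (a reading aid, not part of the source):

\[ x_{16k + j} = 2^{\,k-25}\left(1 + \tfrac{j}{16}\right), \qquad k = 0,\dots,31,\; j = 0,\dots,15 \]

with the two trailing entries coordinates_x[MAX_HW_POINTS] and coordinates_x[MAX_HW_POINTS + 1] both held at 2^7 = 128.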
71static void compute_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
72{
73 /* consts for PQ gamma formula. */
74 const struct fixed31_32 m1 =
75 dal_fixed31_32_from_fraction(159301758, 1000000000);
76 const struct fixed31_32 m2 =
77 dal_fixed31_32_from_fraction(7884375, 100000);
78 const struct fixed31_32 c1 =
79 dal_fixed31_32_from_fraction(8359375, 10000000);
80 const struct fixed31_32 c2 =
81 dal_fixed31_32_from_fraction(188515625, 10000000);
82 const struct fixed31_32 c3 =
83 dal_fixed31_32_from_fraction(186875, 10000);
84
85 struct fixed31_32 l_pow_m1;
86 struct fixed31_32 base;
87
88 if (dal_fixed31_32_lt(in_x, dal_fixed31_32_zero))
89 in_x = dal_fixed31_32_zero;
90
91 l_pow_m1 = dal_fixed31_32_pow(in_x, m1);
92 base = dal_fixed31_32_div(
93 dal_fixed31_32_add(c1,
94 (dal_fixed31_32_mul(c2, l_pow_m1))),
95 dal_fixed31_32_add(dal_fixed31_32_one,
96 (dal_fixed31_32_mul(c3, l_pow_m1))));
97 *out_y = dal_fixed31_32_pow(base, m2);
98}
99
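compute_pq() above evaluates the SMPTE ST 2084 (PQ) inverse EOTF; the fixed-point fractions are the standard constants (m1 is stored as a rounded decimal of 2610/16384). As a formula:

\[ \mathrm{PQ}(x) = \left(\frac{c_1 + c_2\,x^{m_1}}{1 + c_3\,x^{m_1}}\right)^{m_2}, \qquad m_1 = \tfrac{2610}{16384},\; m_2 = \tfrac{2523}{32},\; c_1 = \tfrac{3424}{4096},\; c_2 = \tfrac{2413}{128},\; c_3 = \tfrac{2392}{128} \]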
100static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
101{
102 /* consts for dePQ gamma formula. */
103 const struct fixed31_32 m1 =
104 dal_fixed31_32_from_fraction(159301758, 1000000000);
105 const struct fixed31_32 m2 =
106 dal_fixed31_32_from_fraction(7884375, 100000);
107 const struct fixed31_32 c1 =
108 dal_fixed31_32_from_fraction(8359375, 10000000);
109 const struct fixed31_32 c2 =
110 dal_fixed31_32_from_fraction(188515625, 10000000);
111 const struct fixed31_32 c3 =
112 dal_fixed31_32_from_fraction(186875, 10000);
113
114 struct fixed31_32 l_pow_m1;
115 struct fixed31_32 base, div;
116
117
118 if (dal_fixed31_32_lt(in_x, dal_fixed31_32_zero))
119 in_x = dal_fixed31_32_zero;
120
121 l_pow_m1 = dal_fixed31_32_pow(in_x,
122 dal_fixed31_32_div(dal_fixed31_32_one, m2));
123 base = dal_fixed31_32_sub(l_pow_m1, c1);
124
125 if (dal_fixed31_32_lt(base, dal_fixed31_32_zero))
126 base = dal_fixed31_32_zero;
127
128 div = dal_fixed31_32_sub(c2, dal_fixed31_32_mul(c3, l_pow_m1));
129
130 *out_y = dal_fixed31_32_pow(dal_fixed31_32_div(base, div),
131 dal_fixed31_32_div(dal_fixed31_32_one, m1));
132
133}
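compute_de_pq() above is the corresponding forward EOTF (the inverse of the expression given earlier), using the same constants and clamping the inner term at zero exactly as the code does:

\[ \mathrm{PQ}^{-1}(y) = \left(\frac{\max\!\left(y^{1/m_2} - c_1,\,0\right)}{c_2 - c_3\,y^{1/m_2}}\right)^{1/m_1} \]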
134/* one-time pre-compute PQ values - only for sdr_white_level 80 */
135void precompute_pq(void)
136{
137 int i;
138 struct fixed31_32 x;
139 const struct hw_x_point *coord_x = coordinates_x + 32;
140 struct fixed31_32 scaling_factor =
141 dal_fixed31_32_from_fraction(80, 10000);
142
143 /* pow function has problems with arguments too small */
144 for (i = 0; i < 32; i++)
145 pq_table[i] = dal_fixed31_32_zero;
146
147 for (i = 32; i <= MAX_HW_POINTS; i++) {
148 x = dal_fixed31_32_mul(coord_x->x, scaling_factor);
149 compute_pq(x, &pq_table[i]);
150 ++coord_x;
151 }
152}
153
154/* one-time pre-compute dePQ values - only for max pixel value 125 FP16 */
155void precompute_de_pq(void)
156{
157 int i;
158 struct fixed31_32 y;
159 uint32_t begin_index, end_index;
160
161 struct fixed31_32 scaling_factor = dal_fixed31_32_from_int(125);
162
163 /* X points span 2^-25 to 2^7.
164 * De-gamma X is 2^-12 to 2^0, so we skip the first -12 - (-25) = 13 regions.
165 */
166 begin_index = 13 * NUM_PTS_IN_REGION;
167 end_index = begin_index + 12 * NUM_PTS_IN_REGION;
168
169 for (i = 0; i <= begin_index; i++)
170 de_pq_table[i] = dal_fixed31_32_zero;
171
172 for (; i <= end_index; i++) {
173 compute_de_pq(coordinates_x[i].x, &y);
174 de_pq_table[i] = dal_fixed31_32_mul(y, scaling_factor);
175 }
176
177 for (; i <= MAX_HW_POINTS; i++)
178 de_pq_table[i] = de_pq_table[i-1];
179}
180struct dividers {
181 struct fixed31_32 divider1;
182 struct fixed31_32 divider2;
183 struct fixed31_32 divider3;
184};
185
186static void build_coefficients(struct gamma_coefficients *coefficients, bool is_2_4)
187{
188 static const int32_t numerator01[] = { 31308, 180000};
189 static const int32_t numerator02[] = { 12920, 4500};
190 static const int32_t numerator03[] = { 55, 99};
191 static const int32_t numerator04[] = { 55, 99};
192 static const int32_t numerator05[] = { 2400, 2200};
193
194 uint32_t i = 0;
195 uint32_t index = is_2_4 == true ? 0:1;
196
197 do {
198 coefficients->a0[i] = dal_fixed31_32_from_fraction(
199 numerator01[index], 10000000);
200 coefficients->a1[i] = dal_fixed31_32_from_fraction(
201 numerator02[index], 1000);
202 coefficients->a2[i] = dal_fixed31_32_from_fraction(
203 numerator03[index], 1000);
204 coefficients->a3[i] = dal_fixed31_32_from_fraction(
205 numerator04[index], 1000);
206 coefficients->user_gamma[i] = dal_fixed31_32_from_fraction(
207 numerator05[index], 1000);
208
209 ++i;
210 } while (i != ARRAY_SIZE(coefficients->a0));
211}
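/*
 * For reference, the fractions above evaluate to the familiar transfer
 * function constants (index 0 is picked when is_2_4 is true, i.e. for the
 * sRGB-style curve; index 1 is the BT.709-style curve):
 *
 *                 index 0                      index 1
 *   a0       31308/10^7 = 0.0031308     180000/10^7 = 0.018
 *   a1       12920/10^3 = 12.92           4500/10^3 = 4.5
 *   a2, a3      55/10^3 = 0.055             99/10^3 = 0.099
 *   gamma     2400/10^3 = 2.4             2200/10^3 = 2.2
 */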
212
213static struct fixed31_32 translate_from_linear_space(
214 struct fixed31_32 arg,
215 struct fixed31_32 a0,
216 struct fixed31_32 a1,
217 struct fixed31_32 a2,
218 struct fixed31_32 a3,
219 struct fixed31_32 gamma)
220{
221 const struct fixed31_32 one = dal_fixed31_32_from_int(1);
222
223 if (dal_fixed31_32_lt(one, arg))
224 return one;
225
226 if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0)))
227 return dal_fixed31_32_sub(
228 a2,
229 dal_fixed31_32_mul(
230 dal_fixed31_32_add(
231 one,
232 a3),
233 dal_fixed31_32_pow(
234 dal_fixed31_32_neg(arg),
235 dal_fixed31_32_recip(gamma))));
236 else if (dal_fixed31_32_le(a0, arg))
237 return dal_fixed31_32_sub(
238 dal_fixed31_32_mul(
239 dal_fixed31_32_add(
240 one,
241 a3),
242 dal_fixed31_32_pow(
243 arg,
244 dal_fixed31_32_recip(gamma))),
245 a2);
246 else
247 return dal_fixed31_32_mul(
248 arg,
249 a1);
250}
251
252static struct fixed31_32 translate_to_linear_space(
253 struct fixed31_32 arg,
254 struct fixed31_32 a0,
255 struct fixed31_32 a1,
256 struct fixed31_32 a2,
257 struct fixed31_32 a3,
258 struct fixed31_32 gamma)
259{
260 struct fixed31_32 linear;
261
262 a0 = dal_fixed31_32_mul(a0, a1);
263 if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0)))
264
265 linear = dal_fixed31_32_neg(
266 dal_fixed31_32_pow(
267 dal_fixed31_32_div(
268 dal_fixed31_32_sub(a2, arg),
269 dal_fixed31_32_add(
270 dal_fixed31_32_one, a3)), gamma));
271
272 else if (dal_fixed31_32_le(dal_fixed31_32_neg(a0), arg) &&
273 dal_fixed31_32_le(arg, a0))
274 linear = dal_fixed31_32_div(arg, a1);
275 else
276 linear = dal_fixed31_32_pow(
277 dal_fixed31_32_div(
278 dal_fixed31_32_add(a2, arg),
279 dal_fixed31_32_add(
280 dal_fixed31_32_one, a3)), gamma);
281
282 return linear;
283}
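/*
 * Double-precision sketch of the piecewise curve pair implemented by
 * translate_from_linear_space()/translate_to_linear_space() above
 * (illustrative userspace code, not part of this file).  With the index-0
 * coefficients (a0 = 0.0031308, a1 = 12.92, a2 = a3 = 0.055, gamma = 2.4)
 * this is the familiar sRGB-style encode/decode pair.
 */

#include <math.h>

static double encode_ref(double x, double a0, double a1, double a2,
			 double a3, double gamma)
{
	if (x > 1.0)
		return 1.0;
	if (x <= -a0)
		return a2 - (1.0 + a3) * pow(-x, 1.0 / gamma);
	if (x >= a0)
		return (1.0 + a3) * pow(x, 1.0 / gamma) - a2;
	return x * a1;	/* linear toe around zero */
}

static double decode_ref(double y, double a0, double a1, double a2,
			 double a3, double gamma)
{
	double t = a0 * a1;	/* threshold moved into the encoded domain */

	if (y <= -t)
		return -pow((a2 - y) / (1.0 + a3), gamma);
	if (y <= t)
		return y / a1;
	return pow((a2 + y) / (1.0 + a3), gamma);
}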
284
285static inline struct fixed31_32 translate_from_linear_space_ex(
286 struct fixed31_32 arg,
287 struct gamma_coefficients *coeff,
288 uint32_t color_index)
289{
290 return translate_from_linear_space(
291 arg,
292 coeff->a0[color_index],
293 coeff->a1[color_index],
294 coeff->a2[color_index],
295 coeff->a3[color_index],
296 coeff->user_gamma[color_index]);
297}
298
299
300static inline struct fixed31_32 translate_to_linear_space_ex(
301 struct fixed31_32 arg,
302 struct gamma_coefficients *coeff,
303 uint32_t color_index)
304{
305 return translate_to_linear_space(
306 arg,
307 coeff->a0[color_index],
308 coeff->a1[color_index],
309 coeff->a2[color_index],
310 coeff->a3[color_index],
311 coeff->user_gamma[color_index]);
312}
313
314
315static bool find_software_points(
316 const struct dc_gamma *ramp,
317 const struct gamma_pixel *axis_x,
318 struct fixed31_32 hw_point,
319 enum channel_name channel,
320 uint32_t *index_to_start,
321 uint32_t *index_left,
322 uint32_t *index_right,
323 enum hw_point_position *pos)
324{
325 const uint32_t max_number = ramp->num_entries + 3;
326
327 struct fixed31_32 left, right;
328
329 uint32_t i = *index_to_start;
330
331 while (i < max_number) {
332 if (channel == CHANNEL_NAME_RED) {
333 left = axis_x[i].r;
334
335 if (i < max_number - 1)
336 right = axis_x[i + 1].r;
337 else
338 right = axis_x[max_number - 1].r;
339 } else if (channel == CHANNEL_NAME_GREEN) {
340 left = axis_x[i].g;
341
342 if (i < max_number - 1)
343 right = axis_x[i + 1].g;
344 else
345 right = axis_x[max_number - 1].g;
346 } else {
347 left = axis_x[i].b;
348
349 if (i < max_number - 1)
350 right = axis_x[i + 1].b;
351 else
352 right = axis_x[max_number - 1].b;
353 }
354
355 if (dal_fixed31_32_le(left, hw_point) &&
356 dal_fixed31_32_le(hw_point, right)) {
357 *index_to_start = i;
358 *index_left = i;
359
360 if (i < max_number - 1)
361 *index_right = i + 1;
362 else
363 *index_right = max_number - 1;
364
365 *pos = HW_POINT_POSITION_MIDDLE;
366
367 return true;
368 } else if ((i == *index_to_start) &&
369 dal_fixed31_32_le(hw_point, left)) {
370 *index_to_start = i;
371 *index_left = i;
372 *index_right = i;
373
374 *pos = HW_POINT_POSITION_LEFT;
375
376 return true;
377 } else if ((i == max_number - 1) &&
378 dal_fixed31_32_le(right, hw_point)) {
379 *index_to_start = i;
380 *index_left = i;
381 *index_right = i;
382
383 *pos = HW_POINT_POSITION_RIGHT;
384
385 return true;
386 }
387
388 ++i;
389 }
390
391 return false;
392}
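/*
 * What find_software_points() reports, in simplified form: for a target
 * hardware X value it walks the per-channel user axis and returns the pair
 * of user-curve indices that bracket it (MIDDLE), or flags that the point
 * lies before the first entry (LEFT) or past the last one (RIGHT).  A
 * single-channel illustration covering only the MIDDLE case (names are
 * illustrative):
 */
static int bracket_point_ref(const double *axis, unsigned int count,
			     double hw_point, unsigned int *left,
			     unsigned int *right)
{
	unsigned int i;

	for (i = 0; i + 1 < count; i++) {
		if (axis[i] <= hw_point && hw_point <= axis[i + 1]) {
			*left = i;
			*right = i + 1;
			return 1;
		}
	}

	return 0;	/* the real code handles the LEFT/RIGHT edge cases here */
}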
393
394static bool build_custom_gamma_mapping_coefficients_worker(
395 const struct dc_gamma *ramp,
396 struct pixel_gamma_point *coeff,
397 const struct hw_x_point *coordinates_x,
398 const struct gamma_pixel *axis_x,
399 enum channel_name channel,
400 uint32_t number_of_points)
401{
402 uint32_t i = 0;
403
404 while (i <= number_of_points) {
405 struct fixed31_32 coord_x;
406
407 uint32_t index_to_start = 0;
408 uint32_t index_left = 0;
409 uint32_t index_right = 0;
410
411 enum hw_point_position hw_pos;
412
413 struct gamma_point *point;
414
415 struct fixed31_32 left_pos;
416 struct fixed31_32 right_pos;
417
418 if (channel == CHANNEL_NAME_RED)
419 coord_x = coordinates_x[i].regamma_y_red;
420 else if (channel == CHANNEL_NAME_GREEN)
421 coord_x = coordinates_x[i].regamma_y_green;
422 else
423 coord_x = coordinates_x[i].regamma_y_blue;
424
425 if (!find_software_points(
426 ramp, axis_x, coord_x, channel,
427 &index_to_start, &index_left, &index_right, &hw_pos)) {
428 BREAK_TO_DEBUGGER();
429 return false;
430 }
431
432 if (index_left >= ramp->num_entries + 3) {
433 BREAK_TO_DEBUGGER();
434 return false;
435 }
436
437 if (index_right >= ramp->num_entries + 3) {
438 BREAK_TO_DEBUGGER();
439 return false;
440 }
441
442 if (channel == CHANNEL_NAME_RED) {
443 point = &coeff[i].r;
444
445 left_pos = axis_x[index_left].r;
446 right_pos = axis_x[index_right].r;
447 } else if (channel == CHANNEL_NAME_GREEN) {
448 point = &coeff[i].g;
449
450 left_pos = axis_x[index_left].g;
451 right_pos = axis_x[index_right].g;
452 } else {
453 point = &coeff[i].b;
454
455 left_pos = axis_x[index_left].b;
456 right_pos = axis_x[index_right].b;
457 }
458
459 if (hw_pos == HW_POINT_POSITION_MIDDLE)
460 point->coeff = dal_fixed31_32_div(
461 dal_fixed31_32_sub(
462 coord_x,
463 left_pos),
464 dal_fixed31_32_sub(
465 right_pos,
466 left_pos));
467 else if (hw_pos == HW_POINT_POSITION_LEFT)
468 point->coeff = dal_fixed31_32_zero;
469 else if (hw_pos == HW_POINT_POSITION_RIGHT)
470 point->coeff = dal_fixed31_32_from_int(2);
471 else {
472 BREAK_TO_DEBUGGER();
473 return false;
474 }
475
476 point->left_index = index_left;
477 point->right_index = index_right;
478 point->pos = hw_pos;
479
480 ++i;
481 }
482
483 return true;
484}
485
486static struct fixed31_32 calculate_mapped_value(
487 struct pwl_float_data *rgb,
488 const struct pixel_gamma_point *coeff,
489 enum channel_name channel,
490 uint32_t max_index)
491{
492 const struct gamma_point *point;
493
494 struct fixed31_32 result;
495
496 if (channel == CHANNEL_NAME_RED)
497 point = &coeff->r;
498 else if (channel == CHANNEL_NAME_GREEN)
499 point = &coeff->g;
500 else
501 point = &coeff->b;
502
503 if ((point->left_index < 0) || (point->left_index > max_index)) {
504 BREAK_TO_DEBUGGER();
505 return dal_fixed31_32_zero;
506 }
507
508 if ((point->right_index < 0) || (point->right_index > max_index)) {
509 BREAK_TO_DEBUGGER();
510 return dal_fixed31_32_zero;
511 }
512
513 if (point->pos == HW_POINT_POSITION_MIDDLE)
514 if (channel == CHANNEL_NAME_RED)
515 result = dal_fixed31_32_add(
516 dal_fixed31_32_mul(
517 point->coeff,
518 dal_fixed31_32_sub(
519 rgb[point->right_index].r,
520 rgb[point->left_index].r)),
521 rgb[point->left_index].r);
522 else if (channel == CHANNEL_NAME_GREEN)
523 result = dal_fixed31_32_add(
524 dal_fixed31_32_mul(
525 point->coeff,
526 dal_fixed31_32_sub(
527 rgb[point->right_index].g,
528 rgb[point->left_index].g)),
529 rgb[point->left_index].g);
530 else
531 result = dal_fixed31_32_add(
532 dal_fixed31_32_mul(
533 point->coeff,
534 dal_fixed31_32_sub(
535 rgb[point->right_index].b,
536 rgb[point->left_index].b)),
537 rgb[point->left_index].b);
538 else if (point->pos == HW_POINT_POSITION_LEFT) {
539 BREAK_TO_DEBUGGER();
540 result = dal_fixed31_32_zero;
541 } else {
542 BREAK_TO_DEBUGGER();
543 result = dal_fixed31_32_one;
544 }
545
546 return result;
547}
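/*
 * Taken together, build_custom_gamma_mapping_coefficients_worker() and
 * calculate_mapped_value() perform plain linear resampling of the user ramp
 * onto the hardware X points: the worker stores
 * coeff = (x - x_left) / (x_right - x_left) and the routine above evaluates
 * y_left + coeff * (y_right - y_left).  In floating point (illustrative):
 */
static double resample_ref(double x, double x_left, double x_right,
			   double y_left, double y_right)
{
	double coeff = (x - x_left) / (x_right - x_left);

	return y_left + coeff * (y_right - y_left);
}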
548
549static void build_pq(struct pwl_float_data_ex *rgb_regamma,
550 uint32_t hw_points_num,
551 const struct hw_x_point *coordinate_x,
552 uint32_t sdr_white_level)
553{
554 uint32_t i, start_index;
555
556 struct pwl_float_data_ex *rgb = rgb_regamma;
557 const struct hw_x_point *coord_x = coordinate_x;
558 struct fixed31_32 x;
559 struct fixed31_32 output;
560 struct fixed31_32 scaling_factor =
561 dal_fixed31_32_from_fraction(sdr_white_level, 10000);
562
563 if (!pq_initialized && sdr_white_level == 80) {
564 precompute_pq();
565 pq_initialized = true;
566 }
567
568 /* TODO: start index is from segment 2^-24, skipping first segment
569 * due to x values too small for power calculations
570 */
571 start_index = 32;
572 rgb += start_index;
573 coord_x += start_index;
574
575 for (i = start_index; i <= hw_points_num; i++) {
576		/* Multiply by sdr_white_level / 10000 (0.008 for the default 80):
577		 * regamma output is 0-1 while FP16 input spans 0-125, and
578		 * FP16 1.0 corresponds to 80 nits.
579		 */
579 if (sdr_white_level == 80) {
580 output = pq_table[i];
581 } else {
582 x = dal_fixed31_32_mul(coord_x->x, scaling_factor);
583 compute_pq(x, &output);
584 }
585
586		/* out-of-range output is not expected; clamp defensively */
587 if (dal_fixed31_32_lt(output, dal_fixed31_32_zero))
588 output = dal_fixed31_32_zero;
589 else if (dal_fixed31_32_lt(dal_fixed31_32_one, output))
590 output = dal_fixed31_32_one;
591
592 rgb->r = output;
593 rgb->g = output;
594 rgb->b = output;
595
596 ++coord_x;
597 ++rgb;
598 }
599}
600
601static void build_de_pq(struct pwl_float_data_ex *de_pq,
602 uint32_t hw_points_num,
603 const struct hw_x_point *coordinate_x)
604{
605 uint32_t i;
606 struct fixed31_32 output;
607
608 struct fixed31_32 scaling_factor = dal_fixed31_32_from_int(125);
609
610 if (!de_pq_initialized) {
611 precompute_de_pq();
612 de_pq_initialized = true;
613 }
614
615
616 for (i = 0; i <= hw_points_num; i++) {
617 output = de_pq_table[i];
618		/* out-of-range values are not expected; clamp defensively */
619 if (dal_fixed31_32_lt(output, dal_fixed31_32_zero))
620 output = dal_fixed31_32_zero;
621 else if (dal_fixed31_32_lt(scaling_factor, output))
622 output = scaling_factor;
623 de_pq[i].r = output;
624 de_pq[i].g = output;
625 de_pq[i].b = output;
626 }
627}
628
629static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
630 uint32_t hw_points_num,
631 const struct hw_x_point *coordinate_x, bool is_2_4)
632{
633 uint32_t i;
634
635 struct gamma_coefficients coeff;
636 struct pwl_float_data_ex *rgb = rgb_regamma;
637 const struct hw_x_point *coord_x = coordinate_x;
638
639 build_coefficients(&coeff, is_2_4);
640
641 i = 0;
642
643 while (i != hw_points_num + 1) {
644 /*TODO use y vs r,g,b*/
645 rgb->r = translate_from_linear_space_ex(
646 coord_x->x, &coeff, 0);
647 rgb->g = rgb->r;
648 rgb->b = rgb->r;
649 ++coord_x;
650 ++rgb;
651 ++i;
652 }
653}
654
655static void build_degamma(struct pwl_float_data_ex *curve,
656 uint32_t hw_points_num,
657 const struct hw_x_point *coordinate_x, bool is_2_4)
658{
659 uint32_t i;
660 struct gamma_coefficients coeff;
661 uint32_t begin_index, end_index;
662
663 build_coefficients(&coeff, is_2_4);
664 i = 0;
665
666	/* X points are 2^-25 to 2^7
667	 * De-gamma X is 2^-12 to 2^0 - we skip the first -12 - (-25) = 13 regions
668	 */
669 begin_index = 13 * NUM_PTS_IN_REGION;
670 end_index = begin_index + 12 * NUM_PTS_IN_REGION;
671
672 while (i != begin_index) {
673 curve[i].r = dal_fixed31_32_zero;
674 curve[i].g = dal_fixed31_32_zero;
675 curve[i].b = dal_fixed31_32_zero;
676 i++;
677 }
678
679 while (i != end_index) {
680 curve[i].r = translate_to_linear_space_ex(
681 coordinate_x[i].x, &coeff, 0);
682 curve[i].g = curve[i].r;
683 curve[i].b = curve[i].r;
684 i++;
685 }
686 while (i != hw_points_num + 1) {
687 curve[i].r = dal_fixed31_32_one;
688 curve[i].g = dal_fixed31_32_one;
689 curve[i].b = dal_fixed31_32_one;
690 i++;
691 }
692}
693
694static bool scale_gamma(struct pwl_float_data *pwl_rgb,
695 const struct dc_gamma *ramp,
696 struct dividers dividers)
697{
698 const struct fixed31_32 max_driver = dal_fixed31_32_from_int(0xFFFF);
699 const struct fixed31_32 max_os = dal_fixed31_32_from_int(0xFF00);
700 struct fixed31_32 scaler = max_os;
701 uint32_t i;
702 struct pwl_float_data *rgb = pwl_rgb;
703 struct pwl_float_data *rgb_last = rgb + ramp->num_entries - 1;
704
705 i = 0;
706
707 do {
708 if (dal_fixed31_32_lt(max_os, ramp->entries.red[i]) ||
709 dal_fixed31_32_lt(max_os, ramp->entries.green[i]) ||
710 dal_fixed31_32_lt(max_os, ramp->entries.blue[i])) {
711 scaler = max_driver;
712 break;
713 }
714 ++i;
715 } while (i != ramp->num_entries);
716
717 i = 0;
718
719 do {
720 rgb->r = dal_fixed31_32_div(
721 ramp->entries.red[i], scaler);
722 rgb->g = dal_fixed31_32_div(
723 ramp->entries.green[i], scaler);
724 rgb->b = dal_fixed31_32_div(
725 ramp->entries.blue[i], scaler);
726
727 ++rgb;
728 ++i;
729 } while (i != ramp->num_entries);
730
731 rgb->r = dal_fixed31_32_mul(rgb_last->r,
732 dividers.divider1);
733 rgb->g = dal_fixed31_32_mul(rgb_last->g,
734 dividers.divider1);
735 rgb->b = dal_fixed31_32_mul(rgb_last->b,
736 dividers.divider1);
737
738 ++rgb;
739
740 rgb->r = dal_fixed31_32_mul(rgb_last->r,
741 dividers.divider2);
742 rgb->g = dal_fixed31_32_mul(rgb_last->g,
743 dividers.divider2);
744 rgb->b = dal_fixed31_32_mul(rgb_last->b,
745 dividers.divider2);
746
747 ++rgb;
748
749 rgb->r = dal_fixed31_32_mul(rgb_last->r,
750 dividers.divider3);
751 rgb->g = dal_fixed31_32_mul(rgb_last->g,
752 dividers.divider3);
753 rgb->b = dal_fixed31_32_mul(rgb_last->b,
754 dividers.divider3);
755
756 return true;
757}
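/*
 * Sketch of the normalization done by scale_gamma() (illustrative only):
 * entries are divided by 0xFF00 unless any channel value exceeds it, in
 * which case the full 0xFFFF range becomes the divisor.  The three extra
 * tail points are then the last real entry scaled by the callers' dividers
 * of 1.5, 2 and 2.5 (set up further below).
 */
static double normalize_ramp_entry_ref(double entry, int any_above_ff00)
{
	const double scaler = any_above_ff00 ? 65535.0 : 65280.0;

	return entry / scaler;
}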
758
759static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb,
760 const struct dc_gamma *ramp,
761 struct dividers dividers)
762{
763 uint32_t i;
764 struct fixed31_32 min = dal_fixed31_32_zero;
765 struct fixed31_32 max = dal_fixed31_32_one;
766
767 struct fixed31_32 delta = dal_fixed31_32_zero;
768 struct fixed31_32 offset = dal_fixed31_32_zero;
769
770 for (i = 0 ; i < ramp->num_entries; i++) {
771 if (dal_fixed31_32_lt(ramp->entries.red[i], min))
772 min = ramp->entries.red[i];
773
774 if (dal_fixed31_32_lt(ramp->entries.green[i], min))
775 min = ramp->entries.green[i];
776
777 if (dal_fixed31_32_lt(ramp->entries.blue[i], min))
778 min = ramp->entries.blue[i];
779
780 if (dal_fixed31_32_lt(max, ramp->entries.red[i]))
781 max = ramp->entries.red[i];
782
783 if (dal_fixed31_32_lt(max, ramp->entries.green[i]))
784 max = ramp->entries.green[i];
785
786 if (dal_fixed31_32_lt(max, ramp->entries.blue[i]))
787 max = ramp->entries.blue[i];
788 }
789
790 if (dal_fixed31_32_lt(min, dal_fixed31_32_zero))
791 delta = dal_fixed31_32_neg(min);
792
793 offset = dal_fixed31_32_add(min, max);
794
795 for (i = 0 ; i < ramp->num_entries; i++) {
796 pwl_rgb[i].r = dal_fixed31_32_div(
797 dal_fixed31_32_add(
798 ramp->entries.red[i], delta), offset);
799 pwl_rgb[i].g = dal_fixed31_32_div(
800 dal_fixed31_32_add(
801 ramp->entries.green[i], delta), offset);
802 pwl_rgb[i].b = dal_fixed31_32_div(
803 dal_fixed31_32_add(
804 ramp->entries.blue[i], delta), offset);
805
806 }
807
808 pwl_rgb[i].r = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
809 pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r);
810 pwl_rgb[i].g = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
811 pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
812 pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
813 pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
814 ++i;
815 pwl_rgb[i].r = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
816 pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r);
817 pwl_rgb[i].g = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
818 pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
819 pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
820 pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
821
822 return true;
823}
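/*
 * The two synthetic points appended above simply continue the slope of the
 * last real samples: p[n] = 2 * p[n-1] - p[n-2].  As a one-line reference
 * (illustrative):
 */
static double extrapolate_next_ref(double prev2, double prev1)
{
	return 2.0 * prev1 - prev2;
}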
824
825/*
826 * RS3+ color transform DDI - 1D LUT adjustment is composed with regamma here
827 * Input is evenly distributed in the output color space as specified in
828 * SetTimings
829 *
830 * Interpolation details:
831 * 1D LUT has 4096 values which give curve correction in 0-1 float range
832 * for evenly spaced points in 0-1 range. lut1D[index] gives correction
833 * for index/4095.
834 * First we find the index for which:
835 * index/4095 < regamma_y < (index+1)/4095 =>
836 * index < 4095*regamma_y < index + 1
837 * norm_y = 4095*regamma_y, and index is norm_y truncated to an integer
838 * lut1 = lut1D[index], lut2 = lut1D[index+1]
839 *
840 * adjustedY is then regamma Y linearly interpolated between lut1 and lut2
841 */
842static void apply_lut_1d(
843 const struct dc_gamma *ramp,
844 uint32_t num_hw_points,
845 struct dc_transfer_func_distributed_points *tf_pts)
846{
847 int i = 0;
848 int color = 0;
849 struct fixed31_32 *regamma_y;
850 struct fixed31_32 norm_y;
851 struct fixed31_32 lut1;
852 struct fixed31_32 lut2;
853 const int max_lut_index = 4095;
854 const struct fixed31_32 max_lut_index_f =
855 dal_fixed31_32_from_int_nonconst(max_lut_index);
856 int32_t index = 0, index_next = 0;
857 struct fixed31_32 index_f;
858 struct fixed31_32 delta_lut;
859 struct fixed31_32 delta_index;
860
861 if (ramp->type != GAMMA_CS_TFM_1D)
862 return; // this is not expected
863
864 for (i = 0; i < num_hw_points; i++) {
865 for (color = 0; color < 3; color++) {
866 if (color == 0)
867 regamma_y = &tf_pts->red[i];
868 else if (color == 1)
869 regamma_y = &tf_pts->green[i];
870 else
871 regamma_y = &tf_pts->blue[i];
872
873 norm_y = dal_fixed31_32_mul(max_lut_index_f,
874 *regamma_y);
875 index = dal_fixed31_32_floor(norm_y);
876 index_f = dal_fixed31_32_from_int_nonconst(index);
877
878 if (index < 0 || index > max_lut_index)
879 continue;
880
881 index_next = (index == max_lut_index) ? index : index+1;
882
883 if (color == 0) {
884 lut1 = ramp->entries.red[index];
885 lut2 = ramp->entries.red[index_next];
886 } else if (color == 1) {
887 lut1 = ramp->entries.green[index];
888 lut2 = ramp->entries.green[index_next];
889 } else {
890 lut1 = ramp->entries.blue[index];
891 lut2 = ramp->entries.blue[index_next];
892 }
893
894 // we have everything now, so interpolate
895 delta_lut = dal_fixed31_32_sub(lut2, lut1);
896 delta_index = dal_fixed31_32_sub(norm_y, index_f);
897
898 *regamma_y = dal_fixed31_32_add(lut1,
899 dal_fixed31_32_mul(delta_index, delta_lut));
900 }
901 }
902}
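/*
 * Floating-point sketch of the interpolation described in the comment above
 * apply_lut_1d() (illustrative only; the driver does this in fixed31_32 and
 * per channel).  lut1d is assumed to hold the 4096 correction values.
 */
static double apply_lut_1d_ref(const double *lut1d, double regamma_y)
{
	const int max_lut_index = 4095;
	double norm_y = max_lut_index * regamma_y;
	int index = (int)norm_y;	/* floor for regamma_y in [0, 1] */

	if (index < 0 || index > max_lut_index)
		return regamma_y;	/* out of range: leave the value untouched */

	if (index == max_lut_index)
		return lut1d[index];

	return lut1d[index] +
	       (norm_y - index) * (lut1d[index + 1] - lut1d[index]);
}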
903
904static void build_evenly_distributed_points(
905 struct gamma_pixel *points,
906 uint32_t numberof_points,
907 struct dividers dividers)
908{
909 struct gamma_pixel *p = points;
910 struct gamma_pixel *p_last = p + numberof_points - 1;
911
912 uint32_t i = 0;
913
914 do {
915 struct fixed31_32 value = dal_fixed31_32_from_fraction(i,
916 numberof_points - 1);
917
918 p->r = value;
919 p->g = value;
920 p->b = value;
921
922 ++p;
923 ++i;
924 } while (i != numberof_points);
925
926 p->r = dal_fixed31_32_div(p_last->r, dividers.divider1);
927 p->g = dal_fixed31_32_div(p_last->g, dividers.divider1);
928 p->b = dal_fixed31_32_div(p_last->b, dividers.divider1);
929
930 ++p;
931
932 p->r = dal_fixed31_32_div(p_last->r, dividers.divider2);
933 p->g = dal_fixed31_32_div(p_last->g, dividers.divider2);
934 p->b = dal_fixed31_32_div(p_last->b, dividers.divider2);
935
936 ++p;
937
938 p->r = dal_fixed31_32_div(p_last->r, dividers.divider3);
939 p->g = dal_fixed31_32_div(p_last->g, dividers.divider3);
940 p->b = dal_fixed31_32_div(p_last->b, dividers.divider3);
941}
942
943static inline void copy_rgb_regamma_to_coordinates_x(
944 struct hw_x_point *coordinates_x,
945 uint32_t hw_points_num,
946 const struct pwl_float_data_ex *rgb_ex)
947{
948 struct hw_x_point *coords = coordinates_x;
949 uint32_t i = 0;
950 const struct pwl_float_data_ex *rgb_regamma = rgb_ex;
951
952 while (i <= hw_points_num) {
953 coords->regamma_y_red = rgb_regamma->r;
954 coords->regamma_y_green = rgb_regamma->g;
955 coords->regamma_y_blue = rgb_regamma->b;
956
957 ++coords;
958 ++rgb_regamma;
959 ++i;
960 }
961}
962
963static bool calculate_interpolated_hardware_curve(
964 const struct dc_gamma *ramp,
965 struct pixel_gamma_point *coeff128,
966 struct pwl_float_data *rgb_user,
967 const struct hw_x_point *coordinates_x,
968 const struct gamma_pixel *axis_x,
969 uint32_t number_of_points,
970 struct dc_transfer_func_distributed_points *tf_pts)
971{
972
973 const struct pixel_gamma_point *coeff = coeff128;
974 uint32_t max_entries = 3 - 1;
975
976 uint32_t i = 0;
977
978 for (i = 0; i < 3; i++) {
979 if (!build_custom_gamma_mapping_coefficients_worker(
980 ramp, coeff128, coordinates_x, axis_x, i,
981 number_of_points))
982 return false;
983 }
984
985 i = 0;
986 max_entries += ramp->num_entries;
987
988 /* TODO: float point case */
989
990 while (i <= number_of_points) {
991 tf_pts->red[i] = calculate_mapped_value(
992 rgb_user, coeff, CHANNEL_NAME_RED, max_entries);
993 tf_pts->green[i] = calculate_mapped_value(
994 rgb_user, coeff, CHANNEL_NAME_GREEN, max_entries);
995 tf_pts->blue[i] = calculate_mapped_value(
996 rgb_user, coeff, CHANNEL_NAME_BLUE, max_entries);
997
998 ++coeff;
999 ++i;
1000 }
1001
1002 return true;
1003}
1004
1005static void build_new_custom_resulted_curve(
1006 uint32_t hw_points_num,
1007 struct dc_transfer_func_distributed_points *tf_pts)
1008{
1009 uint32_t i;
1010
1011 i = 0;
1012
1013 while (i != hw_points_num + 1) {
1014 tf_pts->red[i] = dal_fixed31_32_clamp(
1015 tf_pts->red[i], dal_fixed31_32_zero,
1016 dal_fixed31_32_one);
1017 tf_pts->green[i] = dal_fixed31_32_clamp(
1018 tf_pts->green[i], dal_fixed31_32_zero,
1019 dal_fixed31_32_one);
1020 tf_pts->blue[i] = dal_fixed31_32_clamp(
1021 tf_pts->blue[i], dal_fixed31_32_zero,
1022 dal_fixed31_32_one);
1023
1024 ++i;
1025 }
1026}
1027
1028static bool map_regamma_hw_to_x_user(
1029 const struct dc_gamma *ramp,
1030 struct pixel_gamma_point *coeff128,
1031 struct pwl_float_data *rgb_user,
1032 struct hw_x_point *coords_x,
1033 const struct gamma_pixel *axis_x,
1034 const struct pwl_float_data_ex *rgb_regamma,
1035 uint32_t hw_points_num,
1036 struct dc_transfer_func_distributed_points *tf_pts,
1037 bool mapUserRamp)
1038{
1039	/* set up so the calculated ideal regamma values can be reused */
1040
1041 int i = 0;
1042 struct hw_x_point *coords = coords_x;
1043 const struct pwl_float_data_ex *regamma = rgb_regamma;
1044
1045 if (mapUserRamp) {
1046 copy_rgb_regamma_to_coordinates_x(coords,
1047 hw_points_num,
1048 rgb_regamma);
1049
1050 calculate_interpolated_hardware_curve(
1051 ramp, coeff128, rgb_user, coords, axis_x,
1052 hw_points_num, tf_pts);
1053 } else {
1054 /* just copy current rgb_regamma into tf_pts */
1055 while (i <= hw_points_num) {
1056 tf_pts->red[i] = regamma->r;
1057 tf_pts->green[i] = regamma->g;
1058 tf_pts->blue[i] = regamma->b;
1059
1060 ++regamma;
1061 ++i;
1062 }
1063 }
1064
1065 build_new_custom_resulted_curve(hw_points_num, tf_pts);
1066
1067 return true;
1068}
1069
1070#define _EXTRA_POINTS 3
1071
1072bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
1073 const struct dc_gamma *ramp, bool mapUserRamp)
1074{
1075 struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
1076 struct dividers dividers;
1077
1078 struct pwl_float_data *rgb_user = NULL;
1079 struct pwl_float_data_ex *rgb_regamma = NULL;
1080 struct gamma_pixel *axix_x = NULL;
1081 struct pixel_gamma_point *coeff = NULL;
1082 enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
1083 bool ret = false;
1084
1085 if (output_tf->type == TF_TYPE_BYPASS)
1086 return false;
1087
1088 /* we can use hardcoded curve for plain SRGB TF */
1089 if (output_tf->type == TF_TYPE_PREDEFINED &&
1090 output_tf->tf == TRANSFER_FUNCTION_SRGB &&
1091 (!mapUserRamp && ramp->type == GAMMA_RGB_256))
1092 return true;
1093
1094 output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
1095
1096 rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
1097 GFP_KERNEL);
1098 if (!rgb_user)
1099 goto rgb_user_alloc_fail;
1100 rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
1101 GFP_KERNEL);
1102 if (!rgb_regamma)
1103 goto rgb_regamma_alloc_fail;
1104 axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
1105 GFP_KERNEL);
1106 if (!axix_x)
1107 goto axix_x_alloc_fail;
1108 coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
1109 if (!coeff)
1110 goto coeff_alloc_fail;
1111
1112 dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
1113 dividers.divider2 = dal_fixed31_32_from_int(2);
1114 dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
1115
1116 tf = output_tf->tf;
1117
1118 build_evenly_distributed_points(
1119 axix_x,
1120 ramp->num_entries,
1121 dividers);
1122
1123 if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
1124 scale_gamma(rgb_user, ramp, dividers);
1125 else if (ramp->type == GAMMA_RGB_FLOAT_1024)
1126 scale_gamma_dx(rgb_user, ramp, dividers);
1127
1128 if (tf == TRANSFER_FUNCTION_PQ) {
1129 tf_pts->end_exponent = 7;
1130 tf_pts->x_point_at_y1_red = 125;
1131 tf_pts->x_point_at_y1_green = 125;
1132 tf_pts->x_point_at_y1_blue = 125;
1133
1134 build_pq(rgb_regamma,
1135 MAX_HW_POINTS,
1136 coordinates_x,
1137 output_tf->sdr_ref_white_level);
1138 } else {
1139 tf_pts->end_exponent = 0;
1140 tf_pts->x_point_at_y1_red = 1;
1141 tf_pts->x_point_at_y1_green = 1;
1142 tf_pts->x_point_at_y1_blue = 1;
1143
1144 build_regamma(rgb_regamma,
1145 MAX_HW_POINTS,
1146 coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? true:false);
1147 }
1148
1149 map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
1150 coordinates_x, axix_x, rgb_regamma,
1151 MAX_HW_POINTS, tf_pts,
1152 (mapUserRamp || ramp->type != GAMMA_RGB_256) &&
1153 ramp->type != GAMMA_CS_TFM_1D);
1154
1155 if (ramp->type == GAMMA_CS_TFM_1D)
1156 apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
1157
1158 ret = true;
1159
1160 kfree(coeff);
1161coeff_alloc_fail:
1162 kfree(axix_x);
1163axix_x_alloc_fail:
1164 kfree(rgb_regamma);
1165rgb_regamma_alloc_fail:
1166 kfree(rgb_user);
1167rgb_user_alloc_fail:
1168 return ret;
1169}
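/*
 * Note on the error handling above: rgb_user, rgb_regamma, axix_x and coeff
 * are scratch buffers, so they are freed on the success path as well; the
 * reverse-ordered labels only let a failed allocation skip the frees for
 * buffers that were never obtained.  A minimal userspace sketch of the same
 * shape (illustrative names):
 */
#include <stdbool.h>
#include <stdlib.h>

static bool compute_with_temporaries_ref(size_t sz)
{
	bool ret = false;
	void *t1, *t2;

	t1 = malloc(sz);
	if (!t1)
		goto t1_alloc_fail;
	t2 = malloc(sz);
	if (!t2)
		goto t2_alloc_fail;

	/* ... do the real work with t1 and t2 ... */
	ret = true;

	free(t2);
t2_alloc_fail:
	free(t1);
t1_alloc_fail:
	return ret;
}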
1170
1171bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
1172 const struct dc_gamma *ramp, bool mapUserRamp)
1173{
1174 struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
1175 struct dividers dividers;
1176
1177 struct pwl_float_data *rgb_user = NULL;
1178 struct pwl_float_data_ex *curve = NULL;
1179 struct gamma_pixel *axix_x = NULL;
1180 struct pixel_gamma_point *coeff = NULL;
1181 enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
1182 bool ret = false;
1183
1184 if (input_tf->type == TF_TYPE_BYPASS)
1185 return false;
1186
1187 /* we can use hardcoded curve for plain SRGB TF */
1188 if (input_tf->type == TF_TYPE_PREDEFINED &&
1189 input_tf->tf == TRANSFER_FUNCTION_SRGB &&
1190 (!mapUserRamp && ramp->type == GAMMA_RGB_256))
1191 return true;
1192
1193 input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
1194
1195 rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
1196 GFP_KERNEL);
1197 if (!rgb_user)
1198 goto rgb_user_alloc_fail;
1199 curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
1200 GFP_KERNEL);
1201 if (!curve)
1202 goto curve_alloc_fail;
1203 axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
1204 GFP_KERNEL);
1205 if (!axix_x)
1206 goto axix_x_alloc_fail;
1207 coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
1208 if (!coeff)
1209 goto coeff_alloc_fail;
1210
1211 dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
1212 dividers.divider2 = dal_fixed31_32_from_int(2);
1213 dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
1214
1215 tf = input_tf->tf;
1216
1217 build_evenly_distributed_points(
1218 axix_x,
1219 ramp->num_entries,
1220 dividers);
1221
1222 if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
1223 scale_gamma(rgb_user, ramp, dividers);
1224 else if (ramp->type == GAMMA_RGB_FLOAT_1024)
1225 scale_gamma_dx(rgb_user, ramp, dividers);
1226
1227 if (tf == TRANSFER_FUNCTION_PQ)
1228 build_de_pq(curve,
1229 MAX_HW_POINTS,
1230 coordinates_x);
1231 else
1232 build_degamma(curve,
1233 MAX_HW_POINTS,
1234 coordinates_x,
1235 tf == TRANSFER_FUNCTION_SRGB ? true:false);
1236
1237 tf_pts->end_exponent = 0;
1238 tf_pts->x_point_at_y1_red = 1;
1239 tf_pts->x_point_at_y1_green = 1;
1240 tf_pts->x_point_at_y1_blue = 1;
1241
1242 map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
1243 coordinates_x, axix_x, curve,
1244 MAX_HW_POINTS, tf_pts,
1245 mapUserRamp);
1246
1247 ret = true;
1248
1249 kfree(coeff);
1250coeff_alloc_fail:
1251 kfree(axix_x);
1252axix_x_alloc_fail:
1253 kfree(curve);
1254curve_alloc_fail:
1255 kfree(rgb_user);
1256rgb_user_alloc_fail:
1257
1258 return ret;
1259
1260}
1261
1262
1263bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
1264 struct dc_transfer_func_distributed_points *points)
1265{
1266 uint32_t i;
1267 bool ret = false;
1268 struct pwl_float_data_ex *rgb_regamma = NULL;
1269
1270 if (trans == TRANSFER_FUNCTION_UNITY ||
1271 trans == TRANSFER_FUNCTION_LINEAR) {
1272 points->end_exponent = 0;
1273 points->x_point_at_y1_red = 1;
1274 points->x_point_at_y1_green = 1;
1275 points->x_point_at_y1_blue = 1;
1276
1277 for (i = 0; i <= MAX_HW_POINTS ; i++) {
1278 points->red[i] = coordinates_x[i].x;
1279 points->green[i] = coordinates_x[i].x;
1280 points->blue[i] = coordinates_x[i].x;
1281 }
1282 ret = true;
1283 } else if (trans == TRANSFER_FUNCTION_PQ) {
1284 rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
1285 _EXTRA_POINTS), GFP_KERNEL);
1286 if (!rgb_regamma)
1287 goto rgb_regamma_alloc_fail;
1288 points->end_exponent = 7;
1289 points->x_point_at_y1_red = 125;
1290 points->x_point_at_y1_green = 125;
1291 points->x_point_at_y1_blue = 125;
1292
1293
1294 build_pq(rgb_regamma,
1295 MAX_HW_POINTS,
1296 coordinates_x,
1297 80);
1298 for (i = 0; i <= MAX_HW_POINTS ; i++) {
1299 points->red[i] = rgb_regamma[i].r;
1300 points->green[i] = rgb_regamma[i].g;
1301 points->blue[i] = rgb_regamma[i].b;
1302 }
1303 ret = true;
1304
1305 kfree(rgb_regamma);
1306 } else if (trans == TRANSFER_FUNCTION_SRGB ||
1307 trans == TRANSFER_FUNCTION_BT709) {
1308 rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
1309 _EXTRA_POINTS), GFP_KERNEL);
1310 if (!rgb_regamma)
1311 goto rgb_regamma_alloc_fail;
1312 points->end_exponent = 0;
1313 points->x_point_at_y1_red = 1;
1314 points->x_point_at_y1_green = 1;
1315 points->x_point_at_y1_blue = 1;
1316
1317 build_regamma(rgb_regamma,
1318 MAX_HW_POINTS,
1319 coordinates_x, trans == TRANSFER_FUNCTION_SRGB ? true:false);
1320 for (i = 0; i <= MAX_HW_POINTS ; i++) {
1321 points->red[i] = rgb_regamma[i].r;
1322 points->green[i] = rgb_regamma[i].g;
1323 points->blue[i] = rgb_regamma[i].b;
1324 }
1325 ret = true;
1326
1327 kfree(rgb_regamma);
1328 }
1329rgb_regamma_alloc_fail:
1330 return ret;
1331}
1332
1333
1334bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
1335 struct dc_transfer_func_distributed_points *points)
1336{
1337 uint32_t i;
1338 bool ret = false;
1339 struct pwl_float_data_ex *rgb_degamma = NULL;
1340
1341 if (trans == TRANSFER_FUNCTION_UNITY ||
1342 trans == TRANSFER_FUNCTION_LINEAR) {
1343
1344 for (i = 0; i <= MAX_HW_POINTS ; i++) {
1345 points->red[i] = coordinates_x[i].x;
1346 points->green[i] = coordinates_x[i].x;
1347 points->blue[i] = coordinates_x[i].x;
1348 }
1349 ret = true;
1350 } else if (trans == TRANSFER_FUNCTION_PQ) {
1351 rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
1352 _EXTRA_POINTS), GFP_KERNEL);
1353 if (!rgb_degamma)
1354 goto rgb_degamma_alloc_fail;
1355
1356
1357 build_de_pq(rgb_degamma,
1358 MAX_HW_POINTS,
1359 coordinates_x);
1360 for (i = 0; i <= MAX_HW_POINTS ; i++) {
1361 points->red[i] = rgb_degamma[i].r;
1362 points->green[i] = rgb_degamma[i].g;
1363 points->blue[i] = rgb_degamma[i].b;
1364 }
1365 ret = true;
1366
1367 kfree(rgb_degamma);
1368 } else if (trans == TRANSFER_FUNCTION_SRGB ||
1369 trans == TRANSFER_FUNCTION_BT709) {
1370 rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
1371 _EXTRA_POINTS), GFP_KERNEL);
1372 if (!rgb_degamma)
1373 goto rgb_degamma_alloc_fail;
1374
1375 build_degamma(rgb_degamma,
1376 MAX_HW_POINTS,
1377 coordinates_x, trans == TRANSFER_FUNCTION_SRGB ? true:false);
1378 for (i = 0; i <= MAX_HW_POINTS ; i++) {
1379 points->red[i] = rgb_degamma[i].r;
1380 points->green[i] = rgb_degamma[i].g;
1381 points->blue[i] = rgb_degamma[i].b;
1382 }
1383 ret = true;
1384
1385 kfree(rgb_degamma);
1386 }
1387 points->end_exponent = 0;
1388 points->x_point_at_y1_red = 1;
1389 points->x_point_at_y1_green = 1;
1390 points->x_point_at_y1_blue = 1;
1391
1392rgb_degamma_alloc_fail:
1393 return ret;
1394}
1395
1396
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 0c1593e53654..b7f9bc27d101 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Advanced Micro Devices, Inc. 2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -19,29 +19,35 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: AMD
23 *
22 */ 24 */
23#ifndef PP_ASICBLOCKS_H 25
24#define PP_ASICBLOCKS_H 26#ifndef COLOR_MOD_COLOR_GAMMA_H_
25 27#define COLOR_MOD_COLOR_GAMMA_H_
26 28
27enum PHM_AsicBlock { 29struct dc_transfer_func;
28 PHM_AsicBlock_GFX, 30struct dc_gamma;
29 PHM_AsicBlock_UVD_MVC, 31struct dc_transfer_func_distributed_points;
30 PHM_AsicBlock_UVD, 32struct dc_rgb_fixed;
31 PHM_AsicBlock_UVD_HD, 33enum dc_transfer_func_predefined;
32 PHM_AsicBlock_UVD_SD, 34
33 PHM_AsicBlock_Count 35void setup_x_points_distribution(void);
34}; 36void precompute_pq(void);
35 37void precompute_de_pq(void);
36enum PHM_ClockGateSetting { 38
37 PHM_ClockGateSetting_StaticOn, 39bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
38 PHM_ClockGateSetting_StaticOff, 40 const struct dc_gamma *ramp, bool mapUserRamp);
39 PHM_ClockGateSetting_Dynamic 41
40}; 42bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
41 43 const struct dc_gamma *ramp, bool mapUserRamp);
42struct phm_asic_blocks { 44
43 bool gfx : 1; 45bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
44 bool uvd : 1; 46 struct dc_transfer_func_distributed_points *points);
45}; 47
46 48bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
47#endif 49 struct dc_transfer_func_distributed_points *points);
50
51
52
53#endif /* COLOR_MOD_COLOR_GAMMA_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b4723af368a5..27d4003aa2c7 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -33,7 +33,7 @@
33/* Refresh rate ramp at a fixed rate of 65 Hz/second */ 33/* Refresh rate ramp at a fixed rate of 65 Hz/second */
34#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65) 34#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
35/* Number of elements in the render times cache array */ 35/* Number of elements in the render times cache array */
36#define RENDER_TIMES_MAX_COUNT 20 36#define RENDER_TIMES_MAX_COUNT 10
37/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ 37/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
38#define BTR_EXIT_MARGIN 2000 38#define BTR_EXIT_MARGIN 2000
39/* Number of consecutive frames to check before entering/exiting fixed refresh*/ 39/* Number of consecutive frames to check before entering/exiting fixed refresh*/
@@ -46,13 +46,15 @@
46 46
47#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal" 47#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal"
48 48
49#define FREESYNC_DEFAULT_REGKEY "LCDFreeSyncDefault"
50
49struct gradual_static_ramp { 51struct gradual_static_ramp {
50 bool ramp_is_active; 52 bool ramp_is_active;
51 bool ramp_direction_is_up; 53 bool ramp_direction_is_up;
52 unsigned int ramp_current_frame_duration_in_ns; 54 unsigned int ramp_current_frame_duration_in_ns;
53}; 55};
54 56
55struct time_cache { 57struct freesync_time {
56 /* video (48Hz feature) related */ 58 /* video (48Hz feature) related */
57 unsigned int update_duration_in_ns; 59 unsigned int update_duration_in_ns;
58 60
@@ -64,6 +66,9 @@ struct time_cache {
64 66
65 unsigned int render_times_index; 67 unsigned int render_times_index;
66 unsigned int render_times[RENDER_TIMES_MAX_COUNT]; 68 unsigned int render_times[RENDER_TIMES_MAX_COUNT];
69
70 unsigned int min_window;
71 unsigned int max_window;
67}; 72};
68 73
69struct below_the_range { 74struct below_the_range {
@@ -98,11 +103,14 @@ struct freesync_state {
98 bool static_screen; 103 bool static_screen;
99 bool video; 104 bool video;
100 105
106 unsigned int vmin;
107 unsigned int vmax;
108
109 struct freesync_time time;
110
101 unsigned int nominal_refresh_rate_in_micro_hz; 111 unsigned int nominal_refresh_rate_in_micro_hz;
102 bool windowed_fullscreen; 112 bool windowed_fullscreen;
103 113
104 struct time_cache time;
105
106 struct gradual_static_ramp static_ramp; 114 struct gradual_static_ramp static_ramp;
107 struct below_the_range btr; 115 struct below_the_range btr;
108 struct fixed_refresh fixed_refresh; 116 struct fixed_refresh fixed_refresh;
@@ -119,14 +127,16 @@ struct freesync_entity {
119struct freesync_registry_options { 127struct freesync_registry_options {
120 bool drr_external_supported; 128 bool drr_external_supported;
121 bool drr_internal_supported; 129 bool drr_internal_supported;
130 bool lcd_freesync_default_set;
131 int lcd_freesync_default_value;
122}; 132};
123 133
124struct core_freesync { 134struct core_freesync {
125 struct mod_freesync public; 135 struct mod_freesync public;
126 struct dc *dc; 136 struct dc *dc;
137 struct freesync_registry_options opts;
127 struct freesync_entity *map; 138 struct freesync_entity *map;
128 int num_entities; 139 int num_entities;
129 struct freesync_registry_options opts;
130}; 140};
131 141
132#define MOD_FREESYNC_TO_CORE(mod_freesync)\ 142#define MOD_FREESYNC_TO_CORE(mod_freesync)\
@@ -146,7 +156,7 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
146 goto fail_alloc_context; 156 goto fail_alloc_context;
147 157
148 core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS, 158 core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
149 GFP_KERNEL); 159 GFP_KERNEL);
150 160
151 if (core_freesync->map == NULL) 161 if (core_freesync->map == NULL)
152 goto fail_alloc_map; 162 goto fail_alloc_map;
@@ -183,6 +193,16 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
183 (data & 1) ? false : true; 193 (data & 1) ? false : true;
184 } 194 }
185 195
196 if (dm_read_persistent_data(dc->ctx, NULL, NULL,
197 FREESYNC_DEFAULT_REGKEY,
198 &data, sizeof(data), &flag)) {
199 core_freesync->opts.lcd_freesync_default_set = true;
200 core_freesync->opts.lcd_freesync_default_value = data;
201 } else {
202 core_freesync->opts.lcd_freesync_default_set = false;
203 core_freesync->opts.lcd_freesync_default_value = 0;
204 }
205
186 return &core_freesync->public; 206 return &core_freesync->public;
187 207
188fail_construct: 208fail_construct:
@@ -288,6 +308,18 @@ bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
288 core_freesync->map[core_freesync->num_entities].user_enable. 308 core_freesync->map[core_freesync->num_entities].user_enable.
289 enable_for_video = 309 enable_for_video =
290 (persistent_freesync_enable & 4) ? true : false; 310 (persistent_freesync_enable & 4) ? true : false;
 311 /* If this is a FreeSync display and LCDFreeSyncDefault is set, use it as the default and write it back to userenable */
312 } else if (caps->supported && (core_freesync->opts.lcd_freesync_default_set)) {
313 core_freesync->map[core_freesync->num_entities].user_enable.enable_for_gaming =
314 (core_freesync->opts.lcd_freesync_default_value & 1) ? true : false;
315 core_freesync->map[core_freesync->num_entities].user_enable.enable_for_static =
316 (core_freesync->opts.lcd_freesync_default_value & 2) ? true : false;
317 core_freesync->map[core_freesync->num_entities].user_enable.enable_for_video =
318 (core_freesync->opts.lcd_freesync_default_value & 4) ? true : false;
319 dm_write_persistent_data(dc->ctx, stream->sink,
320 FREESYNC_REGISTRY_NAME,
321 "userenable", &core_freesync->opts.lcd_freesync_default_value,
322 sizeof(int), &flag);
291 } else { 323 } else {
292 core_freesync->map[core_freesync->num_entities].user_enable. 324 core_freesync->map[core_freesync->num_entities].user_enable.
293 enable_for_gaming = false; 325 enable_for_gaming = false;
@@ -330,6 +362,25 @@ bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
330 return true; 362 return true;
331} 363}
332 364
365static void adjust_vmin_vmax(struct core_freesync *core_freesync,
366 struct dc_stream_state **streams,
367 int num_streams,
368 int map_index,
369 unsigned int v_total_min,
370 unsigned int v_total_max)
371{
372 if (num_streams == 0 || streams == NULL || num_streams > 1)
373 return;
374
375 core_freesync->map[map_index].state.vmin = v_total_min;
376 core_freesync->map[map_index].state.vmax = v_total_max;
377
378 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
379 num_streams, v_total_min,
380 v_total_max);
381}
382
383
333static void update_stream_freesync_context(struct core_freesync *core_freesync, 384static void update_stream_freesync_context(struct core_freesync *core_freesync,
334 struct dc_stream_state *stream) 385 struct dc_stream_state *stream)
335{ 386{
@@ -588,9 +639,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
588 update_stream_freesync_context(core_freesync, 639 update_stream_freesync_context(core_freesync,
589 streams[stream_idx]); 640 streams[stream_idx]);
590 641
591 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 642 adjust_vmin_vmax(core_freesync, streams,
592 num_streams, v_total_min, 643 num_streams, map_index,
593 v_total_max); 644 v_total_min,
645 v_total_max);
594 646
595 return true; 647 return true;
596 648
@@ -613,9 +665,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
613 core_freesync, 665 core_freesync,
614 streams[stream_idx]); 666 streams[stream_idx]);
615 667
616 dc_stream_adjust_vmin_vmax( 668 adjust_vmin_vmax(
617 core_freesync->dc, streams, 669 core_freesync, streams,
618 num_streams, v_total_nominal, 670 num_streams, map_index,
671 v_total_nominal,
619 v_total_nominal); 672 v_total_nominal);
620 } 673 }
621 return true; 674 return true;
@@ -632,9 +685,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
632 core_freesync, 685 core_freesync,
633 streams[stream_idx]); 686 streams[stream_idx]);
634 687
635 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 688 adjust_vmin_vmax(core_freesync, streams,
636 num_streams, v_total_nominal, 689 num_streams, map_index,
637 v_total_nominal); 690 v_total_nominal,
691 v_total_nominal);
638 692
639 /* Reset the cached variables */ 693 /* Reset the cached variables */
640 reset_freesync_state_variables(state); 694 reset_freesync_state_variables(state);
@@ -650,9 +704,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
650 * not support freesync because a former stream has 704 * not support freesync because a former stream has
651 * be programmed 705 * be programmed
652 */ 706 */
653 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 707 adjust_vmin_vmax(core_freesync, streams,
654 num_streams, v_total_nominal, 708 num_streams, map_index,
655 v_total_nominal); 709 v_total_nominal,
710 v_total_nominal);
656 /* Reset the cached variables */ 711 /* Reset the cached variables */
657 reset_freesync_state_variables(state); 712 reset_freesync_state_variables(state);
658 } 713 }
@@ -769,8 +824,9 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
769 vmin = inserted_frame_v_total; 824 vmin = inserted_frame_v_total;
770 825
771 /* Program V_TOTAL */ 826 /* Program V_TOTAL */
772 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 827 adjust_vmin_vmax(core_freesync, streams,
773 num_streams, vmin, vmax); 828 num_streams, index,
829 vmin, vmax);
774 } 830 }
775 831
776 if (state->btr.frame_counter > 0) 832 if (state->btr.frame_counter > 0)
@@ -804,9 +860,10 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
804 update_stream_freesync_context(core_freesync, streams[0]); 860 update_stream_freesync_context(core_freesync, streams[0]);
805 861
806 /* Program static screen ramp values */ 862 /* Program static screen ramp values */
807 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 863 adjust_vmin_vmax(core_freesync, streams,
808 num_streams, v_total, 864 num_streams, index,
809 v_total); 865 v_total,
866 v_total);
810 867
811 triggers.overlay_update = true; 868 triggers.overlay_update = true;
812 triggers.surface_update = true; 869 triggers.surface_update = true;
@@ -1063,9 +1120,9 @@ bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
1063 max_refresh); 1120 max_refresh);
1064 1121
1065 /* Program vtotal min/max */ 1122 /* Program vtotal min/max */
1066 dc_stream_adjust_vmin_vmax(core_freesync->dc, &streams, 1, 1123 adjust_vmin_vmax(core_freesync, &streams, 1, index,
1067 state->freesync_range.vmin, 1124 state->freesync_range.vmin,
1068 state->freesync_range.vmax); 1125 state->freesync_range.vmax);
1069 } 1126 }
1070 1127
1071 if (min_refresh != 0 && 1128 if (min_refresh != 0 &&
@@ -1399,11 +1456,9 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
1399 } else { 1456 } else {
1400 1457
1401 vmin = state->freesync_range.vmin; 1458 vmin = state->freesync_range.vmin;
1402
1403 vmax = vmin; 1459 vmax = vmin;
1404 1460 adjust_vmin_vmax(core_freesync, &stream, map_index,
1405 dc_stream_adjust_vmin_vmax(core_freesync->dc, &stream, 1461 1, vmin, vmax);
1406 1, vmin, vmax);
1407 } 1462 }
1408} 1463}
1409 1464
@@ -1457,3 +1512,43 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
1457 1512
1458 } 1513 }
1459} 1514}
1515
1516void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
1517 struct dc_stream_state **streams, int num_streams,
1518 unsigned int *v_total_min, unsigned int *v_total_max,
1519 unsigned int *event_triggers,
1520 unsigned int *window_min, unsigned int *window_max,
1521 unsigned int *lfc_mid_point_in_us,
1522 unsigned int *inserted_frames,
1523 unsigned int *inserted_duration_in_us)
1524{
1525 unsigned int stream_index, map_index;
1526 struct core_freesync *core_freesync = NULL;
1527
1528 if (mod_freesync == NULL)
1529 return;
1530
1531 core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
1532
1533 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1534
1535 map_index = map_index_from_stream(core_freesync,
1536 streams[stream_index]);
1537
1538 if (core_freesync->map[map_index].caps->supported) {
1539 struct freesync_state state =
1540 core_freesync->map[map_index].state;
1541 *v_total_min = state.vmin;
1542 *v_total_max = state.vmax;
1543 *event_triggers = 0;
1544 *window_min = state.time.min_window;
1545 *window_max = state.time.max_window;
1546 *lfc_mid_point_in_us = state.btr.mid_point_in_us;
1547 *inserted_frames = state.btr.frames_to_insert;
1548 *inserted_duration_in_us =
1549 state.btr.inserted_frame_duration_in_us;
1550 }
1551
1552 }
1553}
1554
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 84b53425f2c8..f083e1619dbe 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -164,4 +164,13 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
164 struct dc_stream_state **streams, int num_streams, 164 struct dc_stream_state **streams, int num_streams,
165 unsigned int curr_time_stamp); 165 unsigned int curr_time_stamp);
166 166
167void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
168 struct dc_stream_state **streams, int num_streams,
169 unsigned int *v_total_min, unsigned int *v_total_max,
170 unsigned int *event_triggers,
171 unsigned int *window_min, unsigned int *window_max,
172 unsigned int *lfc_mid_point_in_us,
173 unsigned int *inserted_frames,
174 unsigned int *inserted_duration_in_us);
175
167#endif 176#endif
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
new file mode 100644
index 000000000000..3230e2adb870
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef MODULES_INC_MOD_STATS_H_
27#define MODULES_INC_MOD_STATS_H_
28
29#include "dm_services.h"
30
31struct mod_stats {
32 int dummy;
33};
34
35struct mod_stats_caps {
36 bool dummy;
37};
38
39struct mod_stats *mod_stats_create(struct dc *dc);
40
41void mod_stats_destroy(struct mod_stats *mod_stats);
42
43bool mod_stats_init(struct mod_stats *mod_stats);
44
45void mod_stats_dump(struct mod_stats *mod_stats);
46
47void mod_stats_reset_data(struct mod_stats *mod_stats);
48
49void mod_stats_update_flip(struct mod_stats *mod_stats,
50 unsigned long timestamp_in_ns);
51
52void mod_stats_update_vupdate(struct mod_stats *mod_stats,
53 unsigned long timestamp_in_ns);
54
55void mod_stats_update_freesync(struct mod_stats *mod_stats,
56 unsigned int v_total_min,
57 unsigned int v_total_max,
58 unsigned int event_triggers,
59 unsigned int window_min,
60 unsigned int window_max,
61 unsigned int lfc_mid_point_in_us,
62 unsigned int inserted_frames,
63 unsigned int inserted_frame_duration_in_us);
64
65#endif /* MODULES_INC_MOD_STATS_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
new file mode 100644
index 000000000000..041f87b73d5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -0,0 +1,334 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "mod_stats.h"
27#include "dm_services.h"
28#include "dc.h"
29#include "core_types.h"
30
31#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
32#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000001
33#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
34
35#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
36#define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
37#define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000
38
39#define MOD_STATS_NUM_VSYNCS 5
40
41struct stats_time_cache {
42 unsigned long flip_timestamp_in_ns;
43 unsigned long vupdate_timestamp_in_ns;
44
45 unsigned int render_time_in_us;
46 unsigned int avg_render_time_in_us_last_ten;
47 unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
48 unsigned int num_vsync_between_flips;
49
50 unsigned int flip_to_vsync_time_in_us;
51 unsigned int vsync_to_flip_time_in_us;
52
53 unsigned int min_window;
54 unsigned int max_window;
55 unsigned int v_total_min;
56 unsigned int v_total_max;
57 unsigned int event_triggers;
58
59 unsigned int lfc_mid_point_in_us;
60 unsigned int num_frames_inserted;
61 unsigned int inserted_duration_in_us;
62
63 unsigned int flags;
64};
65
66struct core_stats {
67 struct mod_stats public;
68 struct dc *dc;
69
70 struct stats_time_cache *time;
71 unsigned int index;
72
73 bool enabled;
74 unsigned int entries;
75};
76
77#define MOD_STATS_TO_CORE(mod_stats)\
78 container_of(mod_stats, struct core_stats, public)
79
80bool mod_stats_init(struct mod_stats *mod_stats)
81{
82 bool result = false;
83 struct core_stats *core_stats = NULL;
84 struct dc *dc = NULL;
85
86 if (mod_stats == NULL)
87 return false;
88
89 core_stats = MOD_STATS_TO_CORE(mod_stats);
90 dc = core_stats->dc;
91
92 return result;
93}
94
95struct mod_stats *mod_stats_create(struct dc *dc)
96{
97 struct core_stats *core_stats = NULL;
98 struct persistent_data_flag flag;
99 unsigned int reg_data;
100 int i = 0;
101
102 core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
103
104 if (core_stats == NULL)
105 goto fail_alloc_context;
106
107 if (dc == NULL)
108 goto fail_construct;
109
110 core_stats->dc = dc;
111
112 core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
113 if (dm_read_persistent_data(dc->ctx, NULL, NULL,
114 DAL_STATS_ENABLE_REGKEY,
115 &reg_data, sizeof(unsigned int), &flag))
116 core_stats->enabled = reg_data;
117
118 core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
119 if (dm_read_persistent_data(dc->ctx, NULL, NULL,
120 DAL_STATS_ENTRIES_REGKEY,
121 &reg_data, sizeof(unsigned int), &flag)) {
122 if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
123 core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
124 else
125 core_stats->entries = reg_data;
126 }
127
128 core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
129 GFP_KERNEL);
130
131 if (core_stats->time == NULL)
132 goto fail_construct;
133
134 /* Purposely leave index 0 unused so we don't need special logic to
135 * handle calculation cases that depend on previous flip data.
136 */
137 core_stats->index = 1;
138
139 return &core_stats->public;
140
141fail_construct:
142 kfree(core_stats);
143
144fail_alloc_context:
145 return NULL;
146}
147
148void mod_stats_destroy(struct mod_stats *mod_stats)
149{
150 if (mod_stats != NULL) {
151 struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
152
153 if (core_stats->time != NULL)
154 kfree(core_stats->time);
155
156 kfree(core_stats);
157 }
158}
159
160void mod_stats_dump(struct mod_stats *mod_stats)
161{
162 struct dc *dc = NULL;
163 struct dal_logger *logger = NULL;
164 struct core_stats *core_stats = NULL;
165 struct stats_time_cache *time = NULL;
166 unsigned int index = 0;
167
168 if (mod_stats == NULL)
169 return;
170
171 core_stats = MOD_STATS_TO_CORE(mod_stats);
172 dc = core_stats->dc;
173 logger = dc->ctx->logger;
174 time = core_stats->time;
175
176 //LogEntry* pLog = GetLog()->Open(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
177
178 //if (!pLog->IsDummyEntry())
179 {
180 dm_logger_write(logger, LOG_PROFILING, "==Display Caps==\n");
181 dm_logger_write(logger, LOG_PROFILING, "\n");
182 dm_logger_write(logger, LOG_PROFILING, "\n");
183
184 dm_logger_write(logger, LOG_PROFILING, "==Stats==\n");
185 dm_logger_write(logger, LOG_PROFILING,
186 "render avgRender minWindow midPoint maxWindow vsyncToFlip flipToVsync #vsyncBetweenFlip #frame insertDuration vTotalMin vTotalMax eventTrigs vSyncTime1 vSyncTime2 vSyncTime3 vSyncTime4 vSyncTime5 flags\n");
187
188 for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
189 dm_logger_write(logger, LOG_PROFILING,
190 "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n",
191 time[i].render_time_in_us,
192 time[i].avg_render_time_in_us_last_ten,
193 time[i].min_window,
194 time[i].lfc_mid_point_in_us,
195 time[i].max_window,
196 time[i].vsync_to_flip_time_in_us,
197 time[i].flip_to_vsync_time_in_us,
198 time[i].num_vsync_between_flips,
199 time[i].num_frames_inserted,
200 time[i].inserted_duration_in_us,
201 time[i].v_total_min,
202 time[i].v_total_max,
203 time[i].event_triggers,
204 time[i].v_sync_time_in_us[0],
205 time[i].v_sync_time_in_us[1],
206 time[i].v_sync_time_in_us[2],
207 time[i].v_sync_time_in_us[3],
208 time[i].v_sync_time_in_us[4],
209 time[i].flags);
210 }
211 }
212 //GetLog()->Close(pLog);
213 //GetLog()->UnSetLogMask(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
214}
215
216void mod_stats_reset_data(struct mod_stats *mod_stats)
217{
218 struct core_stats *core_stats = NULL;
219 struct stats_time_cache *time = NULL;
220 unsigned int index = 0;
221
222 if (mod_stats == NULL)
223 return;
224
225 core_stats = MOD_STATS_TO_CORE(mod_stats);
226
227 memset(core_stats->time, 0,
228 sizeof(struct stats_time_cache) * core_stats->entries);
229
230 core_stats->index = 0;
231}
232
233void mod_stats_update_flip(struct mod_stats *mod_stats,
234 unsigned long timestamp_in_ns)
235{
236 struct core_stats *core_stats = NULL;
237 struct stats_time_cache *time = NULL;
238 unsigned int index = 0;
239
240 if (mod_stats == NULL)
241 return;
242
243 core_stats = MOD_STATS_TO_CORE(mod_stats);
244
245 if (core_stats->index >= core_stats->entries)
246 return;
247
248 time = core_stats->time;
249 index = core_stats->index;
250
251 time[index].flip_timestamp_in_ns = timestamp_in_ns;
252 time[index].render_time_in_us =
253 timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
254
255 if (index >= 10) {
256 for (unsigned int i = 0; i < 10; i++)
257 time[index].avg_render_time_in_us_last_ten +=
258 time[index - i].render_time_in_us;
259 time[index].avg_render_time_in_us_last_ten /= 10;
260 }
261
262 if (time[index].num_vsync_between_flips > 0)
263 time[index].vsync_to_flip_time_in_us =
264 timestamp_in_ns - time[index].vupdate_timestamp_in_ns;
265 else
266 time[index].vsync_to_flip_time_in_us =
267 timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
268
269 core_stats->index++;
270}
271
272void mod_stats_update_vupdate(struct mod_stats *mod_stats,
273 unsigned long timestamp_in_ns)
274{
275 struct core_stats *core_stats = NULL;
276 struct stats_time_cache *time = NULL;
277 unsigned int index = 0;
278
279 if (mod_stats == NULL)
280 return;
281
282 core_stats = MOD_STATS_TO_CORE(mod_stats);
283
284 if (core_stats->index >= core_stats->entries)
285 return;
286
287 time = core_stats->time;
288 index = core_stats->index;
289
290 time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
291 if (time[index].num_vsync_between_flips < MOD_STATS_NUM_VSYNCS)
292 time[index].v_sync_time_in_us[time[index].num_vsync_between_flips] =
293 timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
294 time[index].flip_to_vsync_time_in_us =
295 timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
296
297 time[index].num_vsync_between_flips++;
298}
299
300void mod_stats_update_freesync(struct mod_stats *mod_stats,
301 unsigned int v_total_min,
302 unsigned int v_total_max,
303 unsigned int event_triggers,
304 unsigned int window_min,
305 unsigned int window_max,
306 unsigned int lfc_mid_point_in_us,
307 unsigned int inserted_frames,
308 unsigned int inserted_duration_in_us)
309{
310 struct core_stats *core_stats = NULL;
311 struct stats_time_cache *time = NULL;
312 unsigned int index = 0;
313
314 if (mod_stats == NULL)
315 return;
316
317 core_stats = MOD_STATS_TO_CORE(mod_stats);
318
319 if (core_stats->index >= core_stats->entries)
320 return;
321
322 time = core_stats->time;
323 index = core_stats->index;
324
325 time[index].v_total_min = v_total_min;
326 time[index].v_total_max = v_total_max;
327 time[index].event_triggers = event_triggers;
328 time[index].min_window = window_min;
329 time[index].max_window = window_max;
330 time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
331 time[index].num_frames_inserted = inserted_frames;
332 time[index].inserted_duration_in_us = inserted_duration_in_us;
333}
334
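The stats module added above is driven through a small set of entry points: mod_stats_create() allocates the time cache (sized by the DalStatsEntries key), mod_stats_update_vupdate() and mod_stats_update_flip() record per-frame timestamps, mod_stats_update_freesync() snapshots the current vTotal min/max, event triggers, window bounds and LFC insertion data in that order, and mod_stats_dump() prints one profiling line per cached entry. Below is a minimal caller sketch, assuming a mod_stats.h header that declares these entry points and a struct dc * supplied by the display manager; the example_* wrappers and the way timestamps are obtained are illustrative only, not taken from the amdgpu DM code.

/*
 * Illustrative sketch only (not the amdgpu display manager): how a caller
 * might wire up the mod_stats entry points introduced above.
 * Assumes mod_stats.h declares these functions and dc comes from the DM.
 */
#include "mod_stats.h"

static struct mod_stats *stats;

void example_stats_init(struct dc *dc)
{
	/* Returns NULL if dc is NULL or the time cache allocation fails. */
	stats = mod_stats_create(dc);
}

void example_stats_vupdate_irq(unsigned long timestamp_ns)
{
	if (stats)
		mod_stats_update_vupdate(stats, timestamp_ns);
}

void example_stats_flip_done(unsigned long timestamp_ns)
{
	if (stats)
		mod_stats_update_flip(stats, timestamp_ns);
}

void example_stats_fini(void)
{
	if (!stats)
		return;
	mod_stats_dump(stats);		/* one LOG_PROFILING line per cached entry */
	mod_stats_destroy(stats);	/* frees the time cache and the context */
	stats = NULL;
}
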
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
index b28d4b64c05d..e2a2f114bd8e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
@@ -9364,17 +9364,31 @@
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x14
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x18
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x1c
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
 #define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00100000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x01000000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x10000000L
 //HUBPREQ0_DCSURF_FLIP_CONTROL
 #define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
 #define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h
new file mode 100644
index 000000000000..13bfc2e6e16f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h
@@ -0,0 +1,31150 @@
1/*
2 * Copyright (C) 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */
21#ifndef _gc_9_1_SH_MASK_HEADER
22#define _gc_9_1_SH_MASK_HEADER
23
24
25// addressBlock: gc_grbmdec
26//GRBM_CNTL
27#define GRBM_CNTL__READ_TIMEOUT__SHIFT 0x0
28#define GRBM_CNTL__REPORT_LAST_RDERR__SHIFT 0x1f
29#define GRBM_CNTL__READ_TIMEOUT_MASK 0x000000FFL
30#define GRBM_CNTL__REPORT_LAST_RDERR_MASK 0x80000000L
31//GRBM_SKEW_CNTL
32#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD__SHIFT 0x0
33#define GRBM_SKEW_CNTL__SKEW_COUNT__SHIFT 0x6
34#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD_MASK 0x0000003FL
35#define GRBM_SKEW_CNTL__SKEW_COUNT_MASK 0x00000FC0L
36//GRBM_STATUS2
37#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL__SHIFT 0x0
38#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING__SHIFT 0x4
39#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING__SHIFT 0x5
40#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING__SHIFT 0x6
41#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING__SHIFT 0x7
42#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING__SHIFT 0x8
43#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING__SHIFT 0x9
44#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING__SHIFT 0xa
45#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING__SHIFT 0xb
46#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING__SHIFT 0xc
47#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING__SHIFT 0xd
48#define GRBM_STATUS2__RLC_RQ_PENDING__SHIFT 0xe
49#define GRBM_STATUS2__UTCL2_BUSY__SHIFT 0xf
50#define GRBM_STATUS2__EA_BUSY__SHIFT 0x10
51#define GRBM_STATUS2__RMI_BUSY__SHIFT 0x11
52#define GRBM_STATUS2__UTCL2_RQ_PENDING__SHIFT 0x12
53#define GRBM_STATUS2__CPF_RQ_PENDING__SHIFT 0x13
54#define GRBM_STATUS2__EA_LINK_BUSY__SHIFT 0x14
55#define GRBM_STATUS2__RLC_BUSY__SHIFT 0x18
56#define GRBM_STATUS2__TC_BUSY__SHIFT 0x19
57#define GRBM_STATUS2__TCC_CC_RESIDENT__SHIFT 0x1a
58#define GRBM_STATUS2__CPF_BUSY__SHIFT 0x1c
59#define GRBM_STATUS2__CPC_BUSY__SHIFT 0x1d
60#define GRBM_STATUS2__CPG_BUSY__SHIFT 0x1e
61#define GRBM_STATUS2__CPAXI_BUSY__SHIFT 0x1f
62#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000FL
63#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING_MASK 0x00000010L
64#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING_MASK 0x00000020L
65#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING_MASK 0x00000040L
66#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING_MASK 0x00000080L
67#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING_MASK 0x00000100L
68#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING_MASK 0x00000200L
69#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING_MASK 0x00000400L
70#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING_MASK 0x00000800L
71#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING_MASK 0x00001000L
72#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING_MASK 0x00002000L
73#define GRBM_STATUS2__RLC_RQ_PENDING_MASK 0x00004000L
74#define GRBM_STATUS2__UTCL2_BUSY_MASK 0x00008000L
75#define GRBM_STATUS2__EA_BUSY_MASK 0x00010000L
76#define GRBM_STATUS2__RMI_BUSY_MASK 0x00020000L
77#define GRBM_STATUS2__UTCL2_RQ_PENDING_MASK 0x00040000L
78#define GRBM_STATUS2__CPF_RQ_PENDING_MASK 0x00080000L
79#define GRBM_STATUS2__EA_LINK_BUSY_MASK 0x00100000L
80#define GRBM_STATUS2__RLC_BUSY_MASK 0x01000000L
81#define GRBM_STATUS2__TC_BUSY_MASK 0x02000000L
82#define GRBM_STATUS2__TCC_CC_RESIDENT_MASK 0x04000000L
83#define GRBM_STATUS2__CPF_BUSY_MASK 0x10000000L
84#define GRBM_STATUS2__CPC_BUSY_MASK 0x20000000L
85#define GRBM_STATUS2__CPG_BUSY_MASK 0x40000000L
86#define GRBM_STATUS2__CPAXI_BUSY_MASK 0x80000000L
87//GRBM_PWR_CNTL
88#define GRBM_PWR_CNTL__ALL_REQ_TYPE__SHIFT 0x0
89#define GRBM_PWR_CNTL__GFX_REQ_TYPE__SHIFT 0x2
90#define GRBM_PWR_CNTL__ALL_RSP_TYPE__SHIFT 0x4
91#define GRBM_PWR_CNTL__GFX_RSP_TYPE__SHIFT 0x6
92#define GRBM_PWR_CNTL__GFX_REQ_EN__SHIFT 0xe
93#define GRBM_PWR_CNTL__ALL_REQ_EN__SHIFT 0xf
94#define GRBM_PWR_CNTL__ALL_REQ_TYPE_MASK 0x00000003L
95#define GRBM_PWR_CNTL__GFX_REQ_TYPE_MASK 0x0000000CL
96#define GRBM_PWR_CNTL__ALL_RSP_TYPE_MASK 0x00000030L
97#define GRBM_PWR_CNTL__GFX_RSP_TYPE_MASK 0x000000C0L
98#define GRBM_PWR_CNTL__GFX_REQ_EN_MASK 0x00004000L
99#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
100//GRBM_STATUS
101#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
102#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
103#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
104#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
105#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
106#define GRBM_STATUS__DB_CLEAN__SHIFT 0xc
107#define GRBM_STATUS__CB_CLEAN__SHIFT 0xd
108#define GRBM_STATUS__TA_BUSY__SHIFT 0xe
109#define GRBM_STATUS__GDS_BUSY__SHIFT 0xf
110#define GRBM_STATUS__WD_BUSY_NO_DMA__SHIFT 0x10
111#define GRBM_STATUS__VGT_BUSY__SHIFT 0x11
112#define GRBM_STATUS__IA_BUSY_NO_DMA__SHIFT 0x12
113#define GRBM_STATUS__IA_BUSY__SHIFT 0x13
114#define GRBM_STATUS__SX_BUSY__SHIFT 0x14
115#define GRBM_STATUS__WD_BUSY__SHIFT 0x15
116#define GRBM_STATUS__SPI_BUSY__SHIFT 0x16
117#define GRBM_STATUS__BCI_BUSY__SHIFT 0x17
118#define GRBM_STATUS__SC_BUSY__SHIFT 0x18
119#define GRBM_STATUS__PA_BUSY__SHIFT 0x19
120#define GRBM_STATUS__DB_BUSY__SHIFT 0x1a
121#define GRBM_STATUS__CP_COHERENCY_BUSY__SHIFT 0x1c
122#define GRBM_STATUS__CP_BUSY__SHIFT 0x1d
123#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
124#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
125#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
126#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
127#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
128#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
129#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
130#define GRBM_STATUS__DB_CLEAN_MASK 0x00001000L
131#define GRBM_STATUS__CB_CLEAN_MASK 0x00002000L
132#define GRBM_STATUS__TA_BUSY_MASK 0x00004000L
133#define GRBM_STATUS__GDS_BUSY_MASK 0x00008000L
134#define GRBM_STATUS__WD_BUSY_NO_DMA_MASK 0x00010000L
135#define GRBM_STATUS__VGT_BUSY_MASK 0x00020000L
136#define GRBM_STATUS__IA_BUSY_NO_DMA_MASK 0x00040000L
137#define GRBM_STATUS__IA_BUSY_MASK 0x00080000L
138#define GRBM_STATUS__SX_BUSY_MASK 0x00100000L
139#define GRBM_STATUS__WD_BUSY_MASK 0x00200000L
140#define GRBM_STATUS__SPI_BUSY_MASK 0x00400000L
141#define GRBM_STATUS__BCI_BUSY_MASK 0x00800000L
142#define GRBM_STATUS__SC_BUSY_MASK 0x01000000L
143#define GRBM_STATUS__PA_BUSY_MASK 0x02000000L
144#define GRBM_STATUS__DB_BUSY_MASK 0x04000000L
145#define GRBM_STATUS__CP_COHERENCY_BUSY_MASK 0x10000000L
146#define GRBM_STATUS__CP_BUSY_MASK 0x20000000L
147#define GRBM_STATUS__CB_BUSY_MASK 0x40000000L
148#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
149//GRBM_STATUS_SE0
150#define GRBM_STATUS_SE0__DB_CLEAN__SHIFT 0x1
151#define GRBM_STATUS_SE0__CB_CLEAN__SHIFT 0x2
152#define GRBM_STATUS_SE0__RMI_BUSY__SHIFT 0x15
153#define GRBM_STATUS_SE0__BCI_BUSY__SHIFT 0x16
154#define GRBM_STATUS_SE0__VGT_BUSY__SHIFT 0x17
155#define GRBM_STATUS_SE0__PA_BUSY__SHIFT 0x18
156#define GRBM_STATUS_SE0__TA_BUSY__SHIFT 0x19
157#define GRBM_STATUS_SE0__SX_BUSY__SHIFT 0x1a
158#define GRBM_STATUS_SE0__SPI_BUSY__SHIFT 0x1b
159#define GRBM_STATUS_SE0__SC_BUSY__SHIFT 0x1d
160#define GRBM_STATUS_SE0__DB_BUSY__SHIFT 0x1e
161#define GRBM_STATUS_SE0__CB_BUSY__SHIFT 0x1f
162#define GRBM_STATUS_SE0__DB_CLEAN_MASK 0x00000002L
163#define GRBM_STATUS_SE0__CB_CLEAN_MASK 0x00000004L
164#define GRBM_STATUS_SE0__RMI_BUSY_MASK 0x00200000L
165#define GRBM_STATUS_SE0__BCI_BUSY_MASK 0x00400000L
166#define GRBM_STATUS_SE0__VGT_BUSY_MASK 0x00800000L
167#define GRBM_STATUS_SE0__PA_BUSY_MASK 0x01000000L
168#define GRBM_STATUS_SE0__TA_BUSY_MASK 0x02000000L
169#define GRBM_STATUS_SE0__SX_BUSY_MASK 0x04000000L
170#define GRBM_STATUS_SE0__SPI_BUSY_MASK 0x08000000L
171#define GRBM_STATUS_SE0__SC_BUSY_MASK 0x20000000L
172#define GRBM_STATUS_SE0__DB_BUSY_MASK 0x40000000L
173#define GRBM_STATUS_SE0__CB_BUSY_MASK 0x80000000L
174//GRBM_STATUS_SE1
175#define GRBM_STATUS_SE1__DB_CLEAN__SHIFT 0x1
176#define GRBM_STATUS_SE1__CB_CLEAN__SHIFT 0x2
177#define GRBM_STATUS_SE1__RMI_BUSY__SHIFT 0x15
178#define GRBM_STATUS_SE1__BCI_BUSY__SHIFT 0x16
179#define GRBM_STATUS_SE1__VGT_BUSY__SHIFT 0x17
180#define GRBM_STATUS_SE1__PA_BUSY__SHIFT 0x18
181#define GRBM_STATUS_SE1__TA_BUSY__SHIFT 0x19
182#define GRBM_STATUS_SE1__SX_BUSY__SHIFT 0x1a
183#define GRBM_STATUS_SE1__SPI_BUSY__SHIFT 0x1b
184#define GRBM_STATUS_SE1__SC_BUSY__SHIFT 0x1d
185#define GRBM_STATUS_SE1__DB_BUSY__SHIFT 0x1e
186#define GRBM_STATUS_SE1__CB_BUSY__SHIFT 0x1f
187#define GRBM_STATUS_SE1__DB_CLEAN_MASK 0x00000002L
188#define GRBM_STATUS_SE1__CB_CLEAN_MASK 0x00000004L
189#define GRBM_STATUS_SE1__RMI_BUSY_MASK 0x00200000L
190#define GRBM_STATUS_SE1__BCI_BUSY_MASK 0x00400000L
191#define GRBM_STATUS_SE1__VGT_BUSY_MASK 0x00800000L
192#define GRBM_STATUS_SE1__PA_BUSY_MASK 0x01000000L
193#define GRBM_STATUS_SE1__TA_BUSY_MASK 0x02000000L
194#define GRBM_STATUS_SE1__SX_BUSY_MASK 0x04000000L
195#define GRBM_STATUS_SE1__SPI_BUSY_MASK 0x08000000L
196#define GRBM_STATUS_SE1__SC_BUSY_MASK 0x20000000L
197#define GRBM_STATUS_SE1__DB_BUSY_MASK 0x40000000L
198#define GRBM_STATUS_SE1__CB_BUSY_MASK 0x80000000L
199//GRBM_SOFT_RESET
200#define GRBM_SOFT_RESET__SOFT_RESET_CP__SHIFT 0x0
201#define GRBM_SOFT_RESET__SOFT_RESET_RLC__SHIFT 0x2
202#define GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT 0x10
203#define GRBM_SOFT_RESET__SOFT_RESET_CPF__SHIFT 0x11
204#define GRBM_SOFT_RESET__SOFT_RESET_CPC__SHIFT 0x12
205#define GRBM_SOFT_RESET__SOFT_RESET_CPG__SHIFT 0x13
206#define GRBM_SOFT_RESET__SOFT_RESET_CAC__SHIFT 0x14
207#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI__SHIFT 0x15
208#define GRBM_SOFT_RESET__SOFT_RESET_EA__SHIFT 0x16
209#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK 0x00000001L
210#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x00000004L
211#define GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK 0x00010000L
212#define GRBM_SOFT_RESET__SOFT_RESET_CPF_MASK 0x00020000L
213#define GRBM_SOFT_RESET__SOFT_RESET_CPC_MASK 0x00040000L
214#define GRBM_SOFT_RESET__SOFT_RESET_CPG_MASK 0x00080000L
215#define GRBM_SOFT_RESET__SOFT_RESET_CAC_MASK 0x00100000L
216#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI_MASK 0x00200000L
217#define GRBM_SOFT_RESET__SOFT_RESET_EA_MASK 0x00400000L
218//GRBM_CGTT_CLK_CNTL
219#define GRBM_CGTT_CLK_CNTL__ON_DELAY__SHIFT 0x0
220#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS__SHIFT 0x4
221#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
222#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
223#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
224#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
225#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
226#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
227#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
228#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
229#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
230#define GRBM_CGTT_CLK_CNTL__ON_DELAY_MASK 0x0000000FL
231#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS_MASK 0x00000FF0L
232#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
233#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
234#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
235#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
236#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
237#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
238#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
239#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
240#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
241//GRBM_GFX_CLKEN_CNTL
242#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x0
243#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x8
244#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000FL
245#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001F00L
246//GRBM_WAIT_IDLE_CLOCKS
247#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS__SHIFT 0x0
248#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS_MASK 0x000000FFL
249//GRBM_STATUS_SE2
250#define GRBM_STATUS_SE2__DB_CLEAN__SHIFT 0x1
251#define GRBM_STATUS_SE2__CB_CLEAN__SHIFT 0x2
252#define GRBM_STATUS_SE2__RMI_BUSY__SHIFT 0x15
253#define GRBM_STATUS_SE2__BCI_BUSY__SHIFT 0x16
254#define GRBM_STATUS_SE2__VGT_BUSY__SHIFT 0x17
255#define GRBM_STATUS_SE2__PA_BUSY__SHIFT 0x18
256#define GRBM_STATUS_SE2__TA_BUSY__SHIFT 0x19
257#define GRBM_STATUS_SE2__SX_BUSY__SHIFT 0x1a
258#define GRBM_STATUS_SE2__SPI_BUSY__SHIFT 0x1b
259#define GRBM_STATUS_SE2__SC_BUSY__SHIFT 0x1d
260#define GRBM_STATUS_SE2__DB_BUSY__SHIFT 0x1e
261#define GRBM_STATUS_SE2__CB_BUSY__SHIFT 0x1f
262#define GRBM_STATUS_SE2__DB_CLEAN_MASK 0x00000002L
263#define GRBM_STATUS_SE2__CB_CLEAN_MASK 0x00000004L
264#define GRBM_STATUS_SE2__RMI_BUSY_MASK 0x00200000L
265#define GRBM_STATUS_SE2__BCI_BUSY_MASK 0x00400000L
266#define GRBM_STATUS_SE2__VGT_BUSY_MASK 0x00800000L
267#define GRBM_STATUS_SE2__PA_BUSY_MASK 0x01000000L
268#define GRBM_STATUS_SE2__TA_BUSY_MASK 0x02000000L
269#define GRBM_STATUS_SE2__SX_BUSY_MASK 0x04000000L
270#define GRBM_STATUS_SE2__SPI_BUSY_MASK 0x08000000L
271#define GRBM_STATUS_SE2__SC_BUSY_MASK 0x20000000L
272#define GRBM_STATUS_SE2__DB_BUSY_MASK 0x40000000L
273#define GRBM_STATUS_SE2__CB_BUSY_MASK 0x80000000L
274//GRBM_STATUS_SE3
275#define GRBM_STATUS_SE3__DB_CLEAN__SHIFT 0x1
276#define GRBM_STATUS_SE3__CB_CLEAN__SHIFT 0x2
277#define GRBM_STATUS_SE3__RMI_BUSY__SHIFT 0x15
278#define GRBM_STATUS_SE3__BCI_BUSY__SHIFT 0x16
279#define GRBM_STATUS_SE3__VGT_BUSY__SHIFT 0x17
280#define GRBM_STATUS_SE3__PA_BUSY__SHIFT 0x18
281#define GRBM_STATUS_SE3__TA_BUSY__SHIFT 0x19
282#define GRBM_STATUS_SE3__SX_BUSY__SHIFT 0x1a
283#define GRBM_STATUS_SE3__SPI_BUSY__SHIFT 0x1b
284#define GRBM_STATUS_SE3__SC_BUSY__SHIFT 0x1d
285#define GRBM_STATUS_SE3__DB_BUSY__SHIFT 0x1e
286#define GRBM_STATUS_SE3__CB_BUSY__SHIFT 0x1f
287#define GRBM_STATUS_SE3__DB_CLEAN_MASK 0x00000002L
288#define GRBM_STATUS_SE3__CB_CLEAN_MASK 0x00000004L
289#define GRBM_STATUS_SE3__RMI_BUSY_MASK 0x00200000L
290#define GRBM_STATUS_SE3__BCI_BUSY_MASK 0x00400000L
291#define GRBM_STATUS_SE3__VGT_BUSY_MASK 0x00800000L
292#define GRBM_STATUS_SE3__PA_BUSY_MASK 0x01000000L
293#define GRBM_STATUS_SE3__TA_BUSY_MASK 0x02000000L
294#define GRBM_STATUS_SE3__SX_BUSY_MASK 0x04000000L
295#define GRBM_STATUS_SE3__SPI_BUSY_MASK 0x08000000L
296#define GRBM_STATUS_SE3__SC_BUSY_MASK 0x20000000L
297#define GRBM_STATUS_SE3__DB_BUSY_MASK 0x40000000L
298#define GRBM_STATUS_SE3__CB_BUSY_MASK 0x80000000L
299//GRBM_READ_ERROR
300#define GRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x2
301#define GRBM_READ_ERROR__READ_PIPEID__SHIFT 0x14
302#define GRBM_READ_ERROR__READ_MEID__SHIFT 0x16
303#define GRBM_READ_ERROR__READ_ERROR__SHIFT 0x1f
304#define GRBM_READ_ERROR__READ_ADDRESS_MASK 0x0003FFFCL
305#define GRBM_READ_ERROR__READ_PIPEID_MASK 0x00300000L
306#define GRBM_READ_ERROR__READ_MEID_MASK 0x00C00000L
307#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
308//GRBM_READ_ERROR2
309#define GRBM_READ_ERROR2__READ_REQUESTER_CPF__SHIFT 0x10
310#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
311#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
312#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
313#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
314#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF__SHIFT 0x15
315#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF__SHIFT 0x16
316#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF__SHIFT 0x17
317#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0__SHIFT 0x18
318#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1__SHIFT 0x19
319#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2__SHIFT 0x1a
320#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3__SHIFT 0x1b
321#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0__SHIFT 0x1c
322#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1__SHIFT 0x1d
323#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
324#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
325#define GRBM_READ_ERROR2__READ_REQUESTER_CPF_MASK 0x00010000L
326#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
327#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
328#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
329#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
330#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF_MASK 0x00200000L
331#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF_MASK 0x00400000L
332#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF_MASK 0x00800000L
333#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0_MASK 0x01000000L
334#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1_MASK 0x02000000L
335#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2_MASK 0x04000000L
336#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3_MASK 0x08000000L
337#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0_MASK 0x10000000L
338#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1_MASK 0x20000000L
339#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2_MASK 0x40000000L
340#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3_MASK 0x80000000L
341//GRBM_INT_CNTL
342#define GRBM_INT_CNTL__RDERR_INT_ENABLE__SHIFT 0x0
343#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE__SHIFT 0x13
344#define GRBM_INT_CNTL__RDERR_INT_ENABLE_MASK 0x00000001L
345#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE_MASK 0x00080000L
346//GRBM_TRAP_OP
347#define GRBM_TRAP_OP__RW__SHIFT 0x0
348#define GRBM_TRAP_OP__RW_MASK 0x00000001L
349//GRBM_TRAP_ADDR
350#define GRBM_TRAP_ADDR__DATA__SHIFT 0x0
351#define GRBM_TRAP_ADDR__DATA_MASK 0x0003FFFFL
352//GRBM_TRAP_ADDR_MSK
353#define GRBM_TRAP_ADDR_MSK__DATA__SHIFT 0x0
354#define GRBM_TRAP_ADDR_MSK__DATA_MASK 0x0003FFFFL
355//GRBM_TRAP_WD
356#define GRBM_TRAP_WD__DATA__SHIFT 0x0
357#define GRBM_TRAP_WD__DATA_MASK 0xFFFFFFFFL
358//GRBM_TRAP_WD_MSK
359#define GRBM_TRAP_WD_MSK__DATA__SHIFT 0x0
360#define GRBM_TRAP_WD_MSK__DATA_MASK 0xFFFFFFFFL
361//GRBM_DSM_BYPASS
362#define GRBM_DSM_BYPASS__BYPASS_BITS__SHIFT 0x0
363#define GRBM_DSM_BYPASS__BYPASS_EN__SHIFT 0x2
364#define GRBM_DSM_BYPASS__BYPASS_BITS_MASK 0x00000003L
365#define GRBM_DSM_BYPASS__BYPASS_EN_MASK 0x00000004L
366//GRBM_WRITE_ERROR
367#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC__SHIFT 0x0
368#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU__SHIFT 0x1
369#define GRBM_WRITE_ERROR__WRITE_SSRCID__SHIFT 0x2
370#define GRBM_WRITE_ERROR__WRITE_VFID__SHIFT 0x5
371#define GRBM_WRITE_ERROR__WRITE_VF__SHIFT 0xc
372#define GRBM_WRITE_ERROR__WRITE_VMID__SHIFT 0xd
373#define GRBM_WRITE_ERROR__WRITE_PIPEID__SHIFT 0x14
374#define GRBM_WRITE_ERROR__WRITE_MEID__SHIFT 0x16
375#define GRBM_WRITE_ERROR__WRITE_ERROR__SHIFT 0x1f
376#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC_MASK 0x00000001L
377#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU_MASK 0x00000002L
378#define GRBM_WRITE_ERROR__WRITE_SSRCID_MASK 0x0000001CL
379#define GRBM_WRITE_ERROR__WRITE_VFID_MASK 0x000001E0L
380#define GRBM_WRITE_ERROR__WRITE_VF_MASK 0x00001000L
381#define GRBM_WRITE_ERROR__WRITE_VMID_MASK 0x0001E000L
382#define GRBM_WRITE_ERROR__WRITE_PIPEID_MASK 0x00300000L
383#define GRBM_WRITE_ERROR__WRITE_MEID_MASK 0x00C00000L
384#define GRBM_WRITE_ERROR__WRITE_ERROR_MASK 0x80000000L
385//GRBM_IOV_ERROR
386#define GRBM_IOV_ERROR__IOV_ADDR__SHIFT 0x2
387#define GRBM_IOV_ERROR__IOV_VFID__SHIFT 0x14
388#define GRBM_IOV_ERROR__IOV_VF__SHIFT 0x1a
389#define GRBM_IOV_ERROR__IOV_OP__SHIFT 0x1b
390#define GRBM_IOV_ERROR__IOV_ERROR__SHIFT 0x1f
391#define GRBM_IOV_ERROR__IOV_ADDR_MASK 0x000FFFFCL
392#define GRBM_IOV_ERROR__IOV_VFID_MASK 0x03F00000L
393#define GRBM_IOV_ERROR__IOV_VF_MASK 0x04000000L
394#define GRBM_IOV_ERROR__IOV_OP_MASK 0x08000000L
395#define GRBM_IOV_ERROR__IOV_ERROR_MASK 0x80000000L
396//GRBM_CHIP_REVISION
397#define GRBM_CHIP_REVISION__CHIP_REVISION__SHIFT 0x0
398#define GRBM_CHIP_REVISION__CHIP_REVISION_MASK 0x000000FFL
399//GRBM_GFX_CNTL
400#define GRBM_GFX_CNTL__PIPEID__SHIFT 0x0
401#define GRBM_GFX_CNTL__MEID__SHIFT 0x2
402#define GRBM_GFX_CNTL__VMID__SHIFT 0x4
403#define GRBM_GFX_CNTL__QUEUEID__SHIFT 0x8
404#define GRBM_GFX_CNTL__PIPEID_MASK 0x00000003L
405#define GRBM_GFX_CNTL__MEID_MASK 0x0000000CL
406#define GRBM_GFX_CNTL__VMID_MASK 0x000000F0L
407#define GRBM_GFX_CNTL__QUEUEID_MASK 0x00000700L
408//GRBM_RSMU_CFG
409#define GRBM_RSMU_CFG__APERTURE_ID__SHIFT 0x0
410#define GRBM_RSMU_CFG__QOS__SHIFT 0xc
411#define GRBM_RSMU_CFG__POSTED_WR__SHIFT 0x10
412#define GRBM_RSMU_CFG__APERTURE_ID_MASK 0x00000FFFL
413#define GRBM_RSMU_CFG__QOS_MASK 0x0000F000L
414#define GRBM_RSMU_CFG__POSTED_WR_MASK 0x00010000L
415//GRBM_IH_CREDIT
416#define GRBM_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
417#define GRBM_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
418#define GRBM_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
419#define GRBM_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
420//GRBM_PWR_CNTL2
421#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT__SHIFT 0x10
422#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT__SHIFT 0x14
423#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT_MASK 0x00010000L
424#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT_MASK 0x00100000L
425//GRBM_UTCL2_INVAL_RANGE_START
426#define GRBM_UTCL2_INVAL_RANGE_START__DATA__SHIFT 0x0
427#define GRBM_UTCL2_INVAL_RANGE_START__DATA_MASK 0x0003FFFFL
428//GRBM_UTCL2_INVAL_RANGE_END
429#define GRBM_UTCL2_INVAL_RANGE_END__DATA__SHIFT 0x0
430#define GRBM_UTCL2_INVAL_RANGE_END__DATA_MASK 0x0003FFFFL
431//GRBM_RSMU_READ_ERROR
432#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS__SHIFT 0x2
433#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF__SHIFT 0x14
434#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID__SHIFT 0x15
435#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE__SHIFT 0x1b
436#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR__SHIFT 0x1f
437#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS_MASK 0x000FFFFCL
438#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF_MASK 0x00100000L
439#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID_MASK 0x07E00000L
440#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE_MASK 0x08000000L
441#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_MASK 0x80000000L
442//GRBM_CHICKEN_BITS
443#define GRBM_CHICKEN_BITS__DISABLE_CP_VMID_RESET_REQ__SHIFT 0x0
444#define GRBM_CHICKEN_BITS__DISABLE_CP_VMID_RESET_REQ_MASK 0x00000001L
445//GRBM_NOWHERE
446#define GRBM_NOWHERE__DATA__SHIFT 0x0
447#define GRBM_NOWHERE__DATA_MASK 0xFFFFFFFFL
448//GRBM_SCRATCH_REG0
449#define GRBM_SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
450#define GRBM_SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
451//GRBM_SCRATCH_REG1
452#define GRBM_SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
453#define GRBM_SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
454//GRBM_SCRATCH_REG2
455#define GRBM_SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
456#define GRBM_SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
457//GRBM_SCRATCH_REG3
458#define GRBM_SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
459#define GRBM_SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
460//GRBM_SCRATCH_REG4
461#define GRBM_SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
462#define GRBM_SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
463//GRBM_SCRATCH_REG5
464#define GRBM_SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
465#define GRBM_SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
466//GRBM_SCRATCH_REG6
467#define GRBM_SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
468#define GRBM_SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
469//GRBM_SCRATCH_REG7
470#define GRBM_SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
471#define GRBM_SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
472
473
474// addressBlock: gc_cpdec
475//CP_CPC_STATUS
476#define CP_CPC_STATUS__MEC1_BUSY__SHIFT 0x0
477#define CP_CPC_STATUS__MEC2_BUSY__SHIFT 0x1
478#define CP_CPC_STATUS__DC0_BUSY__SHIFT 0x2
479#define CP_CPC_STATUS__DC1_BUSY__SHIFT 0x3
480#define CP_CPC_STATUS__RCIU1_BUSY__SHIFT 0x4
481#define CP_CPC_STATUS__RCIU2_BUSY__SHIFT 0x5
482#define CP_CPC_STATUS__ROQ1_BUSY__SHIFT 0x6
483#define CP_CPC_STATUS__ROQ2_BUSY__SHIFT 0x7
484#define CP_CPC_STATUS__TCIU_BUSY__SHIFT 0xa
485#define CP_CPC_STATUS__SCRATCH_RAM_BUSY__SHIFT 0xb
486#define CP_CPC_STATUS__QU_BUSY__SHIFT 0xc
487#define CP_CPC_STATUS__UTCL2IU_BUSY__SHIFT 0xd
488#define CP_CPC_STATUS__SAVE_RESTORE_BUSY__SHIFT 0xe
489#define CP_CPC_STATUS__CPG_CPC_BUSY__SHIFT 0x1d
490#define CP_CPC_STATUS__CPF_CPC_BUSY__SHIFT 0x1e
491#define CP_CPC_STATUS__CPC_BUSY__SHIFT 0x1f
492#define CP_CPC_STATUS__MEC1_BUSY_MASK 0x00000001L
493#define CP_CPC_STATUS__MEC2_BUSY_MASK 0x00000002L
494#define CP_CPC_STATUS__DC0_BUSY_MASK 0x00000004L
495#define CP_CPC_STATUS__DC1_BUSY_MASK 0x00000008L
496#define CP_CPC_STATUS__RCIU1_BUSY_MASK 0x00000010L
497#define CP_CPC_STATUS__RCIU2_BUSY_MASK 0x00000020L
498#define CP_CPC_STATUS__ROQ1_BUSY_MASK 0x00000040L
499#define CP_CPC_STATUS__ROQ2_BUSY_MASK 0x00000080L
500#define CP_CPC_STATUS__TCIU_BUSY_MASK 0x00000400L
501#define CP_CPC_STATUS__SCRATCH_RAM_BUSY_MASK 0x00000800L
502#define CP_CPC_STATUS__QU_BUSY_MASK 0x00001000L
503#define CP_CPC_STATUS__UTCL2IU_BUSY_MASK 0x00002000L
504#define CP_CPC_STATUS__SAVE_RESTORE_BUSY_MASK 0x00004000L
505#define CP_CPC_STATUS__CPG_CPC_BUSY_MASK 0x20000000L
506#define CP_CPC_STATUS__CPF_CPC_BUSY_MASK 0x40000000L
507#define CP_CPC_STATUS__CPC_BUSY_MASK 0x80000000L
508//CP_CPC_BUSY_STAT
509#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY__SHIFT 0x0
510#define CP_CPC_BUSY_STAT__MEC1_SEMAPOHRE_BUSY__SHIFT 0x1
511#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY__SHIFT 0x2
512#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY__SHIFT 0x3
513#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY__SHIFT 0x4
514#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY__SHIFT 0x5
515#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY__SHIFT 0x6
516#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY__SHIFT 0x7
517#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY__SHIFT 0x8
518#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY__SHIFT 0x9
519#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY__SHIFT 0xa
520#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY__SHIFT 0xb
521#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY__SHIFT 0xc
522#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY__SHIFT 0xd
523#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY__SHIFT 0x10
524#define CP_CPC_BUSY_STAT__MEC2_SEMAPOHRE_BUSY__SHIFT 0x11
525#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY__SHIFT 0x12
526#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY__SHIFT 0x13
527#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY__SHIFT 0x14
528#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY__SHIFT 0x15
529#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY__SHIFT 0x16
530#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY__SHIFT 0x17
531#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY__SHIFT 0x18
532#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY__SHIFT 0x19
533#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY__SHIFT 0x1a
534#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY__SHIFT 0x1b
535#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY__SHIFT 0x1c
536#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY__SHIFT 0x1d
537#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY_MASK 0x00000001L
538#define CP_CPC_BUSY_STAT__MEC1_SEMAPOHRE_BUSY_MASK 0x00000002L
539#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY_MASK 0x00000004L
540#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY_MASK 0x00000008L
541#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY_MASK 0x00000010L
542#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY_MASK 0x00000020L
543#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY_MASK 0x00000040L
544#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY_MASK 0x00000080L
545#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY_MASK 0x00000100L
546#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY_MASK 0x00000200L
547#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY_MASK 0x00000400L
548#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY_MASK 0x00000800L
549#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY_MASK 0x00001000L
550#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY_MASK 0x00002000L
551#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY_MASK 0x00010000L
552#define CP_CPC_BUSY_STAT__MEC2_SEMAPOHRE_BUSY_MASK 0x00020000L
553#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY_MASK 0x00040000L
554#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY_MASK 0x00080000L
555#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY_MASK 0x00100000L
556#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY_MASK 0x00200000L
557#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY_MASK 0x00400000L
558#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY_MASK 0x00800000L
559#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY_MASK 0x01000000L
560#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY_MASK 0x02000000L
561#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY_MASK 0x04000000L
562#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY_MASK 0x08000000L
563#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY_MASK 0x10000000L
564#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY_MASK 0x20000000L
565//CP_CPC_STALLED_STAT1
566#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL__SHIFT 0x3
567#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION__SHIFT 0x4
568#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL__SHIFT 0x6
569#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET__SHIFT 0x8
570#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU__SHIFT 0x9
571#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ__SHIFT 0xa
572#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA__SHIFT 0xd
573#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET__SHIFT 0x10
574#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU__SHIFT 0x11
575#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ__SHIFT 0x12
576#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA__SHIFT 0x15
577#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x16
578#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x17
579#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS__SHIFT 0x18
580#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL_MASK 0x00000008L
581#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION_MASK 0x00000010L
582#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL_MASK 0x00000040L
583#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET_MASK 0x00000100L
584#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_MASK 0x00000200L
585#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ_MASK 0x00000400L
586#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA_MASK 0x00002000L
587#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET_MASK 0x00010000L
588#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_MASK 0x00020000L
589#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ_MASK 0x00040000L
590#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA_MASK 0x00200000L
591#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00400000L
592#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00800000L
593#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS_MASK 0x01000000L
594//CP_CPF_STATUS
595#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY__SHIFT 0x0
596#define CP_CPF_STATUS__CSF_BUSY__SHIFT 0x1
597#define CP_CPF_STATUS__ROQ_ALIGN_BUSY__SHIFT 0x4
598#define CP_CPF_STATUS__ROQ_RING_BUSY__SHIFT 0x5
599#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY__SHIFT 0x6
600#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY__SHIFT 0x7
601#define CP_CPF_STATUS__ROQ_STATE_BUSY__SHIFT 0x8
602#define CP_CPF_STATUS__ROQ_CE_RING_BUSY__SHIFT 0x9
603#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY__SHIFT 0xa
604#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY__SHIFT 0xb
605#define CP_CPF_STATUS__SEMAPHORE_BUSY__SHIFT 0xc
606#define CP_CPF_STATUS__INTERRUPT_BUSY__SHIFT 0xd
607#define CP_CPF_STATUS__TCIU_BUSY__SHIFT 0xe
608#define CP_CPF_STATUS__HQD_BUSY__SHIFT 0xf
609#define CP_CPF_STATUS__PRT_BUSY__SHIFT 0x10
610#define CP_CPF_STATUS__UTCL2IU_BUSY__SHIFT 0x11
611#define CP_CPF_STATUS__CPF_GFX_BUSY__SHIFT 0x1a
612#define CP_CPF_STATUS__CPF_CMP_BUSY__SHIFT 0x1b
613#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY__SHIFT 0x1c
614#define CP_CPF_STATUS__CPC_CPF_BUSY__SHIFT 0x1e
615#define CP_CPF_STATUS__CPF_BUSY__SHIFT 0x1f
616#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY_MASK 0x00000001L
617#define CP_CPF_STATUS__CSF_BUSY_MASK 0x00000002L
618#define CP_CPF_STATUS__ROQ_ALIGN_BUSY_MASK 0x00000010L
619#define CP_CPF_STATUS__ROQ_RING_BUSY_MASK 0x00000020L
620#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY_MASK 0x00000040L
621#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY_MASK 0x00000080L
622#define CP_CPF_STATUS__ROQ_STATE_BUSY_MASK 0x00000100L
623#define CP_CPF_STATUS__ROQ_CE_RING_BUSY_MASK 0x00000200L
624#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY_MASK 0x00000400L
625#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY_MASK 0x00000800L
626#define CP_CPF_STATUS__SEMAPHORE_BUSY_MASK 0x00001000L
627#define CP_CPF_STATUS__INTERRUPT_BUSY_MASK 0x00002000L
628#define CP_CPF_STATUS__TCIU_BUSY_MASK 0x00004000L
629#define CP_CPF_STATUS__HQD_BUSY_MASK 0x00008000L
630#define CP_CPF_STATUS__PRT_BUSY_MASK 0x00010000L
631#define CP_CPF_STATUS__UTCL2IU_BUSY_MASK 0x00020000L
632#define CP_CPF_STATUS__CPF_GFX_BUSY_MASK 0x04000000L
633#define CP_CPF_STATUS__CPF_CMP_BUSY_MASK 0x08000000L
634#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY_MASK 0x30000000L
635#define CP_CPF_STATUS__CPC_CPF_BUSY_MASK 0x40000000L
636#define CP_CPF_STATUS__CPF_BUSY_MASK 0x80000000L
637//CP_CPF_BUSY_STAT
638#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
639#define CP_CPF_BUSY_STAT__CSF_RING_BUSY__SHIFT 0x1
640#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY__SHIFT 0x2
641#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY__SHIFT 0x3
642#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY__SHIFT 0x4
643#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY__SHIFT 0x5
644#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY__SHIFT 0x6
645#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY__SHIFT 0x7
646#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY__SHIFT 0x8
647#define CP_CPF_BUSY_STAT__OUTSTANDING_READ_TAGS__SHIFT 0x9
648#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY__SHIFT 0xb
649#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY__SHIFT 0xc
650#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY__SHIFT 0xd
651#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
652#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY__SHIFT 0xf
653#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY__SHIFT 0x10
654#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY__SHIFT 0x11
655#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY__SHIFT 0x12
656#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY__SHIFT 0x13
657#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY__SHIFT 0x14
658#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY__SHIFT 0x15
659#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
660#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY__SHIFT 0x17
661#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
662#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY__SHIFT 0x19
663#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY__SHIFT 0x1a
664#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY__SHIFT 0x1b
665#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY__SHIFT 0x1c
666#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY__SHIFT 0x1d
667#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY__SHIFT 0x1e
668#define CP_CPF_BUSY_STAT__HQD_IB_BUSY__SHIFT 0x1f
669#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
670#define CP_CPF_BUSY_STAT__CSF_RING_BUSY_MASK 0x00000002L
671#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY_MASK 0x00000004L
672#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY_MASK 0x00000008L
673#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY_MASK 0x00000010L
674#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY_MASK 0x00000020L
675#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY_MASK 0x00000040L
676#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY_MASK 0x00000080L
677#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY_MASK 0x00000100L
678#define CP_CPF_BUSY_STAT__OUTSTANDING_READ_TAGS_MASK 0x00000200L
679#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY_MASK 0x00000800L
680#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY_MASK 0x00001000L
681#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY_MASK 0x00002000L
682#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY_MASK 0x00004000L
683#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY_MASK 0x00008000L
684#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY_MASK 0x00010000L
685#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY_MASK 0x00020000L
686#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY_MASK 0x00040000L
687#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY_MASK 0x00080000L
688#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY_MASK 0x00100000L
689#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY_MASK 0x00200000L
690#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY_MASK 0x00400000L
691#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY_MASK 0x00800000L
692#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY_MASK 0x01000000L
693#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY_MASK 0x02000000L
694#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY_MASK 0x04000000L
695#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY_MASK 0x08000000L
696#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY_MASK 0x10000000L
697#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY_MASK 0x20000000L
698#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY_MASK 0x40000000L
699#define CP_CPF_BUSY_STAT__HQD_IB_BUSY_MASK 0x80000000L
700//CP_CPF_STALLED_STAT1
701#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA__SHIFT 0x0
702#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA__SHIFT 0x1
703#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA__SHIFT 0x2
704#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA__SHIFT 0x3
705#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE__SHIFT 0x5
706#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x6
707#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x7
708#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x8
709#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS__SHIFT 0x9
710#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS__SHIFT 0xa
711#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE__SHIFT 0xb
712#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA_MASK 0x00000001L
713#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA_MASK 0x00000002L
714#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA_MASK 0x00000004L
715#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA_MASK 0x00000008L
716#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE_MASK 0x00000020L
717#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x00000040L
718#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00000080L
719#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00000100L
720#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS_MASK 0x00000200L
721#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS_MASK 0x00000400L
722#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE_MASK 0x00000800L
723//CP_CPC_GRBM_FREE_COUNT
724#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
725#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
726//CP_MEC_CNTL
727#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x4
728#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET__SHIFT 0x10
729#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET__SHIFT 0x11
730#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET__SHIFT 0x12
731#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET__SHIFT 0x13
732#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET__SHIFT 0x14
733#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET__SHIFT 0x15
734#define CP_MEC_CNTL__MEC_ME2_HALT__SHIFT 0x1c
735#define CP_MEC_CNTL__MEC_ME2_STEP__SHIFT 0x1d
736#define CP_MEC_CNTL__MEC_ME1_HALT__SHIFT 0x1e
737#define CP_MEC_CNTL__MEC_ME1_STEP__SHIFT 0x1f
738#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x00000010L
739#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK 0x00010000L
740#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK 0x00020000L
741#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK 0x00040000L
742#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK 0x00080000L
743#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK 0x00100000L
744#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK 0x00200000L
745#define CP_MEC_CNTL__MEC_ME2_HALT_MASK 0x10000000L
746#define CP_MEC_CNTL__MEC_ME2_STEP_MASK 0x20000000L
747#define CP_MEC_CNTL__MEC_ME1_HALT_MASK 0x40000000L
748#define CP_MEC_CNTL__MEC_ME1_STEP_MASK 0x80000000L
749//CP_MEC_ME1_HEADER_DUMP
750#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
751#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
752//CP_MEC_ME2_HEADER_DUMP
753#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
754#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
755//CP_CPC_SCRATCH_INDEX
756#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
757#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
758//CP_CPC_SCRATCH_DATA
759#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
760#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
761//CP_CPF_GRBM_FREE_COUNT
762#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
763#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x00000007L
764//CP_CPC_HALT_HYST_COUNT
765#define CP_CPC_HALT_HYST_COUNT__COUNT__SHIFT 0x0
766#define CP_CPC_HALT_HYST_COUNT__COUNT_MASK 0x0000000FL
767//CP_PRT_LOD_STATS_CNTL0
768#define CP_PRT_LOD_STATS_CNTL0__BU_SIZE__SHIFT 0x0
769#define CP_PRT_LOD_STATS_CNTL0__BU_SIZE_MASK 0xFFFFFFFFL
770//CP_PRT_LOD_STATS_CNTL1
771#define CP_PRT_LOD_STATS_CNTL1__BASE_LO__SHIFT 0x0
772#define CP_PRT_LOD_STATS_CNTL1__BASE_LO_MASK 0xFFFFFFFFL
773//CP_PRT_LOD_STATS_CNTL2
774#define CP_PRT_LOD_STATS_CNTL2__BASE_HI__SHIFT 0x0
775#define CP_PRT_LOD_STATS_CNTL2__BASE_HI_MASK 0x000003FFL
776//CP_PRT_LOD_STATS_CNTL3
777#define CP_PRT_LOD_STATS_CNTL3__INTERVAL__SHIFT 0x2
778#define CP_PRT_LOD_STATS_CNTL3__RESET_CNT__SHIFT 0xa
779#define CP_PRT_LOD_STATS_CNTL3__RESET_FORCE__SHIFT 0x12
780#define CP_PRT_LOD_STATS_CNTL3__REPORT_AND_RESET__SHIFT 0x13
781#define CP_PRT_LOD_STATS_CNTL3__MC_VMID__SHIFT 0x17
782#define CP_PRT_LOD_STATS_CNTL3__CACHE_POLICY__SHIFT 0x1c
783#define CP_PRT_LOD_STATS_CNTL3__INTERVAL_MASK 0x000003FCL
784#define CP_PRT_LOD_STATS_CNTL3__RESET_CNT_MASK 0x0003FC00L
785#define CP_PRT_LOD_STATS_CNTL3__RESET_FORCE_MASK 0x00040000L
786#define CP_PRT_LOD_STATS_CNTL3__REPORT_AND_RESET_MASK 0x00080000L
787#define CP_PRT_LOD_STATS_CNTL3__MC_VMID_MASK 0x07800000L
788#define CP_PRT_LOD_STATS_CNTL3__CACHE_POLICY_MASK 0x10000000L
789//CP_CE_COMPARE_COUNT
790#define CP_CE_COMPARE_COUNT__COMPARE_COUNT__SHIFT 0x0
791#define CP_CE_COMPARE_COUNT__COMPARE_COUNT_MASK 0xFFFFFFFFL
792//CP_CE_DE_COUNT
793#define CP_CE_DE_COUNT__DRAW_ENGINE_COUNT__SHIFT 0x0
794#define CP_CE_DE_COUNT__DRAW_ENGINE_COUNT_MASK 0xFFFFFFFFL
795//CP_DE_CE_COUNT
796#define CP_DE_CE_COUNT__CONST_ENGINE_COUNT__SHIFT 0x0
797#define CP_DE_CE_COUNT__CONST_ENGINE_COUNT_MASK 0xFFFFFFFFL
798//CP_DE_LAST_INVAL_COUNT
799#define CP_DE_LAST_INVAL_COUNT__LAST_INVAL_COUNT__SHIFT 0x0
800#define CP_DE_LAST_INVAL_COUNT__LAST_INVAL_COUNT_MASK 0xFFFFFFFFL
801//CP_DE_DE_COUNT
802#define CP_DE_DE_COUNT__DRAW_ENGINE_COUNT__SHIFT 0x0
803#define CP_DE_DE_COUNT__DRAW_ENGINE_COUNT_MASK 0xFFFFFFFFL
804//CP_STALLED_STAT3
805#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
806#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV__SHIFT 0x1
807#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER__SHIFT 0x2
808#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY__SHIFT 0x3
809#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY__SHIFT 0x4
810#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY__SHIFT 0x5
811#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV__SHIFT 0x6
812#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV__SHIFT 0x7
813#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA__SHIFT 0xa
814#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG__SHIFT 0xb
815#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER__SHIFT 0xc
816#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW__SHIFT 0xd
817#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE__SHIFT 0xe
818#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS__SHIFT 0xf
819#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x10
820#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x11
821#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE__SHIFT 0x12
822#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x13
823#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS__SHIFT 0x14
824#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
825#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV_MASK 0x00000002L
826#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER_MASK 0x00000004L
827#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY_MASK 0x00000008L
828#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY_MASK 0x00000010L
829#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY_MASK 0x00000020L
830#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV_MASK 0x00000040L
831#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV_MASK 0x00000080L
832#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA_MASK 0x00000400L
833#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG_MASK 0x00000800L
834#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_MASK 0x00001000L
835#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW_MASK 0x00002000L
836#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE_MASK 0x00004000L
837#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS_MASK 0x00008000L
838#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM_MASK 0x00010000L
839#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00020000L
840#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE_MASK 0x00040000L
841#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS_MASK 0x00080000L
842#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS_MASK 0x00100000L
843//CP_STALLED_STAT1
844#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV__SHIFT 0x0
845#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV__SHIFT 0x2
846#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV__SHIFT 0x4
847#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG__SHIFT 0xa
848#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG__SHIFT 0xb
849#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM__SHIFT 0xc
850#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0xd
851#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA__SHIFT 0xe
852#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA__SHIFT 0xf
853#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE__SHIFT 0x17
854#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE__SHIFT 0x18
855#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE__SHIFT 0x19
856#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ__SHIFT 0x1a
857#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ__SHIFT 0x1b
858#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ__SHIFT 0x1c
859#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION__SHIFT 0x1d
860#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV_MASK 0x00000001L
861#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_MASK 0x00000004L
862#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_MASK 0x00000010L
863#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG_MASK 0x00000400L
864#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG_MASK 0x00000800L
865#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM_MASK 0x00001000L
866#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00002000L
867#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA_MASK 0x00004000L
868#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA_MASK 0x00008000L
869#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE_MASK 0x00800000L
870#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE_MASK 0x01000000L
871#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE_MASK 0x02000000L
872#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ_MASK 0x04000000L
873#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ_MASK 0x08000000L
874#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ_MASK 0x10000000L
875#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION_MASK 0x20000000L
876//CP_STALLED_STAT2
877#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
878#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV__SHIFT 0x1
879#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV__SHIFT 0x2
880#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING__SHIFT 0x4
881#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING__SHIFT 0x5
882#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA__SHIFT 0x8
883#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER__SHIFT 0x9
884#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER__SHIFT 0xa
885#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME__SHIFT 0xb
886#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV__SHIFT 0xc
887#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV__SHIFT 0xd
888#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP__SHIFT 0xe
889#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH__SHIFT 0xf
890#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x10
891#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x11
892#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ__SHIFT 0x12
893#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x13
894#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x14
895#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE__SHIFT 0x15
896#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM__SHIFT 0x16
897#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING__SHIFT 0x17
898#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING__SHIFT 0x18
899#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE__SHIFT 0x19
900#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE__SHIFT 0x1a
901#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM__SHIFT 0x1b
902#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION__SHIFT 0x1c
903#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE__SHIFT 0x1d
904#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS__SHIFT 0x1e
905#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN__SHIFT 0x1f
906#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
907#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV_MASK 0x00000002L
908#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV_MASK 0x00000004L
909#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING_MASK 0x00000010L
910#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING_MASK 0x00000020L
911#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA_MASK 0x00000100L
912#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER_MASK 0x00000200L
913#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER_MASK 0x00000400L
914#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME_MASK 0x00000800L
915#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV_MASK 0x00001000L
916#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV_MASK 0x00002000L
917#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP_MASK 0x00004000L
918#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH_MASK 0x00008000L
919#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00010000L
920#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00020000L
921#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ_MASK 0x00040000L
922#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM_MASK 0x00080000L
923#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00100000L
924#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE_MASK 0x00200000L
925#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM_MASK 0x00400000L
926#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING_MASK 0x00800000L
927#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING_MASK 0x01000000L
928#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE_MASK 0x02000000L
929#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE_MASK 0x04000000L
930#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM_MASK 0x08000000L
931#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION_MASK 0x10000000L
932#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE_MASK 0x20000000L
933#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS_MASK 0x40000000L
934#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN_MASK 0x80000000L
935//CP_BUSY_STAT
936#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
937#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO__SHIFT 0x6
938#define CP_BUSY_STAT__PFP_PARSING_PACKETS__SHIFT 0x7
939#define CP_BUSY_STAT__ME_PARSING_PACKETS__SHIFT 0x8
940#define CP_BUSY_STAT__RCIU_PFP_BUSY__SHIFT 0x9
941#define CP_BUSY_STAT__RCIU_ME_BUSY__SHIFT 0xa
942#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY__SHIFT 0xc
943#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING__SHIFT 0xd
944#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS__SHIFT 0xe
945#define CP_BUSY_STAT__GFX_CONTEXT_BUSY__SHIFT 0xf
946#define CP_BUSY_STAT__ME_PARSER_BUSY__SHIFT 0x11
947#define CP_BUSY_STAT__EOP_DONE_BUSY__SHIFT 0x12
948#define CP_BUSY_STAT__STRM_OUT_BUSY__SHIFT 0x13
949#define CP_BUSY_STAT__PIPE_STATS_BUSY__SHIFT 0x14
950#define CP_BUSY_STAT__RCIU_CE_BUSY__SHIFT 0x15
951#define CP_BUSY_STAT__CE_PARSING_PACKETS__SHIFT 0x16
952#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
953#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO_MASK 0x00000040L
954#define CP_BUSY_STAT__PFP_PARSING_PACKETS_MASK 0x00000080L
955#define CP_BUSY_STAT__ME_PARSING_PACKETS_MASK 0x00000100L
956#define CP_BUSY_STAT__RCIU_PFP_BUSY_MASK 0x00000200L
957#define CP_BUSY_STAT__RCIU_ME_BUSY_MASK 0x00000400L
958#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY_MASK 0x00001000L
959#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING_MASK 0x00002000L
960#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS_MASK 0x00004000L
961#define CP_BUSY_STAT__GFX_CONTEXT_BUSY_MASK 0x00008000L
962#define CP_BUSY_STAT__ME_PARSER_BUSY_MASK 0x00020000L
963#define CP_BUSY_STAT__EOP_DONE_BUSY_MASK 0x00040000L
964#define CP_BUSY_STAT__STRM_OUT_BUSY_MASK 0x00080000L
965#define CP_BUSY_STAT__PIPE_STATS_BUSY_MASK 0x00100000L
966#define CP_BUSY_STAT__RCIU_CE_BUSY_MASK 0x00200000L
967#define CP_BUSY_STAT__CE_PARSING_PACKETS_MASK 0x00400000L
968//CP_STAT
969#define CP_STAT__ROQ_RING_BUSY__SHIFT 0x9
970#define CP_STAT__ROQ_INDIRECT1_BUSY__SHIFT 0xa
971#define CP_STAT__ROQ_INDIRECT2_BUSY__SHIFT 0xb
972#define CP_STAT__ROQ_STATE_BUSY__SHIFT 0xc
973#define CP_STAT__DC_BUSY__SHIFT 0xd
974#define CP_STAT__UTCL2IU_BUSY__SHIFT 0xe
975#define CP_STAT__PFP_BUSY__SHIFT 0xf
976#define CP_STAT__MEQ_BUSY__SHIFT 0x10
977#define CP_STAT__ME_BUSY__SHIFT 0x11
978#define CP_STAT__QUERY_BUSY__SHIFT 0x12
979#define CP_STAT__SEMAPHORE_BUSY__SHIFT 0x13
980#define CP_STAT__INTERRUPT_BUSY__SHIFT 0x14
981#define CP_STAT__SURFACE_SYNC_BUSY__SHIFT 0x15
982#define CP_STAT__DMA_BUSY__SHIFT 0x16
983#define CP_STAT__RCIU_BUSY__SHIFT 0x17
984#define CP_STAT__SCRATCH_RAM_BUSY__SHIFT 0x18
985#define CP_STAT__CE_BUSY__SHIFT 0x1a
986#define CP_STAT__TCIU_BUSY__SHIFT 0x1b
987#define CP_STAT__ROQ_CE_RING_BUSY__SHIFT 0x1c
988#define CP_STAT__ROQ_CE_INDIRECT1_BUSY__SHIFT 0x1d
989#define CP_STAT__ROQ_CE_INDIRECT2_BUSY__SHIFT 0x1e
990#define CP_STAT__CP_BUSY__SHIFT 0x1f
991#define CP_STAT__ROQ_RING_BUSY_MASK 0x00000200L
992#define CP_STAT__ROQ_INDIRECT1_BUSY_MASK 0x00000400L
993#define CP_STAT__ROQ_INDIRECT2_BUSY_MASK 0x00000800L
994#define CP_STAT__ROQ_STATE_BUSY_MASK 0x00001000L
995#define CP_STAT__DC_BUSY_MASK 0x00002000L
996#define CP_STAT__UTCL2IU_BUSY_MASK 0x00004000L
997#define CP_STAT__PFP_BUSY_MASK 0x00008000L
998#define CP_STAT__MEQ_BUSY_MASK 0x00010000L
999#define CP_STAT__ME_BUSY_MASK 0x00020000L
1000#define CP_STAT__QUERY_BUSY_MASK 0x00040000L
1001#define CP_STAT__SEMAPHORE_BUSY_MASK 0x00080000L
1002#define CP_STAT__INTERRUPT_BUSY_MASK 0x00100000L
1003#define CP_STAT__SURFACE_SYNC_BUSY_MASK 0x00200000L
1004#define CP_STAT__DMA_BUSY_MASK 0x00400000L
1005#define CP_STAT__RCIU_BUSY_MASK 0x00800000L
1006#define CP_STAT__SCRATCH_RAM_BUSY_MASK 0x01000000L
1007#define CP_STAT__CE_BUSY_MASK 0x04000000L
1008#define CP_STAT__TCIU_BUSY_MASK 0x08000000L
1009#define CP_STAT__ROQ_CE_RING_BUSY_MASK 0x10000000L
1010#define CP_STAT__ROQ_CE_INDIRECT1_BUSY_MASK 0x20000000L
1011#define CP_STAT__ROQ_CE_INDIRECT2_BUSY_MASK 0x40000000L
1012#define CP_STAT__CP_BUSY_MASK 0x80000000L
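Each register above follows the generated-header convention of paired *__SHIFT and *_MASK macros: the mask selects the field's bits and the shift right-aligns them. A minimal decoding sketch built on the CP_STAT definitions above — the GET_FIELD helper and decode_cp_stat() below are illustrative only and are not part of this header or of the driver:

#include <stdint.h>
#include <stdio.h>

/* Extract one field from a 32-bit register value using the
 * <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT pairs defined above. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

static void decode_cp_stat(uint32_t cp_stat)
{
	/* cp_stat is a previously read CP_STAT value; sample data only. */
	if (GET_FIELD(cp_stat, CP_STAT, CP_BUSY))
		printf("CP busy: PFP=%u ME=%u CE=%u MEQ=%u DMA=%u\n",
		       (unsigned int)GET_FIELD(cp_stat, CP_STAT, PFP_BUSY),
		       (unsigned int)GET_FIELD(cp_stat, CP_STAT, ME_BUSY),
		       (unsigned int)GET_FIELD(cp_stat, CP_STAT, CE_BUSY),
		       (unsigned int)GET_FIELD(cp_stat, CP_STAT, MEQ_BUSY),
		       (unsigned int)GET_FIELD(cp_stat, CP_STAT, DMA_BUSY));
}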
1013//CP_ME_HEADER_DUMP
1014#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP__SHIFT 0x0
1015#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP_MASK 0xFFFFFFFFL
1016//CP_PFP_HEADER_DUMP
1017#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP__SHIFT 0x0
1018#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP_MASK 0xFFFFFFFFL
1019//CP_GRBM_FREE_COUNT
1020#define CP_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
1021#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS__SHIFT 0x8
1022#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP__SHIFT 0x10
1023#define CP_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
1024#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS_MASK 0x00003F00L
1025#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP_MASK 0x003F0000L
1026//CP_CE_HEADER_DUMP
1027#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP__SHIFT 0x0
1028#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP_MASK 0xFFFFFFFFL
1029//CP_PFP_INSTR_PNTR
1030#define CP_PFP_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
1031#define CP_PFP_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
1032//CP_ME_INSTR_PNTR
1033#define CP_ME_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
1034#define CP_ME_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
1035//CP_CE_INSTR_PNTR
1036#define CP_CE_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
1037#define CP_CE_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
1038//CP_MEC1_INSTR_PNTR
1039#define CP_MEC1_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
1040#define CP_MEC1_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
1041//CP_MEC2_INSTR_PNTR
1042#define CP_MEC2_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
1043#define CP_MEC2_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
1044//CP_CSF_STAT
1045#define CP_CSF_STAT__BUFFER_REQUEST_COUNT__SHIFT 0x8
1046#define CP_CSF_STAT__BUFFER_REQUEST_COUNT_MASK 0x0001FF00L
1047//CP_ME_CNTL
1048#define CP_ME_CNTL__CE_INVALIDATE_ICACHE__SHIFT 0x4
1049#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE__SHIFT 0x6
1050#define CP_ME_CNTL__ME_INVALIDATE_ICACHE__SHIFT 0x8
1051#define CP_ME_CNTL__CE_PIPE0_RESET__SHIFT 0x10
1052#define CP_ME_CNTL__CE_PIPE1_RESET__SHIFT 0x11
1053#define CP_ME_CNTL__PFP_PIPE0_RESET__SHIFT 0x12
1054#define CP_ME_CNTL__PFP_PIPE1_RESET__SHIFT 0x13
1055#define CP_ME_CNTL__ME_PIPE0_RESET__SHIFT 0x14
1056#define CP_ME_CNTL__ME_PIPE1_RESET__SHIFT 0x15
1057#define CP_ME_CNTL__CE_HALT__SHIFT 0x18
1058#define CP_ME_CNTL__CE_STEP__SHIFT 0x19
1059#define CP_ME_CNTL__PFP_HALT__SHIFT 0x1a
1060#define CP_ME_CNTL__PFP_STEP__SHIFT 0x1b
1061#define CP_ME_CNTL__ME_HALT__SHIFT 0x1c
1062#define CP_ME_CNTL__ME_STEP__SHIFT 0x1d
1063#define CP_ME_CNTL__CE_INVALIDATE_ICACHE_MASK 0x00000010L
1064#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE_MASK 0x00000040L
1065#define CP_ME_CNTL__ME_INVALIDATE_ICACHE_MASK 0x00000100L
1066#define CP_ME_CNTL__CE_PIPE0_RESET_MASK 0x00010000L
1067#define CP_ME_CNTL__CE_PIPE1_RESET_MASK 0x00020000L
1068#define CP_ME_CNTL__PFP_PIPE0_RESET_MASK 0x00040000L
1069#define CP_ME_CNTL__PFP_PIPE1_RESET_MASK 0x00080000L
1070#define CP_ME_CNTL__ME_PIPE0_RESET_MASK 0x00100000L
1071#define CP_ME_CNTL__ME_PIPE1_RESET_MASK 0x00200000L
1072#define CP_ME_CNTL__CE_HALT_MASK 0x01000000L
1073#define CP_ME_CNTL__CE_STEP_MASK 0x02000000L
1074#define CP_ME_CNTL__PFP_HALT_MASK 0x04000000L
1075#define CP_ME_CNTL__PFP_STEP_MASK 0x08000000L
1076#define CP_ME_CNTL__ME_HALT_MASK 0x10000000L
1077#define CP_ME_CNTL__ME_STEP_MASK 0x20000000L
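For a control register such as CP_ME_CNTL the same pairs are used in the write direction: clear the field with its mask, then OR in the new value shifted into position (the amdgpu driver wraps this pattern in its own REG_GET_FIELD()/REG_SET_FIELD()-style helpers). The SET_FIELD macro and cp_me_cntl_halt_all() below are an illustrative sketch relying only on the definitions above, not driver code:

#include <stdint.h>

/* Replace one field inside a previously read register value; purely
 * illustrative, built on the mask/shift pairs defined above. */
#define SET_FIELD(orig, reg, field, v)                                   \
	(((orig) & ~(uint32_t)reg##__##field##_MASK) |                   \
	 (((uint32_t)(v) << reg##__##field##__SHIFT) &                   \
	  (uint32_t)reg##__##field##_MASK))

static uint32_t cp_me_cntl_halt_all(uint32_t cur)
{
	/* Request a halt of the prefetch parser (PFP), micro engine (ME)
	 * and constant engine (CE) front ends. */
	cur = SET_FIELD(cur, CP_ME_CNTL, PFP_HALT, 1);
	cur = SET_FIELD(cur, CP_ME_CNTL, ME_HALT, 1);
	cur = SET_FIELD(cur, CP_ME_CNTL, CE_HALT, 1);
	return cur;
}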
1078//CP_CNTX_STAT
1079#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS__SHIFT 0x0
1080#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT__SHIFT 0x8
1081#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS__SHIFT 0x14
1082#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT__SHIFT 0x1c
1083#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS_MASK 0x000000FFL
1084#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT_MASK 0x00000700L
1085#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS_MASK 0x0FF00000L
1086#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT_MASK 0x70000000L
1087//CP_ME_PREEMPTION
1088#define CP_ME_PREEMPTION__OBSOLETE__SHIFT 0x0
1089#define CP_ME_PREEMPTION__OBSOLETE_MASK 0x00000001L
1090//CP_ROQ_THRESHOLDS
1091#define CP_ROQ_THRESHOLDS__IB1_START__SHIFT 0x0
1092#define CP_ROQ_THRESHOLDS__IB2_START__SHIFT 0x8
1093#define CP_ROQ_THRESHOLDS__IB1_START_MASK 0x000000FFL
1094#define CP_ROQ_THRESHOLDS__IB2_START_MASK 0x0000FF00L
1095//CP_MEQ_STQ_THRESHOLD
1096#define CP_MEQ_STQ_THRESHOLD__STQ_START__SHIFT 0x0
1097#define CP_MEQ_STQ_THRESHOLD__STQ_START_MASK 0x000000FFL
1098//CP_RB2_RPTR
1099#define CP_RB2_RPTR__RB_RPTR__SHIFT 0x0
1100#define CP_RB2_RPTR__RB_RPTR_MASK 0x000FFFFFL
1101//CP_RB1_RPTR
1102#define CP_RB1_RPTR__RB_RPTR__SHIFT 0x0
1103#define CP_RB1_RPTR__RB_RPTR_MASK 0x000FFFFFL
1104//CP_RB0_RPTR
1105#define CP_RB0_RPTR__RB_RPTR__SHIFT 0x0
1106#define CP_RB0_RPTR__RB_RPTR_MASK 0x000FFFFFL
1107//CP_RB_RPTR
1108#define CP_RB_RPTR__RB_RPTR__SHIFT 0x0
1109#define CP_RB_RPTR__RB_RPTR_MASK 0x000FFFFFL
1110//CP_RB_WPTR_DELAY
1111#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER__SHIFT 0x0
1112#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT__SHIFT 0x1c
1113#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER_MASK 0x0FFFFFFFL
1114#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT_MASK 0xF0000000L
1115//CP_RB_WPTR_POLL_CNTL
1116#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT 0x0
1117#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
1118#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK 0x0000FFFFL
1119#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
1120//CP_ROQ1_THRESHOLDS
1121#define CP_ROQ1_THRESHOLDS__RB1_START__SHIFT 0x0
1122#define CP_ROQ1_THRESHOLDS__RB2_START__SHIFT 0x8
1123#define CP_ROQ1_THRESHOLDS__R0_IB1_START__SHIFT 0x10
1124#define CP_ROQ1_THRESHOLDS__R1_IB1_START__SHIFT 0x18
1125#define CP_ROQ1_THRESHOLDS__RB1_START_MASK 0x000000FFL
1126#define CP_ROQ1_THRESHOLDS__RB2_START_MASK 0x0000FF00L
1127#define CP_ROQ1_THRESHOLDS__R0_IB1_START_MASK 0x00FF0000L
1128#define CP_ROQ1_THRESHOLDS__R1_IB1_START_MASK 0xFF000000L
1129//CP_ROQ2_THRESHOLDS
1130#define CP_ROQ2_THRESHOLDS__R2_IB1_START__SHIFT 0x0
1131#define CP_ROQ2_THRESHOLDS__R0_IB2_START__SHIFT 0x8
1132#define CP_ROQ2_THRESHOLDS__R1_IB2_START__SHIFT 0x10
1133#define CP_ROQ2_THRESHOLDS__R2_IB2_START__SHIFT 0x18
1134#define CP_ROQ2_THRESHOLDS__R2_IB1_START_MASK 0x000000FFL
1135#define CP_ROQ2_THRESHOLDS__R0_IB2_START_MASK 0x0000FF00L
1136#define CP_ROQ2_THRESHOLDS__R1_IB2_START_MASK 0x00FF0000L
1137#define CP_ROQ2_THRESHOLDS__R2_IB2_START_MASK 0xFF000000L
1138//CP_STQ_THRESHOLDS
1139#define CP_STQ_THRESHOLDS__STQ0_START__SHIFT 0x0
1140#define CP_STQ_THRESHOLDS__STQ1_START__SHIFT 0x8
1141#define CP_STQ_THRESHOLDS__STQ2_START__SHIFT 0x10
1142#define CP_STQ_THRESHOLDS__STQ0_START_MASK 0x000000FFL
1143#define CP_STQ_THRESHOLDS__STQ1_START_MASK 0x0000FF00L
1144#define CP_STQ_THRESHOLDS__STQ2_START_MASK 0x00FF0000L
1145//CP_QUEUE_THRESHOLDS
1146#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT 0x0
1147#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT 0x8
1148#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START_MASK 0x0000003FL
1149#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START_MASK 0x00003F00L
1150//CP_MEQ_THRESHOLDS
1151#define CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT 0x0
1152#define CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT 0x8
1153#define CP_MEQ_THRESHOLDS__MEQ1_START_MASK 0x000000FFL
1154#define CP_MEQ_THRESHOLDS__MEQ2_START_MASK 0x0000FF00L
1155//CP_ROQ_AVAIL
1156#define CP_ROQ_AVAIL__ROQ_CNT_RING__SHIFT 0x0
1157#define CP_ROQ_AVAIL__ROQ_CNT_IB1__SHIFT 0x10
1158#define CP_ROQ_AVAIL__ROQ_CNT_RING_MASK 0x000007FFL
1159#define CP_ROQ_AVAIL__ROQ_CNT_IB1_MASK 0x07FF0000L
1160//CP_STQ_AVAIL
1161#define CP_STQ_AVAIL__STQ_CNT__SHIFT 0x0
1162#define CP_STQ_AVAIL__STQ_CNT_MASK 0x000001FFL
1163//CP_ROQ2_AVAIL
1164#define CP_ROQ2_AVAIL__ROQ_CNT_IB2__SHIFT 0x0
1165#define CP_ROQ2_AVAIL__ROQ_CNT_IB2_MASK 0x000007FFL
1166//CP_MEQ_AVAIL
1167#define CP_MEQ_AVAIL__MEQ_CNT__SHIFT 0x0
1168#define CP_MEQ_AVAIL__MEQ_CNT_MASK 0x000003FFL
1169//CP_CMD_INDEX
1170#define CP_CMD_INDEX__CMD_INDEX__SHIFT 0x0
1171#define CP_CMD_INDEX__CMD_ME_SEL__SHIFT 0xc
1172#define CP_CMD_INDEX__CMD_QUEUE_SEL__SHIFT 0x10
1173#define CP_CMD_INDEX__CMD_INDEX_MASK 0x000007FFL
1174#define CP_CMD_INDEX__CMD_ME_SEL_MASK 0x00003000L
1175#define CP_CMD_INDEX__CMD_QUEUE_SEL_MASK 0x00070000L
1176//CP_CMD_DATA
1177#define CP_CMD_DATA__CMD_DATA__SHIFT 0x0
1178#define CP_CMD_DATA__CMD_DATA_MASK 0xFFFFFFFFL
1179//CP_ROQ_RB_STAT
1180#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY__SHIFT 0x0
1181#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY__SHIFT 0x10
1182#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY_MASK 0x000003FFL
1183#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY_MASK 0x03FF0000L
1184//CP_ROQ_IB1_STAT
1185#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1__SHIFT 0x0
1186#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1__SHIFT 0x10
1187#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1_MASK 0x000003FFL
1188#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1_MASK 0x03FF0000L
1189//CP_ROQ_IB2_STAT
1190#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2__SHIFT 0x0
1191#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2__SHIFT 0x10
1192#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2_MASK 0x000003FFL
1193#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2_MASK 0x03FF0000L
1194//CP_STQ_STAT
1195#define CP_STQ_STAT__STQ_RPTR__SHIFT 0x0
1196#define CP_STQ_STAT__STQ_RPTR_MASK 0x000003FFL
1197//CP_STQ_WR_STAT
1198#define CP_STQ_WR_STAT__STQ_WPTR__SHIFT 0x0
1199#define CP_STQ_WR_STAT__STQ_WPTR_MASK 0x000003FFL
1200//CP_MEQ_STAT
1201#define CP_MEQ_STAT__MEQ_RPTR__SHIFT 0x0
1202#define CP_MEQ_STAT__MEQ_WPTR__SHIFT 0x10
1203#define CP_MEQ_STAT__MEQ_RPTR_MASK 0x000003FFL
1204#define CP_MEQ_STAT__MEQ_WPTR_MASK 0x03FF0000L
1205//CP_CEQ1_AVAIL
1206#define CP_CEQ1_AVAIL__CEQ_CNT_RING__SHIFT 0x0
1207#define CP_CEQ1_AVAIL__CEQ_CNT_IB1__SHIFT 0x10
1208#define CP_CEQ1_AVAIL__CEQ_CNT_RING_MASK 0x000007FFL
1209#define CP_CEQ1_AVAIL__CEQ_CNT_IB1_MASK 0x07FF0000L
1210//CP_CEQ2_AVAIL
1211#define CP_CEQ2_AVAIL__CEQ_CNT_IB2__SHIFT 0x0
1212#define CP_CEQ2_AVAIL__CEQ_CNT_IB2_MASK 0x000007FFL
1213//CP_CE_ROQ_RB_STAT
1214#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY__SHIFT 0x0
1215#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY__SHIFT 0x10
1216#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY_MASK 0x000003FFL
1217#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY_MASK 0x03FF0000L
1218//CP_CE_ROQ_IB1_STAT
1219#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1__SHIFT 0x0
1220#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1__SHIFT 0x10
1221#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1_MASK 0x000003FFL
1222#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1_MASK 0x03FF0000L
1223//CP_CE_ROQ_IB2_STAT
1224#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2__SHIFT 0x0
1225#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2__SHIFT 0x10
1226#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2_MASK 0x000003FFL
1227#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2_MASK 0x03FF0000L
1228#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED__SHIFT 0x16
1229#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
1230#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED_MASK 0x00400000L
1231#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
1232
1233
1234// addressBlock: gc_padec
1235//VGT_VTX_VECT_EJECT_REG
1236#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT__SHIFT 0x0
1237#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT_MASK 0x0000007FL
1238//VGT_DMA_DATA_FIFO_DEPTH
1239#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH__SHIFT 0x0
1240#define VGT_DMA_DATA_FIFO_DEPTH__DMA2DRAW_FIFO_DEPTH__SHIFT 0x9
1241#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH_MASK 0x000001FFL
1242#define VGT_DMA_DATA_FIFO_DEPTH__DMA2DRAW_FIFO_DEPTH_MASK 0x0007FE00L
1243//VGT_DMA_REQ_FIFO_DEPTH
1244#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH__SHIFT 0x0
1245#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH_MASK 0x0000003FL
1246//VGT_DRAW_INIT_FIFO_DEPTH
1247#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH__SHIFT 0x0
1248#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH_MASK 0x0000003FL
1249//VGT_LAST_COPY_STATE
1250#define VGT_LAST_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
1251#define VGT_LAST_COPY_STATE__DST_STATE_ID__SHIFT 0x10
1252#define VGT_LAST_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
1253#define VGT_LAST_COPY_STATE__DST_STATE_ID_MASK 0x00070000L
1254//VGT_CACHE_INVALIDATION
1255#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT 0x0
1256#define VGT_CACHE_INVALIDATION__DIS_INSTANCING_OPT__SHIFT 0x4
1257#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER__SHIFT 0x5
1258#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT 0x6
1259#define VGT_CACHE_INVALIDATION__USE_GS_DONE__SHIFT 0x9
1260#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD__SHIFT 0xb
1261#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN__SHIFT 0xc
1262#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH__SHIFT 0xd
1263#define VGT_CACHE_INVALIDATION__ES_LIMIT__SHIFT 0x10
1264#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG__SHIFT 0x15
1265#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_1__SHIFT 0x16
1266#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_2__SHIFT 0x19
1267#define VGT_CACHE_INVALIDATION__EN_WAVE_MERGE__SHIFT 0x1c
1268#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_EOI__SHIFT 0x1d
1269#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION_MASK 0x00000003L
1270#define VGT_CACHE_INVALIDATION__DIS_INSTANCING_OPT_MASK 0x00000010L
1271#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER_MASK 0x00000020L
1272#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN_MASK 0x000000C0L
1273#define VGT_CACHE_INVALIDATION__USE_GS_DONE_MASK 0x00000200L
1274#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD_MASK 0x00000800L
1275#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN_MASK 0x00001000L
1276#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH_MASK 0x00002000L
1277#define VGT_CACHE_INVALIDATION__ES_LIMIT_MASK 0x001F0000L
1278#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_MASK 0x00200000L
1279#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_1_MASK 0x01C00000L
1280#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_2_MASK 0x0E000000L
1281#define VGT_CACHE_INVALIDATION__EN_WAVE_MERGE_MASK 0x10000000L
1282#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_EOI_MASK 0x20000000L
1283//VGT_STRMOUT_DELAY
1284#define VGT_STRMOUT_DELAY__SKIP_DELAY__SHIFT 0x0
1285#define VGT_STRMOUT_DELAY__SE0_WD_DELAY__SHIFT 0x8
1286#define VGT_STRMOUT_DELAY__SE1_WD_DELAY__SHIFT 0xb
1287#define VGT_STRMOUT_DELAY__SE2_WD_DELAY__SHIFT 0xe
1288#define VGT_STRMOUT_DELAY__SE3_WD_DELAY__SHIFT 0x11
1289#define VGT_STRMOUT_DELAY__SKIP_DELAY_MASK 0x000000FFL
1290#define VGT_STRMOUT_DELAY__SE0_WD_DELAY_MASK 0x00000700L
1291#define VGT_STRMOUT_DELAY__SE1_WD_DELAY_MASK 0x00003800L
1292#define VGT_STRMOUT_DELAY__SE2_WD_DELAY_MASK 0x0001C000L
1293#define VGT_STRMOUT_DELAY__SE3_WD_DELAY_MASK 0x000E0000L
1294//VGT_FIFO_DEPTHS
1295#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH__SHIFT 0x0
1296#define VGT_FIFO_DEPTHS__RESERVED_0__SHIFT 0x7
1297#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH__SHIFT 0x8
1298#define VGT_FIFO_DEPTHS__HSINPUT_FIFO_DEPTH__SHIFT 0x16
1299#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH_MASK 0x0000007FL
1300#define VGT_FIFO_DEPTHS__RESERVED_0_MASK 0x00000080L
1301#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH_MASK 0x003FFF00L
1302#define VGT_FIFO_DEPTHS__HSINPUT_FIFO_DEPTH_MASK 0x0FC00000L
1303//VGT_GS_VERTEX_REUSE
1304#define VGT_GS_VERTEX_REUSE__VERT_REUSE__SHIFT 0x0
1305#define VGT_GS_VERTEX_REUSE__VERT_REUSE_MASK 0x0000001FL
1306//VGT_MC_LAT_CNTL
1307#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES__SHIFT 0x0
1308#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES_MASK 0x0000000FL
1309//IA_CNTL_STATUS
1310#define IA_CNTL_STATUS__IA_BUSY__SHIFT 0x0
1311#define IA_CNTL_STATUS__IA_DMA_BUSY__SHIFT 0x1
1312#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY__SHIFT 0x2
1313#define IA_CNTL_STATUS__IA_GRP_BUSY__SHIFT 0x3
1314#define IA_CNTL_STATUS__IA_ADC_BUSY__SHIFT 0x4
1315#define IA_CNTL_STATUS__IA_BUSY_MASK 0x00000001L
1316#define IA_CNTL_STATUS__IA_DMA_BUSY_MASK 0x00000002L
1317#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY_MASK 0x00000004L
1318#define IA_CNTL_STATUS__IA_GRP_BUSY_MASK 0x00000008L
1319#define IA_CNTL_STATUS__IA_ADC_BUSY_MASK 0x00000010L
1320//VGT_CNTL_STATUS
1321#define VGT_CNTL_STATUS__VGT_BUSY__SHIFT 0x0
1322#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY__SHIFT 0x1
1323#define VGT_CNTL_STATUS__VGT_OUT_BUSY__SHIFT 0x2
1324#define VGT_CNTL_STATUS__VGT_PT_BUSY__SHIFT 0x3
1325#define VGT_CNTL_STATUS__VGT_TE_BUSY__SHIFT 0x4
1326#define VGT_CNTL_STATUS__VGT_VR_BUSY__SHIFT 0x5
1327#define VGT_CNTL_STATUS__VGT_PI_BUSY__SHIFT 0x6
1328#define VGT_CNTL_STATUS__VGT_GS_BUSY__SHIFT 0x7
1329#define VGT_CNTL_STATUS__VGT_HS_BUSY__SHIFT 0x8
1330#define VGT_CNTL_STATUS__VGT_TE11_BUSY__SHIFT 0x9
1331#define VGT_CNTL_STATUS__VGT_PRIMGEN_BUSY__SHIFT 0xa
1332#define VGT_CNTL_STATUS__VGT_BUSY_MASK 0x00000001L
1333#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY_MASK 0x00000002L
1334#define VGT_CNTL_STATUS__VGT_OUT_BUSY_MASK 0x00000004L
1335#define VGT_CNTL_STATUS__VGT_PT_BUSY_MASK 0x00000008L
1336#define VGT_CNTL_STATUS__VGT_TE_BUSY_MASK 0x00000010L
1337#define VGT_CNTL_STATUS__VGT_VR_BUSY_MASK 0x00000020L
1338#define VGT_CNTL_STATUS__VGT_PI_BUSY_MASK 0x00000040L
1339#define VGT_CNTL_STATUS__VGT_GS_BUSY_MASK 0x00000080L
1340#define VGT_CNTL_STATUS__VGT_HS_BUSY_MASK 0x00000100L
1341#define VGT_CNTL_STATUS__VGT_TE11_BUSY_MASK 0x00000200L
1342#define VGT_CNTL_STATUS__VGT_PRIMGEN_BUSY_MASK 0x00000400L
1343//WD_CNTL_STATUS
1344#define WD_CNTL_STATUS__WD_BUSY__SHIFT 0x0
1345#define WD_CNTL_STATUS__WD_SPL_DMA_BUSY__SHIFT 0x1
1346#define WD_CNTL_STATUS__WD_SPL_DI_BUSY__SHIFT 0x2
1347#define WD_CNTL_STATUS__WD_ADC_BUSY__SHIFT 0x3
1348#define WD_CNTL_STATUS__WD_BUSY_MASK 0x00000001L
1349#define WD_CNTL_STATUS__WD_SPL_DMA_BUSY_MASK 0x00000002L
1350#define WD_CNTL_STATUS__WD_SPL_DI_BUSY_MASK 0x00000004L
1351#define WD_CNTL_STATUS__WD_ADC_BUSY_MASK 0x00000008L
1352//CC_GC_PRIM_CONFIG
1353#define CC_GC_PRIM_CONFIG__INACTIVE_IA__SHIFT 0x10
1354#define CC_GC_PRIM_CONFIG__INACTIVE_VGT_PA__SHIFT 0x18
1355#define CC_GC_PRIM_CONFIG__INACTIVE_IA_MASK 0x00030000L
1356#define CC_GC_PRIM_CONFIG__INACTIVE_VGT_PA_MASK 0x0F000000L
1357//GC_USER_PRIM_CONFIG
1358#define GC_USER_PRIM_CONFIG__INACTIVE_IA__SHIFT 0x10
1359#define GC_USER_PRIM_CONFIG__INACTIVE_VGT_PA__SHIFT 0x18
1360#define GC_USER_PRIM_CONFIG__INACTIVE_IA_MASK 0x00030000L
1361#define GC_USER_PRIM_CONFIG__INACTIVE_VGT_PA_MASK 0x0F000000L
1362//WD_QOS
1363#define WD_QOS__DRAW_STALL__SHIFT 0x0
1364#define WD_QOS__DRAW_STALL_MASK 0x00000001L
1365//WD_UTCL1_CNTL
1366#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
1367#define WD_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
1368#define WD_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
1369#define WD_UTCL1_CNTL__BYPASS__SHIFT 0x19
1370#define WD_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
1371#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
1372#define WD_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
1373#define WD_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
1374#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
1375#define WD_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
1376#define WD_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
1377#define WD_UTCL1_CNTL__BYPASS_MASK 0x02000000L
1378#define WD_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
1379#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
1380#define WD_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
1381#define WD_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
1382//WD_UTCL1_STATUS
1383#define WD_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
1384#define WD_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
1385#define WD_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
1386#define WD_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
1387#define WD_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
1388#define WD_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
1389#define WD_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
1390#define WD_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
1391#define WD_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
1392#define WD_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
1393#define WD_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
1394#define WD_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
1395//IA_UTCL1_CNTL
1396#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
1397#define IA_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
1398#define IA_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
1399#define IA_UTCL1_CNTL__BYPASS__SHIFT 0x19
1400#define IA_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
1401#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
1402#define IA_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
1403#define IA_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
1404#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
1405#define IA_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
1406#define IA_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
1407#define IA_UTCL1_CNTL__BYPASS_MASK 0x02000000L
1408#define IA_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
1409#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
1410#define IA_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
1411#define IA_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
1412//IA_UTCL1_STATUS
1413#define IA_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
1414#define IA_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
1415#define IA_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
1416#define IA_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
1417#define IA_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
1418#define IA_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
1419#define IA_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
1420#define IA_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
1421#define IA_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
1422#define IA_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
1423#define IA_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
1424#define IA_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
1425//VGT_SYS_CONFIG
1426#define VGT_SYS_CONFIG__DUAL_CORE_EN__SHIFT 0x0
1427#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP__SHIFT 0x1
1428#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE__SHIFT 0x7
1429#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x00000001L
1430#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP_MASK 0x0000007EL
1431#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE_MASK 0x00000080L
1432//VGT_VS_MAX_WAVE_ID
1433#define VGT_VS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
1434#define VGT_VS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
1435//VGT_GS_MAX_WAVE_ID
1436#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
1437#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
1438//GFX_PIPE_CONTROL
1439#define GFX_PIPE_CONTROL__HYSTERESIS_CNT__SHIFT 0x0
1440#define GFX_PIPE_CONTROL__RESERVED__SHIFT 0xd
1441#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN__SHIFT 0x10
1442#define GFX_PIPE_CONTROL__HYSTERESIS_CNT_MASK 0x00001FFFL
1443#define GFX_PIPE_CONTROL__RESERVED_MASK 0x0000E000L
1444#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN_MASK 0x00010000L
1445//CC_GC_SHADER_ARRAY_CONFIG
1446#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x10
1447#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xFFFF0000L
1448//GC_USER_SHADER_ARRAY_CONFIG
1449#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x10
1450#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xFFFF0000L
1451//VGT_DMA_PRIMITIVE_TYPE
1452#define VGT_DMA_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
1453#define VGT_DMA_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
1454//VGT_DMA_CONTROL
1455#define VGT_DMA_CONTROL__PRIMGROUP_SIZE__SHIFT 0x0
1456#define VGT_DMA_CONTROL__IA_SWITCH_ON_EOP__SHIFT 0x11
1457#define VGT_DMA_CONTROL__SWITCH_ON_EOI__SHIFT 0x13
1458#define VGT_DMA_CONTROL__WD_SWITCH_ON_EOP__SHIFT 0x14
1459#define VGT_DMA_CONTROL__EN_INST_OPT_BASIC__SHIFT 0x15
1460#define VGT_DMA_CONTROL__EN_INST_OPT_ADV__SHIFT 0x16
1461#define VGT_DMA_CONTROL__HW_USE_ONLY__SHIFT 0x17
1462#define VGT_DMA_CONTROL__PRIMGROUP_SIZE_MASK 0x0000FFFFL
1463#define VGT_DMA_CONTROL__IA_SWITCH_ON_EOP_MASK 0x00020000L
1464#define VGT_DMA_CONTROL__SWITCH_ON_EOI_MASK 0x00080000L
1465#define VGT_DMA_CONTROL__WD_SWITCH_ON_EOP_MASK 0x00100000L
1466#define VGT_DMA_CONTROL__EN_INST_OPT_BASIC_MASK 0x00200000L
1467#define VGT_DMA_CONTROL__EN_INST_OPT_ADV_MASK 0x00400000L
1468#define VGT_DMA_CONTROL__HW_USE_ONLY_MASK 0x00800000L
1469//VGT_DMA_LS_HS_CONFIG
1470#define VGT_DMA_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
1471#define VGT_DMA_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
1472//WD_BUF_RESOURCE_1
1473#define WD_BUF_RESOURCE_1__POS_BUF_SIZE__SHIFT 0x0
1474#define WD_BUF_RESOURCE_1__INDEX_BUF_SIZE__SHIFT 0x10
1475#define WD_BUF_RESOURCE_1__POS_BUF_SIZE_MASK 0x0000FFFFL
1476#define WD_BUF_RESOURCE_1__INDEX_BUF_SIZE_MASK 0xFFFF0000L
1477//WD_BUF_RESOURCE_2
1478#define WD_BUF_RESOURCE_2__PARAM_BUF_SIZE__SHIFT 0x0
1479#define WD_BUF_RESOURCE_2__ADDR_MODE__SHIFT 0xf
1480#define WD_BUF_RESOURCE_2__CNTL_SB_BUF_SIZE__SHIFT 0x10
1481#define WD_BUF_RESOURCE_2__PARAM_BUF_SIZE_MASK 0x00001FFFL
1482#define WD_BUF_RESOURCE_2__ADDR_MODE_MASK 0x00008000L
1483#define WD_BUF_RESOURCE_2__CNTL_SB_BUF_SIZE_MASK 0xFFFF0000L
1484//PA_CL_CNTL_STATUS
1485#define PA_CL_CNTL_STATUS__UTC_FAULT_DETECTED__SHIFT 0x0
1486#define PA_CL_CNTL_STATUS__UTC_RETRY_DETECTED__SHIFT 0x1
1487#define PA_CL_CNTL_STATUS__UTC_PRT_DETECTED__SHIFT 0x2
1488#define PA_CL_CNTL_STATUS__UTC_FAULT_DETECTED_MASK 0x00000001L
1489#define PA_CL_CNTL_STATUS__UTC_RETRY_DETECTED_MASK 0x00000002L
1490#define PA_CL_CNTL_STATUS__UTC_PRT_DETECTED_MASK 0x00000004L
1491//PA_CL_ENHANCE
1492#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA__SHIFT 0x0
1493#define PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT 0x1
1494#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL__SHIFT 0x3
1495#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE__SHIFT 0x4
1496#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET__SHIFT 0x6
1497#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS__SHIFT 0x7
1498#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC__SHIFT 0x8
1499#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION__SHIFT 0x9
1500#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER__SHIFT 0xb
1501#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH__SHIFT 0xc
1502#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH__SHIFT 0xe
1503#define PA_CL_ENHANCE__ECO_SPARE3__SHIFT 0x1c
1504#define PA_CL_ENHANCE__ECO_SPARE2__SHIFT 0x1d
1505#define PA_CL_ENHANCE__ECO_SPARE1__SHIFT 0x1e
1506#define PA_CL_ENHANCE__ECO_SPARE0__SHIFT 0x1f
1507#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK 0x00000001L
1508#define PA_CL_ENHANCE__NUM_CLIP_SEQ_MASK 0x00000006L
1509#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL_MASK 0x00000008L
1510#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE_MASK 0x00000010L
1511#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET_MASK 0x00000040L
1512#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS_MASK 0x00000080L
1513#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC_MASK 0x00000100L
1514#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION_MASK 0x00000600L
1515#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER_MASK 0x00000800L
1516#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH_MASK 0x00003000L
1517#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH_MASK 0x0001C000L
1518#define PA_CL_ENHANCE__ECO_SPARE3_MASK 0x10000000L
1519#define PA_CL_ENHANCE__ECO_SPARE2_MASK 0x20000000L
1520#define PA_CL_ENHANCE__ECO_SPARE1_MASK 0x40000000L
1521#define PA_CL_ENHANCE__ECO_SPARE0_MASK 0x80000000L
1522//PA_SU_CNTL_STATUS
1523#define PA_SU_CNTL_STATUS__SU_BUSY__SHIFT 0x1f
1524#define PA_SU_CNTL_STATUS__SU_BUSY_MASK 0x80000000L
1525//PA_SC_FIFO_DEPTH_CNTL
1526#define PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT 0x0
1527#define PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK 0x000003FFL
1528//PA_SC_P3D_TRAP_SCREEN_HV_LOCK
1529#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
1530#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
1531//PA_SC_HP3D_TRAP_SCREEN_HV_LOCK
1532#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
1533#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
1534//PA_SC_TRAP_SCREEN_HV_LOCK
1535#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
1536#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
1537//PA_SC_FORCE_EOV_MAX_CNTS
1538#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT 0x0
1539#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT 0x10
1540#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT_MASK 0x0000FFFFL
1541#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT_MASK 0xFFFF0000L
1542//PA_SC_BINNER_EVENT_CNTL_0
1543#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0__SHIFT 0x0
1544#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1__SHIFT 0x2
1545#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2__SHIFT 0x4
1546#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3__SHIFT 0x6
1547#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS__SHIFT 0x8
1548#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE__SHIFT 0xa
1549#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH__SHIFT 0xc
1550#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH__SHIFT 0xe
1551#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC__SHIFT 0x10
1552#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9__SHIFT 0x12
1553#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET__SHIFT 0x14
1554#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE__SHIFT 0x16
1555#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END__SHIFT 0x18
1556#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT__SHIFT 0x1a
1557#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH__SHIFT 0x1c
1558#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH__SHIFT 0x1e
1559#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0_MASK 0x00000003L
1560#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1_MASK 0x0000000CL
1561#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2_MASK 0x00000030L
1562#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3_MASK 0x000000C0L
1563#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS_MASK 0x00000300L
1564#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE_MASK 0x00000C00L
1565#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_MASK 0x00003000L
1566#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH_MASK 0x0000C000L
1567#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC_MASK 0x00030000L
1568#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9_MASK 0x000C0000L
1569#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET_MASK 0x00300000L
1570#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE_MASK 0x00C00000L
1571#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END_MASK 0x03000000L
1572#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT_MASK 0x0C000000L
1573#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH_MASK 0x30000000L
1574#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH_MASK 0xC0000000L
1575//PA_SC_BINNER_EVENT_CNTL_1
1576#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH__SHIFT 0x0
1577#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT__SHIFT 0x2
1578#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM__SHIFT 0x4
1579#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT__SHIFT 0x6
1580#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT__SHIFT 0x8
1581#define PA_SC_BINNER_EVENT_CNTL_1__ZPASS_DONE__SHIFT 0xa
1582#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT__SHIFT 0xc
1583#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START__SHIFT 0xe
1584#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP__SHIFT 0x10
1585#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START__SHIFT 0x12
1586#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP__SHIFT 0x14
1587#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE__SHIFT 0x16
1588#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT__SHIFT 0x18
1589#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_GS_OUTPUT__SHIFT 0x1a
1590#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT__SHIFT 0x1c
1591#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH__SHIFT 0x1e
1592#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH_MASK 0x00000003L
1593#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT_MASK 0x0000000CL
1594#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM_MASK 0x00000030L
1595#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT_MASK 0x000000C0L
1596#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT_MASK 0x00000300L
1597#define PA_SC_BINNER_EVENT_CNTL_1__ZPASS_DONE_MASK 0x00000C00L
1598#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT_MASK 0x00003000L
1599#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START_MASK 0x0000C000L
1600#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP_MASK 0x00030000L
1601#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START_MASK 0x000C0000L
1602#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP_MASK 0x00300000L
1603#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE_MASK 0x00C00000L
1604#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT_MASK 0x03000000L
1605#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_GS_OUTPUT_MASK 0x0C000000L
1606#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT_MASK 0x30000000L
1607#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH_MASK 0xC0000000L
1608//PA_SC_BINNER_EVENT_CNTL_2
1609#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS__SHIFT 0x0
1610#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT__SHIFT 0x2
1611#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE__SHIFT 0x4
1612#define PA_SC_BINNER_EVENT_CNTL_2__CS_CONTEXT_DONE__SHIFT 0x6
1613#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH__SHIFT 0x8
1614#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER__SHIFT 0xa
1615#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT__SHIFT 0xc
1616#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ__SHIFT 0xe
1617#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS__SHIFT 0x10
1618#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_SX_TS__SHIFT 0x12
1619#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV__SHIFT 0x14
1620#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS__SHIFT 0x16
1621#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META__SHIFT 0x18
1622#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS__SHIFT 0x1a
1623#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META__SHIFT 0x1c
1624#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE__SHIFT 0x1e
1625#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS_MASK 0x00000003L
1626#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT_MASK 0x0000000CL
1627#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE_MASK 0x00000030L
1628#define PA_SC_BINNER_EVENT_CNTL_2__CS_CONTEXT_DONE_MASK 0x000000C0L
1629#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH_MASK 0x00000300L
1630#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER_MASK 0x00000C00L
1631#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT_MASK 0x00003000L
1632#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ_MASK 0x0000C000L
1633#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS_MASK 0x00030000L
1634#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_SX_TS_MASK 0x000C0000L
1635#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV_MASK 0x00300000L
1636#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS_MASK 0x00C00000L
1637#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META_MASK 0x03000000L
1638#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS_MASK 0x0C000000L
1639#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META_MASK 0x30000000L
1640#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE_MASK 0xC0000000L
1641//PA_SC_BINNER_EVENT_CNTL_3
1642#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE__SHIFT 0x0
1643#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA__SHIFT 0x2
1644#define PA_SC_BINNER_EVENT_CNTL_3__SX_CB_RAT_ACK_REQUEST__SHIFT 0x4
1645#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START__SHIFT 0x6
1646#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP__SHIFT 0x8
1647#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER__SHIFT 0xa
1648#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FLUSH__SHIFT 0xc
1649#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH__SHIFT 0xe
1650#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL__SHIFT 0x10
1651#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP__SHIFT 0x12
1652#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET__SHIFT 0x14
1653#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND__SHIFT 0x16
1654#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC__SHIFT 0x18
1655#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE__SHIFT 0x1a
1656#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_LEGACY_PIPELINE__SHIFT 0x1c
1657#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_63__SHIFT 0x1e
1658#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE_MASK 0x00000003L
1659#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA_MASK 0x0000000CL
1660#define PA_SC_BINNER_EVENT_CNTL_3__SX_CB_RAT_ACK_REQUEST_MASK 0x00000030L
1661#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START_MASK 0x000000C0L
1662#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP_MASK 0x00000300L
1663#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER_MASK 0x00000C00L
1664#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FLUSH_MASK 0x00003000L
1665#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH_MASK 0x0000C000L
1666#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL_MASK 0x00030000L
1667#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP_MASK 0x000C0000L
1668#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET_MASK 0x00300000L
1669#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND_MASK 0x00C00000L
1670#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC_MASK 0x03000000L
1671#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE_MASK 0x0C000000L
1672#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_LEGACY_PIPELINE_MASK 0x30000000L
1673#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_63_MASK 0xC0000000L
1674//PA_SC_BINNER_TIMEOUT_COUNTER
1675#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD__SHIFT 0x0
1676#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD_MASK 0xFFFFFFFFL
1677//PA_SC_BINNER_PERF_CNTL_0
1678#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0x0
1679#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0xa
1680#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x14
1681#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x17
1682#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000003FFL
1683#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000FFC00L
1684#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x00700000L
1685#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x03800000L
1686//PA_SC_BINNER_PERF_CNTL_1
1687#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x0
1688#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x5
1689#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD__SHIFT 0xa
1690#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x0000001FL
1691#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x000003E0L
1692#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD_MASK 0x03FFFC00L
1693//PA_SC_BINNER_PERF_CNTL_2
1694#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD__SHIFT 0x0
1695#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD__SHIFT 0xb
1696#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD_MASK 0x000007FFL
1697#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD_MASK 0x003FF800L
1698//PA_SC_BINNER_PERF_CNTL_3
1699#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD__SHIFT 0x0
1700#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD_MASK 0xFFFFFFFFL
1701//PA_SC_FIFO_SIZE
1702#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT 0x0
1703#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT 0x6
1704#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT 0xf
1705#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT 0x15
1706#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE_MASK 0x0000003FL
1707#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE_MASK 0x00007FC0L
1708#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE_MASK 0x001F8000L
1709#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE_MASK 0xFFE00000L
1710//PA_SC_IF_FIFO_SIZE
1711#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE__SHIFT 0x0
1712#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE__SHIFT 0x6
1713#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE__SHIFT 0xc
1714#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE__SHIFT 0x12
1715#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE_MASK 0x0000003FL
1716#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE_MASK 0x00000FC0L
1717#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE_MASK 0x0003F000L
1718#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE_MASK 0x00FC0000L
1719//PA_SC_PKR_WAVE_TABLE_CNTL
1720#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE__SHIFT 0x0
1721#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE_MASK 0x0000003FL
1722//PA_UTCL1_CNTL1
1723#define PA_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
1724#define PA_UTCL1_CNTL1__GPUVM_64K_DEFAULT__SHIFT 0x1
1725#define PA_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
1726#define PA_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
1727#define PA_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
1728#define PA_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
1729#define PA_UTCL1_CNTL1__SPARE__SHIFT 0x10
1730#define PA_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
1731#define PA_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
1732#define PA_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
1733#define PA_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
1734#define PA_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
1735#define PA_UTCL1_CNTL1__INVALIDATE_ALL_VMID__SHIFT 0x19
1736#define PA_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
1737#define PA_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
1738#define PA_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
1739#define PA_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
1740#define PA_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
1741#define PA_UTCL1_CNTL1__GPUVM_64K_DEFAULT_MASK 0x00000002L
1742#define PA_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
1743#define PA_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
1744#define PA_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
1745#define PA_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
1746#define PA_UTCL1_CNTL1__SPARE_MASK 0x00010000L
1747#define PA_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
1748#define PA_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
1749#define PA_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
1750#define PA_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
1751#define PA_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
1752#define PA_UTCL1_CNTL1__INVALIDATE_ALL_VMID_MASK 0x02000000L
1753#define PA_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
1754#define PA_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
1755#define PA_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
1756#define PA_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
1757//PA_UTCL1_CNTL2
1758#define PA_UTCL1_CNTL2__SPARE1__SHIFT 0x0
1759#define PA_UTCL1_CNTL2__SPARE2__SHIFT 0x8
1760#define PA_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
1761#define PA_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
1762#define PA_UTCL1_CNTL2__SPARE3__SHIFT 0xb
1763#define PA_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
1764#define PA_UTCL1_CNTL2__ENABLE_SHOOTDOWN_OPT__SHIFT 0xd
1765#define PA_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
1766#define PA_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
1767#define PA_UTCL1_CNTL2__SPARE4__SHIFT 0x10
1768#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
1769#define PA_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
1770#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
1771#define PA_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
1772#define PA_UTCL1_CNTL2__SPARE5__SHIFT 0x19
1773#define PA_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
1774#define PA_UTCL1_CNTL2__RESERVED__SHIFT 0x1b
1775#define PA_UTCL1_CNTL2__SPARE1_MASK 0x000000FFL
1776#define PA_UTCL1_CNTL2__SPARE2_MASK 0x00000100L
1777#define PA_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
1778#define PA_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
1779#define PA_UTCL1_CNTL2__SPARE3_MASK 0x00000800L
1780#define PA_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
1781#define PA_UTCL1_CNTL2__ENABLE_SHOOTDOWN_OPT_MASK 0x00002000L
1782#define PA_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
1783#define PA_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
1784#define PA_UTCL1_CNTL2__SPARE4_MASK 0x00030000L
1785#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
1786#define PA_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
1787#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
1788#define PA_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
1789#define PA_UTCL1_CNTL2__SPARE5_MASK 0x02000000L
1790#define PA_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
1791#define PA_UTCL1_CNTL2__RESERVED_MASK 0xF8000000L
1792//PA_SIDEBAND_REQUEST_DELAYS
1793#define PA_SIDEBAND_REQUEST_DELAYS__RETRY_DELAY__SHIFT 0x0
1794#define PA_SIDEBAND_REQUEST_DELAYS__INITIAL_DELAY__SHIFT 0x10
1795#define PA_SIDEBAND_REQUEST_DELAYS__RETRY_DELAY_MASK 0x0000FFFFL
1796#define PA_SIDEBAND_REQUEST_DELAYS__INITIAL_DELAY_MASK 0xFFFF0000L
1797//PA_SC_ENHANCE
1798#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER__SHIFT 0x0
1799#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX__SHIFT 0x1
1800#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX__SHIFT 0x2
1801#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS__SHIFT 0x3
1802#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID__SHIFT 0x4
1803#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX__SHIFT 0x5
1804#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER__SHIFT 0x6
1805#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION__SHIFT 0x7
1806#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM__SHIFT 0x8
1807#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE__SHIFT 0x9
1808#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE__SHIFT 0xa
1809#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE__SHIFT 0xb
1810#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS__SHIFT 0xc
1811#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE__SHIFT 0xd
1812#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE__SHIFT 0xe
1813#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE__SHIFT 0xf
1814#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST__SHIFT 0x10
1815#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING__SHIFT 0x11
1816#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY__SHIFT 0x12
1817#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING__SHIFT 0x13
1818#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING__SHIFT 0x14
1819#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS__SHIFT 0x15
1820#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID__SHIFT 0x16
1821#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO__SHIFT 0x17
1822#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
1823#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING__SHIFT 0x19
1824#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET__SHIFT 0x1a
1825#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET__SHIFT 0x1b
1826#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE__SHIFT 0x1c
1827#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING__SHIFT 0x1d
1828#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK 0x00000001L
1829#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX_MASK 0x00000002L
1830#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX_MASK 0x00000004L
1831#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS_MASK 0x00000008L
1832#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID_MASK 0x00000010L
1833#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX_MASK 0x00000020L
1834#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER_MASK 0x00000040L
1835#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION_MASK 0x00000080L
1836#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM_MASK 0x00000100L
1837#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE_MASK 0x00000200L
1838#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE_MASK 0x00000400L
1839#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE_MASK 0x00000800L
1840#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS_MASK 0x00001000L
1841#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE_MASK 0x00002000L
1842#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE_MASK 0x00004000L
1843#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE_MASK 0x00008000L
1844#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST_MASK 0x00010000L
1845#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING_MASK 0x00020000L
1846#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY_MASK 0x00040000L
1847#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING_MASK 0x00080000L
1848#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING_MASK 0x00100000L
1849#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS_MASK 0x00200000L
1850#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID_MASK 0x00400000L
1851#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO_MASK 0x00800000L
1852#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
1853#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING_MASK 0x02000000L
1854#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET_MASK 0x04000000L
1855#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET_MASK 0x08000000L
1856#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE_MASK 0x10000000L
1857#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING_MASK 0x20000000L
1858//PA_SC_ENHANCE_1
1859#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE__SHIFT 0x0
1860#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE__SHIFT 0x1
1861#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING__SHIFT 0x3
1862#define PA_SC_ENHANCE_1__BYPASS_PBB__SHIFT 0x4
1863#define PA_SC_ENHANCE_1__ECO_SPARE0__SHIFT 0x5
1864#define PA_SC_ENHANCE_1__ECO_SPARE1__SHIFT 0x6
1865#define PA_SC_ENHANCE_1__ECO_SPARE2__SHIFT 0x7
1866#define PA_SC_ENHANCE_1__ECO_SPARE3__SHIFT 0x8
1867#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB__SHIFT 0x9
1868#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT__SHIFT 0xa
1869#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM__SHIFT 0xb
1870#define PA_SC_ENHANCE_1__DISABLE_PACKER_GRAD_FDCE_ENHANCE__SHIFT 0xd
1871#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE__SHIFT 0xe
1872#define PA_SC_ENHANCE_1__DISABLE_SC_PIPELINE_RESET_LEGACY_MODE_TRANSITION__SHIFT 0xf
1873#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE__SHIFT 0x10
1874#define PA_SC_ENHANCE_1__ALLOW_SCALE_LINE_WIDTH_PAD_WITH_BINNING__SHIFT 0x11
1875#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION__SHIFT 0x12
1876#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS__SHIFT 0x13
1877#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION__SHIFT 0x14
1878#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION__SHIFT 0x15
1879#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION__SHIFT 0x16
1880#define PA_SC_ENHANCE_1__RSVD__SHIFT 0x17
1881#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE_MASK 0x00000001L
1882#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_MASK 0x00000006L
1883#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING_MASK 0x00000008L
1884#define PA_SC_ENHANCE_1__BYPASS_PBB_MASK 0x00000010L
1885#define PA_SC_ENHANCE_1__ECO_SPARE0_MASK 0x00000020L
1886#define PA_SC_ENHANCE_1__ECO_SPARE1_MASK 0x00000040L
1887#define PA_SC_ENHANCE_1__ECO_SPARE2_MASK 0x00000080L
1888#define PA_SC_ENHANCE_1__ECO_SPARE3_MASK 0x00000100L
1889#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB_MASK 0x00000200L
1890#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT_MASK 0x00000400L
1891#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM_MASK 0x00000800L
1892#define PA_SC_ENHANCE_1__DISABLE_PACKER_GRAD_FDCE_ENHANCE_MASK 0x00002000L
1893#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE_MASK 0x00004000L
1894#define PA_SC_ENHANCE_1__DISABLE_SC_PIPELINE_RESET_LEGACY_MODE_TRANSITION_MASK 0x00008000L
1895#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE_MASK 0x00010000L
1896#define PA_SC_ENHANCE_1__ALLOW_SCALE_LINE_WIDTH_PAD_WITH_BINNING_MASK 0x00020000L
1897#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION_MASK 0x00040000L
1898#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS_MASK 0x00080000L
1899#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION_MASK 0x00100000L
1900#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION_MASK 0x00200000L
1901#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION_MASK 0x00400000L
1902#define PA_SC_ENHANCE_1__RSVD_MASK 0xFF800000L
1903//PA_SC_DSM_CNTL
1904#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0__SHIFT 0x0
1905#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1__SHIFT 0x1
1906#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0_MASK 0x00000001L
1907#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1_MASK 0x00000002L
1908//PA_SC_TILE_STEERING_CREST_OVERRIDE
1909#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE__SHIFT 0x0
1910#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT__SHIFT 0x1
1911#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT__SHIFT 0x5
1912#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE_MASK 0x00000001L
1913#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK 0x00000006L
1914#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT_MASK 0x00000060L
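/*
 * Every register in these headers is described by a pair of macros per
 * field: a bit offset (__SHIFT) and a bit mask (_MASK). A field value is
 * packed by shifting it into position and masking it, and extracted by
 * masking first and shifting back down. The helpers below are only a
 * minimal sketch of that convention using the
 * PA_SC_TILE_STEERING_CREST_OVERRIDE fields defined above; the function
 * names are illustrative and not part of this header or the driver.
 */
static inline unsigned int
pa_sc_tile_steering_set_se_select(unsigned int reg_val, unsigned int se)
{
	/* Clear the 2-bit SE_SELECT field, then insert the new value. */
	reg_val &= ~PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK;
	reg_val |= (se << PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT__SHIFT) &
		   PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK;
	return reg_val;
}

static inline unsigned int
pa_sc_tile_steering_get_rb_select(unsigned int reg_val)
{
	/* Extract RB_SELECT: mask the field, then shift it down to bit 0. */
	return (reg_val & PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT_MASK) >>
	       PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT__SHIFT;
}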
1915
1916
1917// addressBlock: gc_sqdec
1918//SQ_CONFIG
1919#define SQ_CONFIG__UNUSED__SHIFT 0x0
1920#define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT 0x7
1921#define SQ_CONFIG__OVERRIDE_LDS_IDX_BUSY__SHIFT 0xb
1922#define SQ_CONFIG__EARLY_TA_DONE_DISABLE__SHIFT 0xc
1923#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE__SHIFT 0xd
1924#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE__SHIFT 0xe
1925#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE__SHIFT 0xf
1926#define SQ_CONFIG__DISABLE_VMEM_SOFT_CLAUSE__SHIFT 0x10
1927#define SQ_CONFIG__DISABLE_SMEM_SOFT_CLAUSE__SHIFT 0x11
1928#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_VS__SHIFT 0x12
1929#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_VS__SHIFT 0x13
1930#define SQ_CONFIG__REPLAY_SLEEP_CNT__SHIFT 0x15
1931#define SQ_CONFIG__DISABLE_SP_VGPR_WRITE_SKIP__SHIFT 0x1c
1932#define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT 0x1d
1933#define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT 0x1e
1934#define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT 0x1f
1935#define SQ_CONFIG__UNUSED_MASK 0x0000007FL
1936#define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK 0x00000080L
1937#define SQ_CONFIG__OVERRIDE_LDS_IDX_BUSY_MASK 0x00000800L
1938#define SQ_CONFIG__EARLY_TA_DONE_DISABLE_MASK 0x00001000L
1939#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE_MASK 0x00002000L
1940#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE_MASK 0x00004000L
1941#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE_MASK 0x00008000L
1942#define SQ_CONFIG__DISABLE_VMEM_SOFT_CLAUSE_MASK 0x00010000L
1943#define SQ_CONFIG__DISABLE_SMEM_SOFT_CLAUSE_MASK 0x00020000L
1944#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_VS_MASK 0x00040000L
1945#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_VS_MASK 0x00180000L
1946#define SQ_CONFIG__REPLAY_SLEEP_CNT_MASK 0x0FE00000L
1947#define SQ_CONFIG__DISABLE_SP_VGPR_WRITE_SKIP_MASK 0x10000000L
1948#define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING_MASK 0x20000000L
1949#define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE_MASK 0x40000000L
1950#define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE_MASK 0x80000000L
1951//SQC_CONFIG
1952#define SQC_CONFIG__INST_CACHE_SIZE__SHIFT 0x0
1953#define SQC_CONFIG__DATA_CACHE_SIZE__SHIFT 0x2
1954#define SQC_CONFIG__MISS_FIFO_DEPTH__SHIFT 0x4
1955#define SQC_CONFIG__HIT_FIFO_DEPTH__SHIFT 0x6
1956#define SQC_CONFIG__FORCE_ALWAYS_MISS__SHIFT 0x7
1957#define SQC_CONFIG__FORCE_IN_ORDER__SHIFT 0x8
1958#define SQC_CONFIG__IDENTITY_HASH_BANK__SHIFT 0x9
1959#define SQC_CONFIG__IDENTITY_HASH_SET__SHIFT 0xa
1960#define SQC_CONFIG__PER_VMID_INV_DISABLE__SHIFT 0xb
1961#define SQC_CONFIG__EVICT_LRU__SHIFT 0xc
1962#define SQC_CONFIG__FORCE_2_BANK__SHIFT 0xe
1963#define SQC_CONFIG__FORCE_1_BANK__SHIFT 0xf
1964#define SQC_CONFIG__LS_DISABLE_CLOCKS__SHIFT 0x10
1965#define SQC_CONFIG__INST_PRF_COUNT__SHIFT 0x18
1966#define SQC_CONFIG__INST_PRF_FILTER_DIS__SHIFT 0x1a
1967#define SQC_CONFIG__INST_CACHE_SIZE_MASK 0x00000003L
1968#define SQC_CONFIG__DATA_CACHE_SIZE_MASK 0x0000000CL
1969#define SQC_CONFIG__MISS_FIFO_DEPTH_MASK 0x00000030L
1970#define SQC_CONFIG__HIT_FIFO_DEPTH_MASK 0x00000040L
1971#define SQC_CONFIG__FORCE_ALWAYS_MISS_MASK 0x00000080L
1972#define SQC_CONFIG__FORCE_IN_ORDER_MASK 0x00000100L
1973#define SQC_CONFIG__IDENTITY_HASH_BANK_MASK 0x00000200L
1974#define SQC_CONFIG__IDENTITY_HASH_SET_MASK 0x00000400L
1975#define SQC_CONFIG__PER_VMID_INV_DISABLE_MASK 0x00000800L
1976#define SQC_CONFIG__EVICT_LRU_MASK 0x00003000L
1977#define SQC_CONFIG__FORCE_2_BANK_MASK 0x00004000L
1978#define SQC_CONFIG__FORCE_1_BANK_MASK 0x00008000L
1979#define SQC_CONFIG__LS_DISABLE_CLOCKS_MASK 0x00FF0000L
1980#define SQC_CONFIG__INST_PRF_COUNT_MASK 0x03000000L
1981#define SQC_CONFIG__INST_PRF_FILTER_DIS_MASK 0x04000000L
1982//LDS_CONFIG
1983#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING__SHIFT 0x0
1984#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING_MASK 0x00000001L
1985//SQ_RANDOM_WAVE_PRI
1986#define SQ_RANDOM_WAVE_PRI__RET__SHIFT 0x0
1987#define SQ_RANDOM_WAVE_PRI__RUI__SHIFT 0x7
1988#define SQ_RANDOM_WAVE_PRI__RNG__SHIFT 0xa
1989#define SQ_RANDOM_WAVE_PRI__RET_MASK 0x0000007FL
1990#define SQ_RANDOM_WAVE_PRI__RUI_MASK 0x00000380L
1991#define SQ_RANDOM_WAVE_PRI__RNG_MASK 0x007FFC00L
1992//SQ_REG_CREDITS
1993#define SQ_REG_CREDITS__SRBM_CREDITS__SHIFT 0x0
1994#define SQ_REG_CREDITS__CMD_CREDITS__SHIFT 0x8
1995#define SQ_REG_CREDITS__REG_BUSY__SHIFT 0x1c
1996#define SQ_REG_CREDITS__SRBM_OVERFLOW__SHIFT 0x1d
1997#define SQ_REG_CREDITS__IMMED_OVERFLOW__SHIFT 0x1e
1998#define SQ_REG_CREDITS__CMD_OVERFLOW__SHIFT 0x1f
1999#define SQ_REG_CREDITS__SRBM_CREDITS_MASK 0x0000003FL
2000#define SQ_REG_CREDITS__CMD_CREDITS_MASK 0x00000F00L
2001#define SQ_REG_CREDITS__REG_BUSY_MASK 0x10000000L
2002#define SQ_REG_CREDITS__SRBM_OVERFLOW_MASK 0x20000000L
2003#define SQ_REG_CREDITS__IMMED_OVERFLOW_MASK 0x40000000L
2004#define SQ_REG_CREDITS__CMD_OVERFLOW_MASK 0x80000000L
2005//SQ_FIFO_SIZES
2006#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE__SHIFT 0x0
2007#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE__SHIFT 0x8
2008#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE__SHIFT 0x10
2009#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE__SHIFT 0x12
2010#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE_MASK 0x0000000FL
2011#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE_MASK 0x00000F00L
2012#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE_MASK 0x00030000L
2013#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE_MASK 0x000C0000L
2014//SQ_DSM_CNTL
2015#define SQ_DSM_CNTL__WAVEFRONT_STALL_0__SHIFT 0x0
2016#define SQ_DSM_CNTL__WAVEFRONT_STALL_1__SHIFT 0x1
2017#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0__SHIFT 0x2
2018#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1__SHIFT 0x3
2019#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0__SHIFT 0x8
2020#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1__SHIFT 0x9
2021#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE__SHIFT 0xa
2022#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0__SHIFT 0x10
2023#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1__SHIFT 0x11
2024#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01__SHIFT 0x12
2025#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2__SHIFT 0x13
2026#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3__SHIFT 0x14
2027#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23__SHIFT 0x15
2028#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0__SHIFT 0x18
2029#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1__SHIFT 0x19
2030#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE__SHIFT 0x1a
2031#define SQ_DSM_CNTL__WAVEFRONT_STALL_0_MASK 0x00000001L
2032#define SQ_DSM_CNTL__WAVEFRONT_STALL_1_MASK 0x00000002L
2033#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0_MASK 0x00000004L
2034#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1_MASK 0x00000008L
2035#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0_MASK 0x00000100L
2036#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1_MASK 0x00000200L
2037#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE_MASK 0x00000400L
2038#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0_MASK 0x00010000L
2039#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1_MASK 0x00020000L
2040#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01_MASK 0x00040000L
2041#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2_MASK 0x00080000L
2042#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3_MASK 0x00100000L
2043#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23_MASK 0x00200000L
2044#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0_MASK 0x01000000L
2045#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1_MASK 0x02000000L
2046#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE_MASK 0x04000000L
2047//SQ_DSM_CNTL2
2048#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT__SHIFT 0x0
2049#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY__SHIFT 0x2
2050#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT__SHIFT 0x3
2051#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY__SHIFT 0x5
2052#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT__SHIFT 0x6
2053#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY__SHIFT 0x8
2054#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT__SHIFT 0x9
2055#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY__SHIFT 0xb
2056#define SQ_DSM_CNTL2__LDS_INJECT_DELAY__SHIFT 0xe
2057#define SQ_DSM_CNTL2__SP_INJECT_DELAY__SHIFT 0x14
2058#define SQ_DSM_CNTL2__SQ_INJECT_DELAY__SHIFT 0x1a
2059#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT_MASK 0x00000003L
2060#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY_MASK 0x00000004L
2061#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT_MASK 0x00000018L
2062#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY_MASK 0x00000020L
2063#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT_MASK 0x000000C0L
2064#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY_MASK 0x00000100L
2065#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT_MASK 0x00000600L
2066#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY_MASK 0x00000800L
2067#define SQ_DSM_CNTL2__LDS_INJECT_DELAY_MASK 0x000FC000L
2068#define SQ_DSM_CNTL2__SP_INJECT_DELAY_MASK 0x03F00000L
2069#define SQ_DSM_CNTL2__SQ_INJECT_DELAY_MASK 0xFC000000L
2070//SQ_RUNTIME_CONFIG
2071#define SQ_RUNTIME_CONFIG__ENABLE_TEX_ARB_OLDEST__SHIFT 0x0
2072#define SQ_RUNTIME_CONFIG__ENABLE_TEX_ARB_OLDEST_MASK 0x00000001L
2073//SH_MEM_BASES
2074#define SH_MEM_BASES__PRIVATE_BASE__SHIFT 0x0
2075#define SH_MEM_BASES__SHARED_BASE__SHIFT 0x10
2076#define SH_MEM_BASES__PRIVATE_BASE_MASK 0x0000FFFFL
2077#define SH_MEM_BASES__SHARED_BASE_MASK 0xFFFF0000L
2078//SH_MEM_CONFIG
2079#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
2080#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x3
2081#define SH_MEM_CONFIG__RETRY_DISABLE__SHIFT 0xc
2082#define SH_MEM_CONFIG__PRIVATE_NV__SHIFT 0xd
2083#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x00000001L
2084#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x00000018L
2085#define SH_MEM_CONFIG__RETRY_DISABLE_MASK 0x00001000L
2086#define SH_MEM_CONFIG__PRIVATE_NV_MASK 0x00002000L
2087//CC_GC_SHADER_RATE_CONFIG
2088#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
2089#define CC_GC_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x3
2090#define CC_GC_SHADER_RATE_CONFIG__HALF_LDS__SHIFT 0x4
2091#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
2092#define CC_GC_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
2093#define CC_GC_SHADER_RATE_CONFIG__HALF_LDS_MASK 0x00000010L
2094//GC_USER_SHADER_RATE_CONFIG
2095#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
2096#define GC_USER_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x3
2097#define GC_USER_SHADER_RATE_CONFIG__HALF_LDS__SHIFT 0x4
2098#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
2099#define GC_USER_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
2100#define GC_USER_SHADER_RATE_CONFIG__HALF_LDS_MASK 0x00000010L
2101//SQ_INTERRUPT_AUTO_MASK
2102#define SQ_INTERRUPT_AUTO_MASK__MASK__SHIFT 0x0
2103#define SQ_INTERRUPT_AUTO_MASK__MASK_MASK 0x00FFFFFFL
2104//SQ_INTERRUPT_MSG_CTRL
2105#define SQ_INTERRUPT_MSG_CTRL__STALL__SHIFT 0x0
2106#define SQ_INTERRUPT_MSG_CTRL__STALL_MASK 0x00000001L
2107//SQ_UTCL1_CNTL1
2108#define SQ_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
2109#define SQ_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
2110#define SQ_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
2111#define SQ_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
2112#define SQ_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
2113#define SQ_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
2114#define SQ_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
2115#define SQ_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
2116#define SQ_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
2117#define SQ_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
2118#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
2119#define SQ_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
2120#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL__SHIFT 0x19
2121#define SQ_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
2122#define SQ_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
2123#define SQ_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
2124#define SQ_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
2125#define SQ_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
2126#define SQ_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
2127#define SQ_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
2128#define SQ_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
2129#define SQ_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
2130#define SQ_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
2131#define SQ_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
2132#define SQ_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
2133#define SQ_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
2134#define SQ_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
2135#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
2136#define SQ_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
2137#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_MASK 0x02000000L
2138#define SQ_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
2139#define SQ_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
2140#define SQ_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
2141#define SQ_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
2142//SQ_UTCL1_CNTL2
2143#define SQ_UTCL1_CNTL2__SPARE__SHIFT 0x0
2144#define SQ_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
2145#define SQ_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
2146#define SQ_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
2147#define SQ_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
2148#define SQ_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
2149#define SQ_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
2150#define SQ_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
2151#define SQ_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
2152#define SQ_UTCL1_CNTL2__RETRY_TIMER__SHIFT 0x10
2153#define SQ_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
2154#define SQ_UTCL1_CNTL2__PREFETCH_PAGE__SHIFT 0x1c
2155#define SQ_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
2156#define SQ_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
2157#define SQ_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
2158#define SQ_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
2159#define SQ_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
2160#define SQ_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
2161#define SQ_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
2162#define SQ_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
2163#define SQ_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
2164#define SQ_UTCL1_CNTL2__RETRY_TIMER_MASK 0x007F0000L
2165#define SQ_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
2166#define SQ_UTCL1_CNTL2__PREFETCH_PAGE_MASK 0xF0000000L
2167//SQ_UTCL1_STATUS
2168#define SQ_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
2169#define SQ_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
2170#define SQ_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
2171#define SQ_UTCL1_STATUS__RESERVED__SHIFT 0x3
2172#define SQ_UTCL1_STATUS__UNUSED__SHIFT 0x10
2173#define SQ_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
2174#define SQ_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
2175#define SQ_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
2176#define SQ_UTCL1_STATUS__RESERVED_MASK 0x0000FFF8L
2177#define SQ_UTCL1_STATUS__UNUSED_MASK 0xFFFF0000L
2178//SQ_SHADER_TBA_LO
2179#define SQ_SHADER_TBA_LO__ADDR_LO__SHIFT 0x0
2180#define SQ_SHADER_TBA_LO__ADDR_LO_MASK 0xFFFFFFFFL
2181//SQ_SHADER_TBA_HI
2182#define SQ_SHADER_TBA_HI__ADDR_HI__SHIFT 0x0
2183#define SQ_SHADER_TBA_HI__ADDR_HI_MASK 0x000000FFL
2184//SQ_SHADER_TMA_LO
2185#define SQ_SHADER_TMA_LO__ADDR_LO__SHIFT 0x0
2186#define SQ_SHADER_TMA_LO__ADDR_LO_MASK 0xFFFFFFFFL
2187//SQ_SHADER_TMA_HI
2188#define SQ_SHADER_TMA_HI__ADDR_HI__SHIFT 0x0
2189#define SQ_SHADER_TMA_HI__ADDR_HI_MASK 0x000000FFL
2190//SQC_DSM_CNTL
2191#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x0
2192#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x2
2193#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0x3
2194#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0x5
2195#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
2196#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
2197#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0x9
2198#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0xb
2199#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0xc
2200#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0xe
2201#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0xf
2202#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0x11
2203#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
2204#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
2205#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x00000003L
2206#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00000004L
2207#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00000018L
2208#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00000020L
2209#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
2210#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
2211#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00000600L
2212#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00000800L
2213#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x00003000L
2214#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00004000L
2215#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00018000L
2216#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00020000L
2217#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
2218#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
2219//SQC_DSM_CNTLA
2220#define SQC_DSM_CNTLA__INST_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0x0
2221#define SQC_DSM_CNTLA__INST_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x2
2222#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x3
2223#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x5
2224#define SQC_DSM_CNTLA__INST_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
2225#define SQC_DSM_CNTLA__INST_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
2226#define SQC_DSM_CNTLA__INST_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x9
2227#define SQC_DSM_CNTLA__INST_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
2228#define SQC_DSM_CNTLA__DATA_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0xc
2229#define SQC_DSM_CNTLA__DATA_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xe
2230#define SQC_DSM_CNTLA__DATA_HIT_FIFO_DSM_IRRITATOR_DATA__SHIFT 0xf
2231#define SQC_DSM_CNTLA__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x11
2232#define SQC_DSM_CNTLA__DATA_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
2233#define SQC_DSM_CNTLA__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
2234#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA__SHIFT 0x15
2235#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x17
2236#define SQC_DSM_CNTLA__DATA_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x18
2237#define SQC_DSM_CNTLA__DATA_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
2238#define SQC_DSM_CNTLA__INST_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00000003L
2239#define SQC_DSM_CNTLA__INST_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
2240#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x00000018L
2241#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000020L
2242#define SQC_DSM_CNTLA__INST_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
2243#define SQC_DSM_CNTLA__INST_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
2244#define SQC_DSM_CNTLA__INST_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x00000600L
2245#define SQC_DSM_CNTLA__INST_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
2246#define SQC_DSM_CNTLA__DATA_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00003000L
2247#define SQC_DSM_CNTLA__DATA_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
2248#define SQC_DSM_CNTLA__DATA_HIT_FIFO_DSM_IRRITATOR_DATA_MASK 0x00018000L
2249#define SQC_DSM_CNTLA__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00020000L
2250#define SQC_DSM_CNTLA__DATA_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
2251#define SQC_DSM_CNTLA__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
2252#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA_MASK 0x00600000L
2253#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
2254#define SQC_DSM_CNTLA__DATA_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x03000000L
2255#define SQC_DSM_CNTLA__DATA_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
2256//SQC_DSM_CNTLB
2257#define SQC_DSM_CNTLB__INST_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0x0
2258#define SQC_DSM_CNTLB__INST_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x2
2259#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x3
2260#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x5
2261#define SQC_DSM_CNTLB__INST_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
2262#define SQC_DSM_CNTLB__INST_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
2263#define SQC_DSM_CNTLB__INST_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x9
2264#define SQC_DSM_CNTLB__INST_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
2265#define SQC_DSM_CNTLB__DATA_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0xc
2266#define SQC_DSM_CNTLB__DATA_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xe
2267#define SQC_DSM_CNTLB__DATA_HIT_FIFO_DSM_IRRITATOR_DATA__SHIFT 0xf
2268#define SQC_DSM_CNTLB__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x11
2269#define SQC_DSM_CNTLB__DATA_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
2270#define SQC_DSM_CNTLB__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
2271#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA__SHIFT 0x15
2272#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x17
2273#define SQC_DSM_CNTLB__DATA_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x18
2274#define SQC_DSM_CNTLB__DATA_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
2275#define SQC_DSM_CNTLB__INST_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00000003L
2276#define SQC_DSM_CNTLB__INST_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
2277#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x00000018L
2278#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000020L
2279#define SQC_DSM_CNTLB__INST_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
2280#define SQC_DSM_CNTLB__INST_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
2281#define SQC_DSM_CNTLB__INST_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x00000600L
2282#define SQC_DSM_CNTLB__INST_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
2283#define SQC_DSM_CNTLB__DATA_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00003000L
2284#define SQC_DSM_CNTLB__DATA_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
2285#define SQC_DSM_CNTLB__DATA_HIT_FIFO_DSM_IRRITATOR_DATA_MASK 0x00018000L
2286#define SQC_DSM_CNTLB__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00020000L
2287#define SQC_DSM_CNTLB__DATA_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
2288#define SQC_DSM_CNTLB__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
2289#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA_MASK 0x00600000L
2290#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
2291#define SQC_DSM_CNTLB__DATA_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x03000000L
2292#define SQC_DSM_CNTLB__DATA_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
2293//SQC_DSM_CNTL2
2294#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x0
2295#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x2
2296#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0x3
2297#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0x5
2298#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
2299#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x8
2300#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0x9
2301#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0xb
2302#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0xc
2303#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0xe
2304#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0xf
2305#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0x11
2306#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
2307#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x14
2308#define SQC_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
2309#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x00000003L
2310#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00000004L
2311#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00000018L
2312#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00000020L
2313#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
2314#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
2315#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00000600L
2316#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00000800L
2317#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x00003000L
2318#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00004000L
2319#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00018000L
2320#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00020000L
2321#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
2322#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
2323#define SQC_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
2324//SQC_DSM_CNTL2A
2325#define SQC_DSM_CNTL2A__INST_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
2326#define SQC_DSM_CNTL2A__INST_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
2327#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x3
2328#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x5
2329#define SQC_DSM_CNTL2A__INST_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
2330#define SQC_DSM_CNTL2A__INST_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
2331#define SQC_DSM_CNTL2A__INST_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
2332#define SQC_DSM_CNTL2A__INST_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
2333#define SQC_DSM_CNTL2A__DATA_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0xc
2334#define SQC_DSM_CNTL2A__DATA_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0xe
2335#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xf
2336#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_SELECT_INJECT_DELAY__SHIFT 0x11
2337#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
2338#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x14
2339#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x15
2340#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY__SHIFT 0x17
2341#define SQC_DSM_CNTL2A__DATA_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
2342#define SQC_DSM_CNTL2A__DATA_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
2343#define SQC_DSM_CNTL2A__INST_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
2344#define SQC_DSM_CNTL2A__INST_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
2345#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000018L
2346#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000020L
2347#define SQC_DSM_CNTL2A__INST_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
2348#define SQC_DSM_CNTL2A__INST_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
2349#define SQC_DSM_CNTL2A__INST_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
2350#define SQC_DSM_CNTL2A__INST_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
2351#define SQC_DSM_CNTL2A__DATA_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00003000L
2352#define SQC_DSM_CNTL2A__DATA_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00004000L
2353#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_ENABLE_ERROR_INJECT_MASK 0x00018000L
2354#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_SELECT_INJECT_DELAY_MASK 0x00020000L
2355#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
2356#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
2357#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT_MASK 0x00600000L
2358#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY_MASK 0x00800000L
2359#define SQC_DSM_CNTL2A__DATA_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
2360#define SQC_DSM_CNTL2A__DATA_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
2361//SQC_DSM_CNTL2B
2362#define SQC_DSM_CNTL2B__INST_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
2363#define SQC_DSM_CNTL2B__INST_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
2364#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x3
2365#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x5
2366#define SQC_DSM_CNTL2B__INST_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
2367#define SQC_DSM_CNTL2B__INST_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
2368#define SQC_DSM_CNTL2B__INST_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
2369#define SQC_DSM_CNTL2B__INST_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
2370#define SQC_DSM_CNTL2B__DATA_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0xc
2371#define SQC_DSM_CNTL2B__DATA_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0xe
2372#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xf
2373#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_SELECT_INJECT_DELAY__SHIFT 0x11
2374#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
2375#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x14
2376#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x15
2377#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY__SHIFT 0x17
2378#define SQC_DSM_CNTL2B__DATA_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
2379#define SQC_DSM_CNTL2B__DATA_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
2380#define SQC_DSM_CNTL2B__INST_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
2381#define SQC_DSM_CNTL2B__INST_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
2382#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000018L
2383#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000020L
2384#define SQC_DSM_CNTL2B__INST_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
2385#define SQC_DSM_CNTL2B__INST_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
2386#define SQC_DSM_CNTL2B__INST_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
2387#define SQC_DSM_CNTL2B__INST_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
2388#define SQC_DSM_CNTL2B__DATA_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00003000L
2389#define SQC_DSM_CNTL2B__DATA_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00004000L
2390#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_ENABLE_ERROR_INJECT_MASK 0x00018000L
2391#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_SELECT_INJECT_DELAY_MASK 0x00020000L
2392#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
2393#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
2394#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT_MASK 0x00600000L
2395#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY_MASK 0x00800000L
2396#define SQC_DSM_CNTL2B__DATA_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
2397#define SQC_DSM_CNTL2B__DATA_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
2398//SQC_EDC_FUE_CNTL
2399#define SQC_EDC_FUE_CNTL__BLOCK_FUE_FLAGS__SHIFT 0x0
2400#define SQC_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES__SHIFT 0x10
2401#define SQC_EDC_FUE_CNTL__BLOCK_FUE_FLAGS_MASK 0x0000FFFFL
2402#define SQC_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES_MASK 0xFFFF0000L
2403//SQC_EDC_CNT2
2404#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x0
2405#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT 0x2
2406#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0x4
2407#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT 0x6
2408#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x8
2409#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT 0xa
2410#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0xc
2411#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT 0xe
2412#define SQC_EDC_CNT2__INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT__SHIFT 0x10
2413#define SQC_EDC_CNT2__INST_BANKA_MISS_FIFO_SED_COUNT__SHIFT 0x12
2414#define SQC_EDC_CNT2__DATA_BANKA_HIT_FIFO_SED_COUNT__SHIFT 0x14
2415#define SQC_EDC_CNT2__DATA_BANKA_MISS_FIFO_SED_COUNT__SHIFT 0x16
2416#define SQC_EDC_CNT2__DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT__SHIFT 0x18
2417#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1a
2418#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1c
2419#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000003L
2420#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK 0x0000000CL
2421#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00000030L
2422#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK 0x000000C0L
2423#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000300L
2424#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK 0x00000C00L
2425#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00003000L
2426#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK 0x0000C000L
2427#define SQC_EDC_CNT2__INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT_MASK 0x00030000L
2428#define SQC_EDC_CNT2__INST_BANKA_MISS_FIFO_SED_COUNT_MASK 0x000C0000L
2429#define SQC_EDC_CNT2__DATA_BANKA_HIT_FIFO_SED_COUNT_MASK 0x00300000L
2430#define SQC_EDC_CNT2__DATA_BANKA_MISS_FIFO_SED_COUNT_MASK 0x00C00000L
2431#define SQC_EDC_CNT2__DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT_MASK 0x03000000L
2432#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK 0x0C000000L
2433#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK 0x30000000L
2434//SQC_EDC_CNT3
2435#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x0
2436#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT 0x2
2437#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0x4
2438#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT 0x6
2439#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x8
2440#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT 0xa
2441#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0xc
2442#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT 0xe
2443#define SQC_EDC_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT__SHIFT 0x10
2444#define SQC_EDC_CNT3__INST_BANKB_MISS_FIFO_SED_COUNT__SHIFT 0x12
2445#define SQC_EDC_CNT3__DATA_BANKB_HIT_FIFO_SED_COUNT__SHIFT 0x14
2446#define SQC_EDC_CNT3__DATA_BANKB_MISS_FIFO_SED_COUNT__SHIFT 0x16
2447#define SQC_EDC_CNT3__DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT__SHIFT 0x18
2448#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000003L
2449#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK 0x0000000CL
2450#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00000030L
2451#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK 0x000000C0L
2452#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000300L
2453#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK 0x00000C00L
2454#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00003000L
2455#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK 0x0000C000L
2456#define SQC_EDC_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT_MASK 0x00030000L
2457#define SQC_EDC_CNT3__INST_BANKB_MISS_FIFO_SED_COUNT_MASK 0x000C0000L
2458#define SQC_EDC_CNT3__DATA_BANKB_HIT_FIFO_SED_COUNT_MASK 0x00300000L
2459#define SQC_EDC_CNT3__DATA_BANKB_MISS_FIFO_SED_COUNT_MASK 0x00C00000L
2460#define SQC_EDC_CNT3__DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT_MASK 0x03000000L
2461//SQ_REG_TIMESTAMP
2462#define SQ_REG_TIMESTAMP__TIMESTAMP__SHIFT 0x0
2463#define SQ_REG_TIMESTAMP__TIMESTAMP_MASK 0x000000FFL
2464//SQ_CMD_TIMESTAMP
2465#define SQ_CMD_TIMESTAMP__TIMESTAMP__SHIFT 0x0
2466#define SQ_CMD_TIMESTAMP__TIMESTAMP_MASK 0x000000FFL
2467//SQ_IND_INDEX
2468#define SQ_IND_INDEX__WAVE_ID__SHIFT 0x0
2469#define SQ_IND_INDEX__SIMD_ID__SHIFT 0x4
2470#define SQ_IND_INDEX__THREAD_ID__SHIFT 0x6
2471#define SQ_IND_INDEX__AUTO_INCR__SHIFT 0xc
2472#define SQ_IND_INDEX__FORCE_READ__SHIFT 0xd
2473#define SQ_IND_INDEX__READ_TIMEOUT__SHIFT 0xe
2474#define SQ_IND_INDEX__UNINDEXED__SHIFT 0xf
2475#define SQ_IND_INDEX__INDEX__SHIFT 0x10
2476#define SQ_IND_INDEX__WAVE_ID_MASK 0x0000000FL
2477#define SQ_IND_INDEX__SIMD_ID_MASK 0x00000030L
2478#define SQ_IND_INDEX__THREAD_ID_MASK 0x00000FC0L
2479#define SQ_IND_INDEX__AUTO_INCR_MASK 0x00001000L
2480#define SQ_IND_INDEX__FORCE_READ_MASK 0x00002000L
2481#define SQ_IND_INDEX__READ_TIMEOUT_MASK 0x00004000L
2482#define SQ_IND_INDEX__UNINDEXED_MASK 0x00008000L
2483#define SQ_IND_INDEX__INDEX_MASK 0xFFFF0000L
2484//SQ_IND_DATA
2485#define SQ_IND_DATA__DATA__SHIFT 0x0
2486#define SQ_IND_DATA__DATA_MASK 0xFFFFFFFFL
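/*
 * SQ_IND_INDEX and SQ_IND_DATA form an index/data pair: the wave, SIMD,
 * thread and register index are packed into SQ_IND_INDEX, and the selected
 * value is then transferred through SQ_IND_DATA. The helper below only
 * sketches how such an index word can be composed from the fields defined
 * above; the MMIO access itself is driver-specific and omitted, and the
 * function name is illustrative rather than part of this header.
 */
static inline unsigned int
sq_ind_index_compose(unsigned int wave, unsigned int simd,
		     unsigned int regno, unsigned int auto_incr)
{
	unsigned int v = 0;

	v |= (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) & SQ_IND_INDEX__WAVE_ID_MASK;
	v |= (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) & SQ_IND_INDEX__SIMD_ID_MASK;
	v |= (regno << SQ_IND_INDEX__INDEX__SHIFT) & SQ_IND_INDEX__INDEX_MASK;
	if (auto_incr)	/* advance INDEX automatically on each data access */
		v |= SQ_IND_INDEX__AUTO_INCR_MASK;
	return v;
}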
2487//SQ_CMD
2488#define SQ_CMD__CMD__SHIFT 0x0
2489#define SQ_CMD__MODE__SHIFT 0x4
2490#define SQ_CMD__CHECK_VMID__SHIFT 0x7
2491#define SQ_CMD__DATA__SHIFT 0x8
2492#define SQ_CMD__WAVE_ID__SHIFT 0x10
2493#define SQ_CMD__SIMD_ID__SHIFT 0x14
2494#define SQ_CMD__QUEUE_ID__SHIFT 0x18
2495#define SQ_CMD__VM_ID__SHIFT 0x1c
2496#define SQ_CMD__CMD_MASK 0x00000007L
2497#define SQ_CMD__MODE_MASK 0x00000070L
2498#define SQ_CMD__CHECK_VMID_MASK 0x00000080L
2499#define SQ_CMD__DATA_MASK 0x00000F00L
2500#define SQ_CMD__WAVE_ID_MASK 0x000F0000L
2501#define SQ_CMD__SIMD_ID_MASK 0x00300000L
2502#define SQ_CMD__QUEUE_ID_MASK 0x07000000L
2503#define SQ_CMD__VM_ID_MASK 0xF0000000L
2504//SQ_TIME_HI
2505#define SQ_TIME_HI__TIME__SHIFT 0x0
2506#define SQ_TIME_HI__TIME_MASK 0xFFFFFFFFL
2507//SQ_TIME_LO
2508#define SQ_TIME_LO__TIME__SHIFT 0x0
2509#define SQ_TIME_LO__TIME_MASK 0xFFFFFFFFL
2510//SQ_DS_0
2511#define SQ_DS_0__OFFSET0__SHIFT 0x0
2512#define SQ_DS_0__OFFSET1__SHIFT 0x8
2513#define SQ_DS_0__GDS__SHIFT 0x10
2514#define SQ_DS_0__OP__SHIFT 0x11
2515#define SQ_DS_0__ENCODING__SHIFT 0x1a
2516#define SQ_DS_0__OFFSET0_MASK 0x000000FFL
2517#define SQ_DS_0__OFFSET1_MASK 0x0000FF00L
2518#define SQ_DS_0__GDS_MASK 0x00010000L
2519#define SQ_DS_0__OP_MASK 0x01FE0000L
2520#define SQ_DS_0__ENCODING_MASK 0xFC000000L
2521//SQ_DS_1
2522#define SQ_DS_1__ADDR__SHIFT 0x0
2523#define SQ_DS_1__DATA0__SHIFT 0x8
2524#define SQ_DS_1__DATA1__SHIFT 0x10
2525#define SQ_DS_1__VDST__SHIFT 0x18
2526#define SQ_DS_1__ADDR_MASK 0x000000FFL
2527#define SQ_DS_1__DATA0_MASK 0x0000FF00L
2528#define SQ_DS_1__DATA1_MASK 0x00FF0000L
2529#define SQ_DS_1__VDST_MASK 0xFF000000L
2530//SQ_EXP_0
2531#define SQ_EXP_0__EN__SHIFT 0x0
2532#define SQ_EXP_0__TGT__SHIFT 0x4
2533#define SQ_EXP_0__COMPR__SHIFT 0xa
2534#define SQ_EXP_0__DONE__SHIFT 0xb
2535#define SQ_EXP_0__VM__SHIFT 0xc
2536#define SQ_EXP_0__ENCODING__SHIFT 0x1a
2537#define SQ_EXP_0__EN_MASK 0x0000000FL
2538#define SQ_EXP_0__TGT_MASK 0x000003F0L
2539#define SQ_EXP_0__COMPR_MASK 0x00000400L
2540#define SQ_EXP_0__DONE_MASK 0x00000800L
2541#define SQ_EXP_0__VM_MASK 0x00001000L
2542#define SQ_EXP_0__ENCODING_MASK 0xFC000000L
2543//SQ_EXP_1
2544#define SQ_EXP_1__VSRC0__SHIFT 0x0
2545#define SQ_EXP_1__VSRC1__SHIFT 0x8
2546#define SQ_EXP_1__VSRC2__SHIFT 0x10
2547#define SQ_EXP_1__VSRC3__SHIFT 0x18
2548#define SQ_EXP_1__VSRC0_MASK 0x000000FFL
2549#define SQ_EXP_1__VSRC1_MASK 0x0000FF00L
2550#define SQ_EXP_1__VSRC2_MASK 0x00FF0000L
2551#define SQ_EXP_1__VSRC3_MASK 0xFF000000L
2552//SQ_FLAT_0
2553#define SQ_FLAT_0__OFFSET__SHIFT 0x0
2554#define SQ_FLAT_0__LDS__SHIFT 0xd
2555#define SQ_FLAT_0__SEG__SHIFT 0xe
2556#define SQ_FLAT_0__GLC__SHIFT 0x10
2557#define SQ_FLAT_0__SLC__SHIFT 0x11
2558#define SQ_FLAT_0__OP__SHIFT 0x12
2559#define SQ_FLAT_0__ENCODING__SHIFT 0x1a
2560#define SQ_FLAT_0__OFFSET_MASK 0x00000FFFL
2561#define SQ_FLAT_0__LDS_MASK 0x00002000L
2562#define SQ_FLAT_0__SEG_MASK 0x0000C000L
2563#define SQ_FLAT_0__GLC_MASK 0x00010000L
2564#define SQ_FLAT_0__SLC_MASK 0x00020000L
2565#define SQ_FLAT_0__OP_MASK 0x01FC0000L
2566#define SQ_FLAT_0__ENCODING_MASK 0xFC000000L
2567//SQ_FLAT_1
2568#define SQ_FLAT_1__ADDR__SHIFT 0x0
2569#define SQ_FLAT_1__DATA__SHIFT 0x8
2570#define SQ_FLAT_1__SADDR__SHIFT 0x10
2571#define SQ_FLAT_1__NV__SHIFT 0x17
2572#define SQ_FLAT_1__VDST__SHIFT 0x18
2573#define SQ_FLAT_1__ADDR_MASK 0x000000FFL
2574#define SQ_FLAT_1__DATA_MASK 0x0000FF00L
2575#define SQ_FLAT_1__SADDR_MASK 0x007F0000L
2576#define SQ_FLAT_1__NV_MASK 0x00800000L
2577#define SQ_FLAT_1__VDST_MASK 0xFF000000L
2578//SQ_GLBL_0
2579#define SQ_GLBL_0__OFFSET__SHIFT 0x0
2580#define SQ_GLBL_0__LDS__SHIFT 0xd
2581#define SQ_GLBL_0__SEG__SHIFT 0xe
2582#define SQ_GLBL_0__GLC__SHIFT 0x10
2583#define SQ_GLBL_0__SLC__SHIFT 0x11
2584#define SQ_GLBL_0__OP__SHIFT 0x12
2585#define SQ_GLBL_0__ENCODING__SHIFT 0x1a
2586#define SQ_GLBL_0__OFFSET_MASK 0x00001FFFL
2587#define SQ_GLBL_0__LDS_MASK 0x00002000L
2588#define SQ_GLBL_0__SEG_MASK 0x0000C000L
2589#define SQ_GLBL_0__GLC_MASK 0x00010000L
2590#define SQ_GLBL_0__SLC_MASK 0x00020000L
2591#define SQ_GLBL_0__OP_MASK 0x01FC0000L
2592#define SQ_GLBL_0__ENCODING_MASK 0xFC000000L
2593//SQ_GLBL_1
2594#define SQ_GLBL_1__ADDR__SHIFT 0x0
2595#define SQ_GLBL_1__DATA__SHIFT 0x8
2596#define SQ_GLBL_1__SADDR__SHIFT 0x10
2597#define SQ_GLBL_1__NV__SHIFT 0x17
2598#define SQ_GLBL_1__VDST__SHIFT 0x18
2599#define SQ_GLBL_1__ADDR_MASK 0x000000FFL
2600#define SQ_GLBL_1__DATA_MASK 0x0000FF00L
2601#define SQ_GLBL_1__SADDR_MASK 0x007F0000L
2602#define SQ_GLBL_1__NV_MASK 0x00800000L
2603#define SQ_GLBL_1__VDST_MASK 0xFF000000L
2604//SQ_INST
2605#define SQ_INST__ENCODING__SHIFT 0x0
2606#define SQ_INST__ENCODING_MASK 0xFFFFFFFFL
2607//SQ_MIMG_0
2608#define SQ_MIMG_0__OPM__SHIFT 0x0
2609#define SQ_MIMG_0__DMASK__SHIFT 0x8
2610#define SQ_MIMG_0__UNORM__SHIFT 0xc
2611#define SQ_MIMG_0__GLC__SHIFT 0xd
2612#define SQ_MIMG_0__DA__SHIFT 0xe
2613#define SQ_MIMG_0__A16__SHIFT 0xf
2614#define SQ_MIMG_0__TFE__SHIFT 0x10
2615#define SQ_MIMG_0__LWE__SHIFT 0x11
2616#define SQ_MIMG_0__OP__SHIFT 0x12
2617#define SQ_MIMG_0__SLC__SHIFT 0x19
2618#define SQ_MIMG_0__ENCODING__SHIFT 0x1a
2619#define SQ_MIMG_0__OPM_MASK 0x00000001L
2620#define SQ_MIMG_0__DMASK_MASK 0x00000F00L
2621#define SQ_MIMG_0__UNORM_MASK 0x00001000L
2622#define SQ_MIMG_0__GLC_MASK 0x00002000L
2623#define SQ_MIMG_0__DA_MASK 0x00004000L
2624#define SQ_MIMG_0__A16_MASK 0x00008000L
2625#define SQ_MIMG_0__TFE_MASK 0x00010000L
2626#define SQ_MIMG_0__LWE_MASK 0x00020000L
2627#define SQ_MIMG_0__OP_MASK 0x01FC0000L
2628#define SQ_MIMG_0__SLC_MASK 0x02000000L
2629#define SQ_MIMG_0__ENCODING_MASK 0xFC000000L
2630//SQ_MIMG_1
2631#define SQ_MIMG_1__VADDR__SHIFT 0x0
2632#define SQ_MIMG_1__VDATA__SHIFT 0x8
2633#define SQ_MIMG_1__SRSRC__SHIFT 0x10
2634#define SQ_MIMG_1__SSAMP__SHIFT 0x15
2635#define SQ_MIMG_1__D16__SHIFT 0x1f
2636#define SQ_MIMG_1__VADDR_MASK 0x000000FFL
2637#define SQ_MIMG_1__VDATA_MASK 0x0000FF00L
2638#define SQ_MIMG_1__SRSRC_MASK 0x001F0000L
2639#define SQ_MIMG_1__SSAMP_MASK 0x03E00000L
2640#define SQ_MIMG_1__D16_MASK 0x80000000L
2641//SQ_MTBUF_0
2642#define SQ_MTBUF_0__OFFSET__SHIFT 0x0
2643#define SQ_MTBUF_0__OFFEN__SHIFT 0xc
2644#define SQ_MTBUF_0__IDXEN__SHIFT 0xd
2645#define SQ_MTBUF_0__GLC__SHIFT 0xe
2646#define SQ_MTBUF_0__OP__SHIFT 0xf
2647#define SQ_MTBUF_0__DFMT__SHIFT 0x13
2648#define SQ_MTBUF_0__NFMT__SHIFT 0x17
2649#define SQ_MTBUF_0__ENCODING__SHIFT 0x1a
2650#define SQ_MTBUF_0__OFFSET_MASK 0x00000FFFL
2651#define SQ_MTBUF_0__OFFEN_MASK 0x00001000L
2652#define SQ_MTBUF_0__IDXEN_MASK 0x00002000L
2653#define SQ_MTBUF_0__GLC_MASK 0x00004000L
2654#define SQ_MTBUF_0__OP_MASK 0x00078000L
2655#define SQ_MTBUF_0__DFMT_MASK 0x00780000L
2656#define SQ_MTBUF_0__NFMT_MASK 0x03800000L
2657#define SQ_MTBUF_0__ENCODING_MASK 0xFC000000L
2658//SQ_MTBUF_1
2659#define SQ_MTBUF_1__VADDR__SHIFT 0x0
2660#define SQ_MTBUF_1__VDATA__SHIFT 0x8
2661#define SQ_MTBUF_1__SRSRC__SHIFT 0x10
2662#define SQ_MTBUF_1__SLC__SHIFT 0x16
2663#define SQ_MTBUF_1__TFE__SHIFT 0x17
2664#define SQ_MTBUF_1__SOFFSET__SHIFT 0x18
2665#define SQ_MTBUF_1__VADDR_MASK 0x000000FFL
2666#define SQ_MTBUF_1__VDATA_MASK 0x0000FF00L
2667#define SQ_MTBUF_1__SRSRC_MASK 0x001F0000L
2668#define SQ_MTBUF_1__SLC_MASK 0x00400000L
2669#define SQ_MTBUF_1__TFE_MASK 0x00800000L
2670#define SQ_MTBUF_1__SOFFSET_MASK 0xFF000000L
2671//SQ_MUBUF_0
2672#define SQ_MUBUF_0__OFFSET__SHIFT 0x0
2673#define SQ_MUBUF_0__OFFEN__SHIFT 0xc
2674#define SQ_MUBUF_0__IDXEN__SHIFT 0xd
2675#define SQ_MUBUF_0__GLC__SHIFT 0xe
2676#define SQ_MUBUF_0__LDS__SHIFT 0x10
2677#define SQ_MUBUF_0__SLC__SHIFT 0x11
2678#define SQ_MUBUF_0__OP__SHIFT 0x12
2679#define SQ_MUBUF_0__ENCODING__SHIFT 0x1a
2680#define SQ_MUBUF_0__OFFSET_MASK 0x00000FFFL
2681#define SQ_MUBUF_0__OFFEN_MASK 0x00001000L
2682#define SQ_MUBUF_0__IDXEN_MASK 0x00002000L
2683#define SQ_MUBUF_0__GLC_MASK 0x00004000L
2684#define SQ_MUBUF_0__LDS_MASK 0x00010000L
2685#define SQ_MUBUF_0__SLC_MASK 0x00020000L
2686#define SQ_MUBUF_0__OP_MASK 0x01FC0000L
2687#define SQ_MUBUF_0__ENCODING_MASK 0xFC000000L
2688//SQ_MUBUF_1
2689#define SQ_MUBUF_1__VADDR__SHIFT 0x0
2690#define SQ_MUBUF_1__VDATA__SHIFT 0x8
2691#define SQ_MUBUF_1__SRSRC__SHIFT 0x10
2692#define SQ_MUBUF_1__TFE__SHIFT 0x17
2693#define SQ_MUBUF_1__SOFFSET__SHIFT 0x18
2694#define SQ_MUBUF_1__VADDR_MASK 0x000000FFL
2695#define SQ_MUBUF_1__VDATA_MASK 0x0000FF00L
2696#define SQ_MUBUF_1__SRSRC_MASK 0x001F0000L
2697#define SQ_MUBUF_1__TFE_MASK 0x00800000L
2698#define SQ_MUBUF_1__SOFFSET_MASK 0xFF000000L
2699//SQ_SCRATCH_0
2700#define SQ_SCRATCH_0__OFFSET__SHIFT 0x0
2701#define SQ_SCRATCH_0__LDS__SHIFT 0xd
2702#define SQ_SCRATCH_0__SEG__SHIFT 0xe
2703#define SQ_SCRATCH_0__GLC__SHIFT 0x10
2704#define SQ_SCRATCH_0__SLC__SHIFT 0x11
2705#define SQ_SCRATCH_0__OP__SHIFT 0x12
2706#define SQ_SCRATCH_0__ENCODING__SHIFT 0x1a
2707#define SQ_SCRATCH_0__OFFSET_MASK 0x00001FFFL
2708#define SQ_SCRATCH_0__LDS_MASK 0x00002000L
2709#define SQ_SCRATCH_0__SEG_MASK 0x0000C000L
2710#define SQ_SCRATCH_0__GLC_MASK 0x00010000L
2711#define SQ_SCRATCH_0__SLC_MASK 0x00020000L
2712#define SQ_SCRATCH_0__OP_MASK 0x01FC0000L
2713#define SQ_SCRATCH_0__ENCODING_MASK 0xFC000000L
2714//SQ_SCRATCH_1
2715#define SQ_SCRATCH_1__ADDR__SHIFT 0x0
2716#define SQ_SCRATCH_1__DATA__SHIFT 0x8
2717#define SQ_SCRATCH_1__SADDR__SHIFT 0x10
2718#define SQ_SCRATCH_1__NV__SHIFT 0x17
2719#define SQ_SCRATCH_1__VDST__SHIFT 0x18
2720#define SQ_SCRATCH_1__ADDR_MASK 0x000000FFL
2721#define SQ_SCRATCH_1__DATA_MASK 0x0000FF00L
2722#define SQ_SCRATCH_1__SADDR_MASK 0x007F0000L
2723#define SQ_SCRATCH_1__NV_MASK 0x00800000L
2724#define SQ_SCRATCH_1__VDST_MASK 0xFF000000L
2725//SQ_SMEM_0
2726#define SQ_SMEM_0__SBASE__SHIFT 0x0
2727#define SQ_SMEM_0__SDATA__SHIFT 0x6
2728#define SQ_SMEM_0__SOFFSET_EN__SHIFT 0xe
2729#define SQ_SMEM_0__NV__SHIFT 0xf
2730#define SQ_SMEM_0__GLC__SHIFT 0x10
2731#define SQ_SMEM_0__IMM__SHIFT 0x11
2732#define SQ_SMEM_0__OP__SHIFT 0x12
2733#define SQ_SMEM_0__ENCODING__SHIFT 0x1a
2734#define SQ_SMEM_0__SBASE_MASK 0x0000003FL
2735#define SQ_SMEM_0__SDATA_MASK 0x00001FC0L
2736#define SQ_SMEM_0__SOFFSET_EN_MASK 0x00004000L
2737#define SQ_SMEM_0__NV_MASK 0x00008000L
2738#define SQ_SMEM_0__GLC_MASK 0x00010000L
2739#define SQ_SMEM_0__IMM_MASK 0x00020000L
2740#define SQ_SMEM_0__OP_MASK 0x03FC0000L
2741#define SQ_SMEM_0__ENCODING_MASK 0xFC000000L
2742//SQ_SMEM_1
2743#define SQ_SMEM_1__OFFSET__SHIFT 0x0
2744#define SQ_SMEM_1__SOFFSET__SHIFT 0x19
2745#define SQ_SMEM_1__OFFSET_MASK 0x001FFFFFL
2746#define SQ_SMEM_1__SOFFSET_MASK 0xFE000000L
2747//SQ_SOP1
2748#define SQ_SOP1__SSRC0__SHIFT 0x0
2749#define SQ_SOP1__OP__SHIFT 0x8
2750#define SQ_SOP1__SDST__SHIFT 0x10
2751#define SQ_SOP1__ENCODING__SHIFT 0x17
2752#define SQ_SOP1__SSRC0_MASK 0x000000FFL
2753#define SQ_SOP1__OP_MASK 0x0000FF00L
2754#define SQ_SOP1__SDST_MASK 0x007F0000L
2755#define SQ_SOP1__ENCODING_MASK 0xFF800000L
2756//SQ_SOP2
2757#define SQ_SOP2__SSRC0__SHIFT 0x0
2758#define SQ_SOP2__SSRC1__SHIFT 0x8
2759#define SQ_SOP2__SDST__SHIFT 0x10
2760#define SQ_SOP2__OP__SHIFT 0x17
2761#define SQ_SOP2__ENCODING__SHIFT 0x1e
2762#define SQ_SOP2__SSRC0_MASK 0x000000FFL
2763#define SQ_SOP2__SSRC1_MASK 0x0000FF00L
2764#define SQ_SOP2__SDST_MASK 0x007F0000L
2765#define SQ_SOP2__OP_MASK 0x3F800000L
2766#define SQ_SOP2__ENCODING_MASK 0xC0000000L
2767//SQ_SOPC
2768#define SQ_SOPC__SSRC0__SHIFT 0x0
2769#define SQ_SOPC__SSRC1__SHIFT 0x8
2770#define SQ_SOPC__OP__SHIFT 0x10
2771#define SQ_SOPC__ENCODING__SHIFT 0x17
2772#define SQ_SOPC__SSRC0_MASK 0x000000FFL
2773#define SQ_SOPC__SSRC1_MASK 0x0000FF00L
2774#define SQ_SOPC__OP_MASK 0x007F0000L
2775#define SQ_SOPC__ENCODING_MASK 0xFF800000L
2776//SQ_SOPK
2777#define SQ_SOPK__SIMM16__SHIFT 0x0
2778#define SQ_SOPK__SDST__SHIFT 0x10
2779#define SQ_SOPK__OP__SHIFT 0x17
2780#define SQ_SOPK__ENCODING__SHIFT 0x1c
2781#define SQ_SOPK__SIMM16_MASK 0x0000FFFFL
2782#define SQ_SOPK__SDST_MASK 0x007F0000L
2783#define SQ_SOPK__OP_MASK 0x0F800000L
2784#define SQ_SOPK__ENCODING_MASK 0xF0000000L
2785//SQ_SOPP
2786#define SQ_SOPP__SIMM16__SHIFT 0x0
2787#define SQ_SOPP__OP__SHIFT 0x10
2788#define SQ_SOPP__ENCODING__SHIFT 0x17
2789#define SQ_SOPP__SIMM16_MASK 0x0000FFFFL
2790#define SQ_SOPP__OP_MASK 0x007F0000L
2791#define SQ_SOPP__ENCODING_MASK 0xFF800000L
2792//SQ_VINTRP
2793#define SQ_VINTRP__VSRC__SHIFT 0x0
2794#define SQ_VINTRP__ATTRCHAN__SHIFT 0x8
2795#define SQ_VINTRP__ATTR__SHIFT 0xa
2796#define SQ_VINTRP__OP__SHIFT 0x10
2797#define SQ_VINTRP__VDST__SHIFT 0x12
2798#define SQ_VINTRP__ENCODING__SHIFT 0x1a
2799#define SQ_VINTRP__VSRC_MASK 0x000000FFL
2800#define SQ_VINTRP__ATTRCHAN_MASK 0x00000300L
2801#define SQ_VINTRP__ATTR_MASK 0x0000FC00L
2802#define SQ_VINTRP__OP_MASK 0x00030000L
2803#define SQ_VINTRP__VDST_MASK 0x03FC0000L
2804#define SQ_VINTRP__ENCODING_MASK 0xFC000000L
2805//SQ_VOP1
2806#define SQ_VOP1__SRC0__SHIFT 0x0
2807#define SQ_VOP1__OP__SHIFT 0x9
2808#define SQ_VOP1__VDST__SHIFT 0x11
2809#define SQ_VOP1__ENCODING__SHIFT 0x19
2810#define SQ_VOP1__SRC0_MASK 0x000001FFL
2811#define SQ_VOP1__OP_MASK 0x0001FE00L
2812#define SQ_VOP1__VDST_MASK 0x01FE0000L
2813#define SQ_VOP1__ENCODING_MASK 0xFE000000L
2814//SQ_VOP2
2815#define SQ_VOP2__SRC0__SHIFT 0x0
2816#define SQ_VOP2__VSRC1__SHIFT 0x9
2817#define SQ_VOP2__VDST__SHIFT 0x11
2818#define SQ_VOP2__OP__SHIFT 0x19
2819#define SQ_VOP2__ENCODING__SHIFT 0x1f
2820#define SQ_VOP2__SRC0_MASK 0x000001FFL
2821#define SQ_VOP2__VSRC1_MASK 0x0001FE00L
2822#define SQ_VOP2__VDST_MASK 0x01FE0000L
2823#define SQ_VOP2__OP_MASK 0x7E000000L
2824#define SQ_VOP2__ENCODING_MASK 0x80000000L
2825//SQ_VOP3P_0
2826#define SQ_VOP3P_0__VDST__SHIFT 0x0
2827#define SQ_VOP3P_0__NEG_HI__SHIFT 0x8
2828#define SQ_VOP3P_0__OP_SEL__SHIFT 0xb
2829#define SQ_VOP3P_0__OP_SEL_HI_2__SHIFT 0xe
2830#define SQ_VOP3P_0__CLAMP__SHIFT 0xf
2831#define SQ_VOP3P_0__OP__SHIFT 0x10
2832#define SQ_VOP3P_0__ENCODING__SHIFT 0x17
2833#define SQ_VOP3P_0__VDST_MASK 0x000000FFL
2834#define SQ_VOP3P_0__NEG_HI_MASK 0x00000700L
2835#define SQ_VOP3P_0__OP_SEL_MASK 0x00003800L
2836#define SQ_VOP3P_0__OP_SEL_HI_2_MASK 0x00004000L
2837#define SQ_VOP3P_0__CLAMP_MASK 0x00008000L
2838#define SQ_VOP3P_0__OP_MASK 0x007F0000L
2839#define SQ_VOP3P_0__ENCODING_MASK 0xFF800000L
2840//SQ_VOP3P_1
2841#define SQ_VOP3P_1__SRC0__SHIFT 0x0
2842#define SQ_VOP3P_1__SRC1__SHIFT 0x9
2843#define SQ_VOP3P_1__SRC2__SHIFT 0x12
2844#define SQ_VOP3P_1__OP_SEL_HI__SHIFT 0x1b
2845#define SQ_VOP3P_1__NEG__SHIFT 0x1d
2846#define SQ_VOP3P_1__SRC0_MASK 0x000001FFL
2847#define SQ_VOP3P_1__SRC1_MASK 0x0003FE00L
2848#define SQ_VOP3P_1__SRC2_MASK 0x07FC0000L
2849#define SQ_VOP3P_1__OP_SEL_HI_MASK 0x18000000L
2850#define SQ_VOP3P_1__NEG_MASK 0xE0000000L
2851//SQ_VOP3_0
2852#define SQ_VOP3_0__VDST__SHIFT 0x0
2853#define SQ_VOP3_0__ABS__SHIFT 0x8
2854#define SQ_VOP3_0__OP_SEL__SHIFT 0xb
2855#define SQ_VOP3_0__CLAMP__SHIFT 0xf
2856#define SQ_VOP3_0__OP__SHIFT 0x10
2857#define SQ_VOP3_0__ENCODING__SHIFT 0x1a
2858#define SQ_VOP3_0__VDST_MASK 0x000000FFL
2859#define SQ_VOP3_0__ABS_MASK 0x00000700L
2860#define SQ_VOP3_0__OP_SEL_MASK 0x00007800L
2861#define SQ_VOP3_0__CLAMP_MASK 0x00008000L
2862#define SQ_VOP3_0__OP_MASK 0x03FF0000L
2863#define SQ_VOP3_0__ENCODING_MASK 0xFC000000L
2864//SQ_VOP3_0_SDST_ENC
2865#define SQ_VOP3_0_SDST_ENC__VDST__SHIFT 0x0
2866#define SQ_VOP3_0_SDST_ENC__SDST__SHIFT 0x8
2867#define SQ_VOP3_0_SDST_ENC__CLAMP__SHIFT 0xf
2868#define SQ_VOP3_0_SDST_ENC__OP__SHIFT 0x10
2869#define SQ_VOP3_0_SDST_ENC__ENCODING__SHIFT 0x1a
2870#define SQ_VOP3_0_SDST_ENC__VDST_MASK 0x000000FFL
2871#define SQ_VOP3_0_SDST_ENC__SDST_MASK 0x00007F00L
2872#define SQ_VOP3_0_SDST_ENC__CLAMP_MASK 0x00008000L
2873#define SQ_VOP3_0_SDST_ENC__OP_MASK 0x03FF0000L
2874#define SQ_VOP3_0_SDST_ENC__ENCODING_MASK 0xFC000000L
2875//SQ_VOP3_1
2876#define SQ_VOP3_1__SRC0__SHIFT 0x0
2877#define SQ_VOP3_1__SRC1__SHIFT 0x9
2878#define SQ_VOP3_1__SRC2__SHIFT 0x12
2879#define SQ_VOP3_1__OMOD__SHIFT 0x1b
2880#define SQ_VOP3_1__NEG__SHIFT 0x1d
2881#define SQ_VOP3_1__SRC0_MASK 0x000001FFL
2882#define SQ_VOP3_1__SRC1_MASK 0x0003FE00L
2883#define SQ_VOP3_1__SRC2_MASK 0x07FC0000L
2884#define SQ_VOP3_1__OMOD_MASK 0x18000000L
2885#define SQ_VOP3_1__NEG_MASK 0xE0000000L
2886//SQ_VOPC
2887#define SQ_VOPC__SRC0__SHIFT 0x0
2888#define SQ_VOPC__VSRC1__SHIFT 0x9
2889#define SQ_VOPC__OP__SHIFT 0x11
2890#define SQ_VOPC__ENCODING__SHIFT 0x19
2891#define SQ_VOPC__SRC0_MASK 0x000001FFL
2892#define SQ_VOPC__VSRC1_MASK 0x0001FE00L
2893#define SQ_VOPC__OP_MASK 0x01FE0000L
2894#define SQ_VOPC__ENCODING_MASK 0xFE000000L
2895//SQ_VOP_DPP
2896#define SQ_VOP_DPP__SRC0__SHIFT 0x0
2897#define SQ_VOP_DPP__DPP_CTRL__SHIFT 0x8
2898#define SQ_VOP_DPP__BOUND_CTRL__SHIFT 0x13
2899#define SQ_VOP_DPP__SRC0_NEG__SHIFT 0x14
2900#define SQ_VOP_DPP__SRC0_ABS__SHIFT 0x15
2901#define SQ_VOP_DPP__SRC1_NEG__SHIFT 0x16
2902#define SQ_VOP_DPP__SRC1_ABS__SHIFT 0x17
2903#define SQ_VOP_DPP__BANK_MASK__SHIFT 0x18
2904#define SQ_VOP_DPP__ROW_MASK__SHIFT 0x1c
2905#define SQ_VOP_DPP__SRC0_MASK 0x000000FFL
2906#define SQ_VOP_DPP__DPP_CTRL_MASK 0x0001FF00L
2907#define SQ_VOP_DPP__BOUND_CTRL_MASK 0x00080000L
2908#define SQ_VOP_DPP__SRC0_NEG_MASK 0x00100000L
2909#define SQ_VOP_DPP__SRC0_ABS_MASK 0x00200000L
2910#define SQ_VOP_DPP__SRC1_NEG_MASK 0x00400000L
2911#define SQ_VOP_DPP__SRC1_ABS_MASK 0x00800000L
2912#define SQ_VOP_DPP__BANK_MASK_MASK 0x0F000000L
2913#define SQ_VOP_DPP__ROW_MASK_MASK 0xF0000000L
2914//SQ_VOP_SDWA
2915#define SQ_VOP_SDWA__SRC0__SHIFT 0x0
2916#define SQ_VOP_SDWA__DST_SEL__SHIFT 0x8
2917#define SQ_VOP_SDWA__DST_UNUSED__SHIFT 0xb
2918#define SQ_VOP_SDWA__CLAMP__SHIFT 0xd
2919#define SQ_VOP_SDWA__OMOD__SHIFT 0xe
2920#define SQ_VOP_SDWA__SRC0_SEL__SHIFT 0x10
2921#define SQ_VOP_SDWA__SRC0_SEXT__SHIFT 0x13
2922#define SQ_VOP_SDWA__SRC0_NEG__SHIFT 0x14
2923#define SQ_VOP_SDWA__SRC0_ABS__SHIFT 0x15
2924#define SQ_VOP_SDWA__S0__SHIFT 0x17
2925#define SQ_VOP_SDWA__SRC1_SEL__SHIFT 0x18
2926#define SQ_VOP_SDWA__SRC1_SEXT__SHIFT 0x1b
2927#define SQ_VOP_SDWA__SRC1_NEG__SHIFT 0x1c
2928#define SQ_VOP_SDWA__SRC1_ABS__SHIFT 0x1d
2929#define SQ_VOP_SDWA__S1__SHIFT 0x1f
2930#define SQ_VOP_SDWA__SRC0_MASK 0x000000FFL
2931#define SQ_VOP_SDWA__DST_SEL_MASK 0x00000700L
2932#define SQ_VOP_SDWA__DST_UNUSED_MASK 0x00001800L
2933#define SQ_VOP_SDWA__CLAMP_MASK 0x00002000L
2934#define SQ_VOP_SDWA__OMOD_MASK 0x0000C000L
2935#define SQ_VOP_SDWA__SRC0_SEL_MASK 0x00070000L
2936#define SQ_VOP_SDWA__SRC0_SEXT_MASK 0x00080000L
2937#define SQ_VOP_SDWA__SRC0_NEG_MASK 0x00100000L
2938#define SQ_VOP_SDWA__SRC0_ABS_MASK 0x00200000L
2939#define SQ_VOP_SDWA__S0_MASK 0x00800000L
2940#define SQ_VOP_SDWA__SRC1_SEL_MASK 0x07000000L
2941#define SQ_VOP_SDWA__SRC1_SEXT_MASK 0x08000000L
2942#define SQ_VOP_SDWA__SRC1_NEG_MASK 0x10000000L
2943#define SQ_VOP_SDWA__SRC1_ABS_MASK 0x20000000L
2944#define SQ_VOP_SDWA__S1_MASK 0x80000000L
2945//SQ_VOP_SDWA_SDST_ENC
2946#define SQ_VOP_SDWA_SDST_ENC__SRC0__SHIFT 0x0
2947#define SQ_VOP_SDWA_SDST_ENC__SDST__SHIFT 0x8
2948#define SQ_VOP_SDWA_SDST_ENC__SD__SHIFT 0xf
2949#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEL__SHIFT 0x10
2950#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEXT__SHIFT 0x13
2951#define SQ_VOP_SDWA_SDST_ENC__SRC0_NEG__SHIFT 0x14
2952#define SQ_VOP_SDWA_SDST_ENC__SRC0_ABS__SHIFT 0x15
2953#define SQ_VOP_SDWA_SDST_ENC__S0__SHIFT 0x17
2954#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEL__SHIFT 0x18
2955#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEXT__SHIFT 0x1b
2956#define SQ_VOP_SDWA_SDST_ENC__SRC1_NEG__SHIFT 0x1c
2957#define SQ_VOP_SDWA_SDST_ENC__SRC1_ABS__SHIFT 0x1d
2958#define SQ_VOP_SDWA_SDST_ENC__S1__SHIFT 0x1f
2959#define SQ_VOP_SDWA_SDST_ENC__SRC0_MASK 0x000000FFL
2960#define SQ_VOP_SDWA_SDST_ENC__SDST_MASK 0x00007F00L
2961#define SQ_VOP_SDWA_SDST_ENC__SD_MASK 0x00008000L
2962#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEL_MASK 0x00070000L
2963#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEXT_MASK 0x00080000L
2964#define SQ_VOP_SDWA_SDST_ENC__SRC0_NEG_MASK 0x00100000L
2965#define SQ_VOP_SDWA_SDST_ENC__SRC0_ABS_MASK 0x00200000L
2966#define SQ_VOP_SDWA_SDST_ENC__S0_MASK 0x00800000L
2967#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEL_MASK 0x07000000L
2968#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEXT_MASK 0x08000000L
2969#define SQ_VOP_SDWA_SDST_ENC__SRC1_NEG_MASK 0x10000000L
2970#define SQ_VOP_SDWA_SDST_ENC__SRC1_ABS_MASK 0x20000000L
2971#define SQ_VOP_SDWA_SDST_ENC__S1_MASK 0x80000000L
2972//SQ_LB_CTR_CTRL
2973#define SQ_LB_CTR_CTRL__START__SHIFT 0x0
2974#define SQ_LB_CTR_CTRL__LOAD__SHIFT 0x1
2975#define SQ_LB_CTR_CTRL__CLEAR__SHIFT 0x2
2976#define SQ_LB_CTR_CTRL__START_MASK 0x00000001L
2977#define SQ_LB_CTR_CTRL__LOAD_MASK 0x00000002L
2978#define SQ_LB_CTR_CTRL__CLEAR_MASK 0x00000004L
2979//SQ_LB_DATA0
2980#define SQ_LB_DATA0__DATA__SHIFT 0x0
2981#define SQ_LB_DATA0__DATA_MASK 0xFFFFFFFFL
2982//SQ_LB_DATA1
2983#define SQ_LB_DATA1__DATA__SHIFT 0x0
2984#define SQ_LB_DATA1__DATA_MASK 0xFFFFFFFFL
2985//SQ_LB_DATA2
2986#define SQ_LB_DATA2__DATA__SHIFT 0x0
2987#define SQ_LB_DATA2__DATA_MASK 0xFFFFFFFFL
2988//SQ_LB_DATA3
2989#define SQ_LB_DATA3__DATA__SHIFT 0x0
2990#define SQ_LB_DATA3__DATA_MASK 0xFFFFFFFFL
2991//SQ_LB_CTR_SEL
2992#define SQ_LB_CTR_SEL__SEL0__SHIFT 0x0
2993#define SQ_LB_CTR_SEL__SEL1__SHIFT 0x4
2994#define SQ_LB_CTR_SEL__SEL2__SHIFT 0x8
2995#define SQ_LB_CTR_SEL__SEL3__SHIFT 0xc
2996#define SQ_LB_CTR_SEL__SEL0_MASK 0x0000000FL
2997#define SQ_LB_CTR_SEL__SEL1_MASK 0x000000F0L
2998#define SQ_LB_CTR_SEL__SEL2_MASK 0x00000F00L
2999#define SQ_LB_CTR_SEL__SEL3_MASK 0x0000F000L
3000//SQ_LB_CTR0_CU
3001#define SQ_LB_CTR0_CU__SH0_MASK__SHIFT 0x0
3002#define SQ_LB_CTR0_CU__SH1_MASK__SHIFT 0x10
3003#define SQ_LB_CTR0_CU__SH0_MASK_MASK 0x0000FFFFL
3004#define SQ_LB_CTR0_CU__SH1_MASK_MASK 0xFFFF0000L
3005//SQ_LB_CTR1_CU
3006#define SQ_LB_CTR1_CU__SH0_MASK__SHIFT 0x0
3007#define SQ_LB_CTR1_CU__SH1_MASK__SHIFT 0x10
3008#define SQ_LB_CTR1_CU__SH0_MASK_MASK 0x0000FFFFL
3009#define SQ_LB_CTR1_CU__SH1_MASK_MASK 0xFFFF0000L
3010//SQ_LB_CTR2_CU
3011#define SQ_LB_CTR2_CU__SH0_MASK__SHIFT 0x0
3012#define SQ_LB_CTR2_CU__SH1_MASK__SHIFT 0x10
3013#define SQ_LB_CTR2_CU__SH0_MASK_MASK 0x0000FFFFL
3014#define SQ_LB_CTR2_CU__SH1_MASK_MASK 0xFFFF0000L
3015//SQ_LB_CTR3_CU
3016#define SQ_LB_CTR3_CU__SH0_MASK__SHIFT 0x0
3017#define SQ_LB_CTR3_CU__SH1_MASK__SHIFT 0x10
3018#define SQ_LB_CTR3_CU__SH0_MASK_MASK 0x0000FFFFL
3019#define SQ_LB_CTR3_CU__SH1_MASK_MASK 0xFFFF0000L
3020//SQC_EDC_CNT
3021#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x0
3022#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x2
3023#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x4
3024#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT 0x6
3025#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x8
3026#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT 0xa
3027#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT 0xc
3028#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT 0xe
3029#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x10
3030#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x12
3031#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x14
3032#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT 0x16
3033#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x18
3034#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x1a
3035#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1c
3036#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1e
3037#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000003L
3038#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK 0x0000000CL
3039#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK 0x00000030L
3040#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK 0x000000C0L
3041#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000300L
3042#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK 0x00000C00L
3043#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK 0x00003000L
3044#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK 0x0000C000L
3045#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00030000L
3046#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK 0x000C0000L
3047#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK 0x00300000L
3048#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK 0x00C00000L
3049#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK 0x03000000L
3050#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK 0x0C000000L
3051#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK 0x30000000L
3052#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK 0xC0000000L
3053//SQ_EDC_SEC_CNT
3054#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT 0x0
3055#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT 0x8
3056#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT 0x10
3057#define SQ_EDC_SEC_CNT__LDS_SEC_MASK 0x000000FFL
3058#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK 0x0000FF00L
3059#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK 0x00FF0000L
3060//SQ_EDC_DED_CNT
3061#define SQ_EDC_DED_CNT__LDS_DED__SHIFT 0x0
3062#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT 0x8
3063#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT 0x10
3064#define SQ_EDC_DED_CNT__LDS_DED_MASK 0x000000FFL
3065#define SQ_EDC_DED_CNT__SGPR_DED_MASK 0x0000FF00L
3066#define SQ_EDC_DED_CNT__VGPR_DED_MASK 0x00FF0000L
3067//SQ_EDC_INFO
3068#define SQ_EDC_INFO__WAVE_ID__SHIFT 0x0
3069#define SQ_EDC_INFO__SIMD_ID__SHIFT 0x4
3070#define SQ_EDC_INFO__SOURCE__SHIFT 0x6
3071#define SQ_EDC_INFO__VM_ID__SHIFT 0x9
3072#define SQ_EDC_INFO__WAVE_ID_MASK 0x0000000FL
3073#define SQ_EDC_INFO__SIMD_ID_MASK 0x00000030L
3074#define SQ_EDC_INFO__SOURCE_MASK 0x000001C0L
3075#define SQ_EDC_INFO__VM_ID_MASK 0x00001E00L
3076//SQ_EDC_CNT
3077#define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT 0x0
3078#define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT 0x2
3079#define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT 0x4
3080#define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT 0x6
3081#define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT 0x8
3082#define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT 0xa
3083#define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT 0xc
3084#define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT 0xe
3085#define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT 0x10
3086#define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT 0x12
3087#define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT 0x14
3088#define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT 0x16
3089#define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT 0x18
3090#define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT 0x1a
3091#define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK 0x00000003L
3092#define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK 0x0000000CL
3093#define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK 0x00000030L
3094#define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK 0x000000C0L
3095#define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK 0x00000300L
3096#define SQ_EDC_CNT__SGPR_DED_COUNT_MASK 0x00000C00L
3097#define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK 0x00003000L
3098#define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK 0x0000C000L
3099#define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK 0x00030000L
3100#define SQ_EDC_CNT__VGPR1_DED_COUNT_MASK 0x000C0000L
3101#define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK 0x00300000L
3102#define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK 0x00C00000L
3103#define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK 0x03000000L
3104#define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK 0x0C000000L
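/* Illustrative sketch, not part of the original header: SQ_EDC_CNT packs
 * 2-bit single-error-corrected (SEC) and double-error-detected (DED)
 * counters per memory.  The hypothetical helper below totals the SEC counts
 * from a raw register value; how that value is read (e.g. via an MMIO
 * accessor) is outside the scope of this header. */
static inline unsigned int sq_edc_cnt_total_sec(unsigned int reg)
{
	return ((reg & SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK) >> SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT) +
	       ((reg & SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK) >> SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT) +
	       ((reg & SQ_EDC_CNT__SGPR_SEC_COUNT_MASK)  >> SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT) +
	       ((reg & SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK) >> SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT) +
	       ((reg & SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK) >> SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT) +
	       ((reg & SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK) >> SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT) +
	       ((reg & SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK) >> SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT);
}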
3105//SQ_EDC_FUE_CNTL
3106#define SQ_EDC_FUE_CNTL__BLOCK_FUE_FLAGS__SHIFT 0x0
3107#define SQ_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES__SHIFT 0x10
3108#define SQ_EDC_FUE_CNTL__BLOCK_FUE_FLAGS_MASK 0x0000FFFFL
3109#define SQ_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES_MASK 0xFFFF0000L
3110//SQ_THREAD_TRACE_WORD_CMN
3111#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE__SHIFT 0x0
3112#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA__SHIFT 0x4
3113#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE_MASK 0x000FL
3114#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA_MASK 0x0010L
3115//SQ_THREAD_TRACE_WORD_EVENT
3116#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE__SHIFT 0x0
3117#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA__SHIFT 0x4
3118#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID__SHIFT 0x5
3119#define SQ_THREAD_TRACE_WORD_EVENT__STAGE__SHIFT 0x6
3120#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE__SHIFT 0xa
3121#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE_MASK 0x000FL
3122#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA_MASK 0x0010L
3123#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID_MASK 0x0020L
3124#define SQ_THREAD_TRACE_WORD_EVENT__STAGE_MASK 0x01C0L
3125#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE_MASK 0xFC00L
3126//SQ_THREAD_TRACE_WORD_INST
3127#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE__SHIFT 0x0
3128#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA__SHIFT 0x4
3129#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID__SHIFT 0x5
3130#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID__SHIFT 0x9
3131#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE__SHIFT 0xb
3132#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE_MASK 0x000FL
3133#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA_MASK 0x0010L
3134#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID_MASK 0x01E0L
3135#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID_MASK 0x0600L
3136#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE_MASK 0xF800L
3137//SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2
3138#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE__SHIFT 0x0
3139#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA__SHIFT 0x4
3140#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID__SHIFT 0x5
3141#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID__SHIFT 0x9
3142#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TRAP_ERROR__SHIFT 0xf
3143#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO__SHIFT 0x10
3144#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
3145#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA_MASK 0x00000010L
3146#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID_MASK 0x000001E0L
3147#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID_MASK 0x00000600L
3148#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TRAP_ERROR_MASK 0x00008000L
3149#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO_MASK 0xFFFF0000L
3150//SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2
3151#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE__SHIFT 0x0
3152#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA__SHIFT 0x4
3153#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID__SHIFT 0x5
3154#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID__SHIFT 0x6
3155#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID__SHIFT 0xa
3156#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID__SHIFT 0xe
3157#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO__SHIFT 0x10
3158#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
3159#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA_MASK 0x00000010L
3160#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID_MASK 0x00000020L
3161#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID_MASK 0x000003C0L
3162#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID_MASK 0x00003C00L
3163#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID_MASK 0x0000C000L
3164#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO_MASK 0xFFFF0000L
3165//SQ_THREAD_TRACE_WORD_ISSUE
3166#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE__SHIFT 0x0
3167#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA__SHIFT 0x4
3168#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID__SHIFT 0x5
3169#define SQ_THREAD_TRACE_WORD_ISSUE__INST0__SHIFT 0x8
3170#define SQ_THREAD_TRACE_WORD_ISSUE__INST1__SHIFT 0xa
3171#define SQ_THREAD_TRACE_WORD_ISSUE__INST2__SHIFT 0xc
3172#define SQ_THREAD_TRACE_WORD_ISSUE__INST3__SHIFT 0xe
3173#define SQ_THREAD_TRACE_WORD_ISSUE__INST4__SHIFT 0x10
3174#define SQ_THREAD_TRACE_WORD_ISSUE__INST5__SHIFT 0x12
3175#define SQ_THREAD_TRACE_WORD_ISSUE__INST6__SHIFT 0x14
3176#define SQ_THREAD_TRACE_WORD_ISSUE__INST7__SHIFT 0x16
3177#define SQ_THREAD_TRACE_WORD_ISSUE__INST8__SHIFT 0x18
3178#define SQ_THREAD_TRACE_WORD_ISSUE__INST9__SHIFT 0x1a
3179#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE_MASK 0x0000000FL
3180#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA_MASK 0x00000010L
3181#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID_MASK 0x00000060L
3182#define SQ_THREAD_TRACE_WORD_ISSUE__INST0_MASK 0x00000300L
3183#define SQ_THREAD_TRACE_WORD_ISSUE__INST1_MASK 0x00000C00L
3184#define SQ_THREAD_TRACE_WORD_ISSUE__INST2_MASK 0x00003000L
3185#define SQ_THREAD_TRACE_WORD_ISSUE__INST3_MASK 0x0000C000L
3186#define SQ_THREAD_TRACE_WORD_ISSUE__INST4_MASK 0x00030000L
3187#define SQ_THREAD_TRACE_WORD_ISSUE__INST5_MASK 0x000C0000L
3188#define SQ_THREAD_TRACE_WORD_ISSUE__INST6_MASK 0x00300000L
3189#define SQ_THREAD_TRACE_WORD_ISSUE__INST7_MASK 0x00C00000L
3190#define SQ_THREAD_TRACE_WORD_ISSUE__INST8_MASK 0x03000000L
3191#define SQ_THREAD_TRACE_WORD_ISSUE__INST9_MASK 0x0C000000L
3192//SQ_THREAD_TRACE_WORD_MISC
3193#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE__SHIFT 0x0
3194#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA__SHIFT 0x4
3195#define SQ_THREAD_TRACE_WORD_MISC__SH_ID__SHIFT 0xc
3196#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE__SHIFT 0xd
3197#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE_MASK 0x000FL
3198#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA_MASK 0x0FF0L
3199#define SQ_THREAD_TRACE_WORD_MISC__SH_ID_MASK 0x1000L
3200#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE_MASK 0xE000L
3201//SQ_THREAD_TRACE_WORD_PERF_1_OF_2
3202#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE__SHIFT 0x0
3203#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA__SHIFT 0x4
3204#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID__SHIFT 0x5
3205#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID__SHIFT 0x6
3206#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK__SHIFT 0xa
3207#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0__SHIFT 0xc
3208#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO__SHIFT 0x19
3209#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
3210#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA_MASK 0x00000010L
3211#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID_MASK 0x00000020L
3212#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID_MASK 0x000003C0L
3213#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK_MASK 0x00000C00L
3214#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0_MASK 0x01FFF000L
3215#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO_MASK 0xFE000000L
3216//SQ_THREAD_TRACE_WORD_REG_1_OF_2
3217#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE__SHIFT 0x0
3218#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA__SHIFT 0x4
3219#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID__SHIFT 0x5
3220#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID__SHIFT 0x7
3221#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV__SHIFT 0x9
3222#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE__SHIFT 0xa
3223#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV__SHIFT 0xe
3224#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP__SHIFT 0xf
3225#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR__SHIFT 0x10
3226#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
3227#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA_MASK 0x00000010L
3228#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID_MASK 0x00000060L
3229#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID_MASK 0x00000180L
3230#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV_MASK 0x00000200L
3231#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE_MASK 0x00001C00L
3232#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV_MASK 0x00004000L
3233#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP_MASK 0x00008000L
3234#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR_MASK 0xFFFF0000L
3235//SQ_THREAD_TRACE_WORD_REG_2_OF_2
3236#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA__SHIFT 0x0
3237#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA_MASK 0xFFFFFFFFL
3238//SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2
3239#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TOKEN_TYPE__SHIFT 0x0
3240#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TIME_DELTA__SHIFT 0x4
3241#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__PIPE_ID__SHIFT 0x5
3242#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__ME_ID__SHIFT 0x7
3243#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__REG_ADDR__SHIFT 0x9
3244#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__DATA_LO__SHIFT 0x10
3245#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
3246#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TIME_DELTA_MASK 0x00000010L
3247#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__PIPE_ID_MASK 0x00000060L
3248#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__ME_ID_MASK 0x00000180L
3249#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__REG_ADDR_MASK 0x0000FE00L
3250#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__DATA_LO_MASK 0xFFFF0000L
3251//SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2
3252#define SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2__DATA_HI__SHIFT 0x0
3253#define SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2__DATA_HI_MASK 0x0000FFFFL
3254//SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2
3255#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE__SHIFT 0x0
3256#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO__SHIFT 0x10
3257#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
3258#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO_MASK 0xFFFF0000L
3259//SQ_THREAD_TRACE_WORD_WAVE
3260#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE__SHIFT 0x0
3261#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA__SHIFT 0x4
3262#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID__SHIFT 0x5
3263#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID__SHIFT 0x6
3264#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID__SHIFT 0xa
3265#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID__SHIFT 0xe
3266#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE_MASK 0x000FL
3267#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA_MASK 0x0010L
3268#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID_MASK 0x0020L
3269#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID_MASK 0x03C0L
3270#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID_MASK 0x3C00L
3271#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID_MASK 0xC000L
3272//SQ_THREAD_TRACE_WORD_WAVE_START
3273#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE__SHIFT 0x0
3274#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA__SHIFT 0x4
3275#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID__SHIFT 0x5
3276#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID__SHIFT 0x6
3277#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID__SHIFT 0xa
3278#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID__SHIFT 0xe
3279#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER__SHIFT 0x10
3280#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED__SHIFT 0x15
3281#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT__SHIFT 0x16
3282#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID__SHIFT 0x1d
3283#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE_MASK 0x0000000FL
3284#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA_MASK 0x00000010L
3285#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID_MASK 0x00000020L
3286#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID_MASK 0x000003C0L
3287#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID_MASK 0x00003C00L
3288#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID_MASK 0x0000C000L
3289#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER_MASK 0x001F0000L
3290#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED_MASK 0x00200000L
3291#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT_MASK 0x1FC00000L
3292#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID_MASK 0xE0000000L
3293//SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2
3294#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI__SHIFT 0x0
3295#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI_MASK 0x00FFFFFFL
3296//SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2
3297#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI__SHIFT 0x0
3298#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI_MASK 0xFFFFL
3299//SQ_THREAD_TRACE_WORD_PERF_2_OF_2
3300#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI__SHIFT 0x0
3301#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2__SHIFT 0x6
3302#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3__SHIFT 0x13
3303#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI_MASK 0x0000003FL
3304#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2_MASK 0x0007FFC0L
3305#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3_MASK 0xFFF80000L
3306//SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2
3307#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI__SHIFT 0x0
3308#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI_MASK 0xFFFFFFFFL
3309//SQ_WREXEC_EXEC_HI
3310#define SQ_WREXEC_EXEC_HI__ADDR_HI__SHIFT 0x0
3311#define SQ_WREXEC_EXEC_HI__FIRST_WAVE__SHIFT 0x1a
3312#define SQ_WREXEC_EXEC_HI__ATC__SHIFT 0x1b
3313#define SQ_WREXEC_EXEC_HI__MTYPE__SHIFT 0x1c
3314#define SQ_WREXEC_EXEC_HI__MSB__SHIFT 0x1f
3315#define SQ_WREXEC_EXEC_HI__ADDR_HI_MASK 0x0000FFFFL
3316#define SQ_WREXEC_EXEC_HI__FIRST_WAVE_MASK 0x04000000L
3317#define SQ_WREXEC_EXEC_HI__ATC_MASK 0x08000000L
3318#define SQ_WREXEC_EXEC_HI__MTYPE_MASK 0x70000000L
3319#define SQ_WREXEC_EXEC_HI__MSB_MASK 0x80000000L
3320//SQ_WREXEC_EXEC_LO
3321#define SQ_WREXEC_EXEC_LO__ADDR_LO__SHIFT 0x0
3322#define SQ_WREXEC_EXEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
3323//SQ_BUF_RSRC_WORD0
3324#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x0
3325#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS_MASK 0xFFFFFFFFL
3326//SQ_BUF_RSRC_WORD1
3327#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x0
3328#define SQ_BUF_RSRC_WORD1__STRIDE__SHIFT 0x10
3329#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE__SHIFT 0x1e
3330#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE__SHIFT 0x1f
3331#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x0000FFFFL
3332#define SQ_BUF_RSRC_WORD1__STRIDE_MASK 0x3FFF0000L
3333#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE_MASK 0x40000000L
3334#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE_MASK 0x80000000L
3335//SQ_BUF_RSRC_WORD2
3336#define SQ_BUF_RSRC_WORD2__NUM_RECORDS__SHIFT 0x0
3337#define SQ_BUF_RSRC_WORD2__NUM_RECORDS_MASK 0xFFFFFFFFL
3338//SQ_BUF_RSRC_WORD3
3339#define SQ_BUF_RSRC_WORD3__DST_SEL_X__SHIFT 0x0
3340#define SQ_BUF_RSRC_WORD3__DST_SEL_Y__SHIFT 0x3
3341#define SQ_BUF_RSRC_WORD3__DST_SEL_Z__SHIFT 0x6
3342#define SQ_BUF_RSRC_WORD3__DST_SEL_W__SHIFT 0x9
3343#define SQ_BUF_RSRC_WORD3__NUM_FORMAT__SHIFT 0xc
3344#define SQ_BUF_RSRC_WORD3__DATA_FORMAT__SHIFT 0xf
3345#define SQ_BUF_RSRC_WORD3__USER_VM_ENABLE__SHIFT 0x13
3346#define SQ_BUF_RSRC_WORD3__USER_VM_MODE__SHIFT 0x14
3347#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE__SHIFT 0x15
3348#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE__SHIFT 0x17
3349#define SQ_BUF_RSRC_WORD3__NV__SHIFT 0x1b
3350#define SQ_BUF_RSRC_WORD3__TYPE__SHIFT 0x1e
3351#define SQ_BUF_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
3352#define SQ_BUF_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
3353#define SQ_BUF_RSRC_WORD3__DST_SEL_Z_MASK 0x000001C0L
3354#define SQ_BUF_RSRC_WORD3__DST_SEL_W_MASK 0x00000E00L
3355#define SQ_BUF_RSRC_WORD3__NUM_FORMAT_MASK 0x00007000L
3356#define SQ_BUF_RSRC_WORD3__DATA_FORMAT_MASK 0x00078000L
3357#define SQ_BUF_RSRC_WORD3__USER_VM_ENABLE_MASK 0x00080000L
3358#define SQ_BUF_RSRC_WORD3__USER_VM_MODE_MASK 0x00100000L
3359#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE_MASK 0x00600000L
3360#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE_MASK 0x00800000L
3361#define SQ_BUF_RSRC_WORD3__NV_MASK 0x08000000L
3362#define SQ_BUF_RSRC_WORD3__TYPE_MASK 0xC0000000L
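/* Illustrative sketch, not part of the original header: SQ_BUF_RSRC_WORD0..3
 * describe the four dwords of a buffer resource descriptor.  The hypothetical
 * helper below shows the usual encode idiom for word 3 -- shift each field
 * value into place and OR the results together.  Field values are
 * caller-supplied and assumed to already fit within their fields. */
static inline unsigned int sq_buf_rsrc_word3(unsigned int dst_sel_x,
					     unsigned int dst_sel_y,
					     unsigned int dst_sel_z,
					     unsigned int dst_sel_w,
					     unsigned int num_format,
					     unsigned int data_format)
{
	return (dst_sel_x   << SQ_BUF_RSRC_WORD3__DST_SEL_X__SHIFT) |
	       (dst_sel_y   << SQ_BUF_RSRC_WORD3__DST_SEL_Y__SHIFT) |
	       (dst_sel_z   << SQ_BUF_RSRC_WORD3__DST_SEL_Z__SHIFT) |
	       (dst_sel_w   << SQ_BUF_RSRC_WORD3__DST_SEL_W__SHIFT) |
	       (num_format  << SQ_BUF_RSRC_WORD3__NUM_FORMAT__SHIFT) |
	       (data_format << SQ_BUF_RSRC_WORD3__DATA_FORMAT__SHIFT);
}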
3363//SQ_IMG_RSRC_WORD0
3364#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x0
3365#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS_MASK 0xFFFFFFFFL
3366//SQ_IMG_RSRC_WORD1
3367#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x0
3368#define SQ_IMG_RSRC_WORD1__MIN_LOD__SHIFT 0x8
3369#define SQ_IMG_RSRC_WORD1__DATA_FORMAT__SHIFT 0x14
3370#define SQ_IMG_RSRC_WORD1__NUM_FORMAT__SHIFT 0x1a
3371#define SQ_IMG_RSRC_WORD1__NV__SHIFT 0x1e
3372#define SQ_IMG_RSRC_WORD1__META_DIRECT__SHIFT 0x1f
3373#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x000000FFL
3374#define SQ_IMG_RSRC_WORD1__MIN_LOD_MASK 0x000FFF00L
3375#define SQ_IMG_RSRC_WORD1__DATA_FORMAT_MASK 0x03F00000L
3376#define SQ_IMG_RSRC_WORD1__NUM_FORMAT_MASK 0x3C000000L
3377#define SQ_IMG_RSRC_WORD1__NV_MASK 0x40000000L
3378#define SQ_IMG_RSRC_WORD1__META_DIRECT_MASK 0x80000000L
3379//SQ_IMG_RSRC_WORD2
3380#define SQ_IMG_RSRC_WORD2__WIDTH__SHIFT 0x0
3381#define SQ_IMG_RSRC_WORD2__HEIGHT__SHIFT 0xe
3382#define SQ_IMG_RSRC_WORD2__PERF_MOD__SHIFT 0x1c
3383#define SQ_IMG_RSRC_WORD2__WIDTH_MASK 0x00003FFFL
3384#define SQ_IMG_RSRC_WORD2__HEIGHT_MASK 0x0FFFC000L
3385#define SQ_IMG_RSRC_WORD2__PERF_MOD_MASK 0x70000000L
3386//SQ_IMG_RSRC_WORD3
3387#define SQ_IMG_RSRC_WORD3__DST_SEL_X__SHIFT 0x0
3388#define SQ_IMG_RSRC_WORD3__DST_SEL_Y__SHIFT 0x3
3389#define SQ_IMG_RSRC_WORD3__DST_SEL_Z__SHIFT 0x6
3390#define SQ_IMG_RSRC_WORD3__DST_SEL_W__SHIFT 0x9
3391#define SQ_IMG_RSRC_WORD3__BASE_LEVEL__SHIFT 0xc
3392#define SQ_IMG_RSRC_WORD3__LAST_LEVEL__SHIFT 0x10
3393#define SQ_IMG_RSRC_WORD3__SW_MODE__SHIFT 0x14
3394#define SQ_IMG_RSRC_WORD3__TYPE__SHIFT 0x1c
3395#define SQ_IMG_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
3396#define SQ_IMG_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
3397#define SQ_IMG_RSRC_WORD3__DST_SEL_Z_MASK 0x000001C0L
3398#define SQ_IMG_RSRC_WORD3__DST_SEL_W_MASK 0x00000E00L
3399#define SQ_IMG_RSRC_WORD3__BASE_LEVEL_MASK 0x0000F000L
3400#define SQ_IMG_RSRC_WORD3__LAST_LEVEL_MASK 0x000F0000L
3401#define SQ_IMG_RSRC_WORD3__SW_MODE_MASK 0x01F00000L
3402#define SQ_IMG_RSRC_WORD3__TYPE_MASK 0xF0000000L
3403//SQ_IMG_RSRC_WORD4
3404#define SQ_IMG_RSRC_WORD4__DEPTH__SHIFT 0x0
3405#define SQ_IMG_RSRC_WORD4__PITCH__SHIFT 0xd
3406#define SQ_IMG_RSRC_WORD4__BC_SWIZZLE__SHIFT 0x1d
3407#define SQ_IMG_RSRC_WORD4__DEPTH_MASK 0x00001FFFL
3408#define SQ_IMG_RSRC_WORD4__PITCH_MASK 0x1FFFE000L
3409#define SQ_IMG_RSRC_WORD4__BC_SWIZZLE_MASK 0xE0000000L
3410//SQ_IMG_RSRC_WORD5
3411#define SQ_IMG_RSRC_WORD5__BASE_ARRAY__SHIFT 0x0
3412#define SQ_IMG_RSRC_WORD5__ARRAY_PITCH__SHIFT 0xd
3413#define SQ_IMG_RSRC_WORD5__META_DATA_ADDRESS__SHIFT 0x11
3414#define SQ_IMG_RSRC_WORD5__META_LINEAR__SHIFT 0x19
3415#define SQ_IMG_RSRC_WORD5__META_PIPE_ALIGNED__SHIFT 0x1a
3416#define SQ_IMG_RSRC_WORD5__META_RB_ALIGNED__SHIFT 0x1b
3417#define SQ_IMG_RSRC_WORD5__MAX_MIP__SHIFT 0x1c
3418#define SQ_IMG_RSRC_WORD5__BASE_ARRAY_MASK 0x00001FFFL
3419#define SQ_IMG_RSRC_WORD5__ARRAY_PITCH_MASK 0x0001E000L
3420#define SQ_IMG_RSRC_WORD5__META_DATA_ADDRESS_MASK 0x01FE0000L
3421#define SQ_IMG_RSRC_WORD5__META_LINEAR_MASK 0x02000000L
3422#define SQ_IMG_RSRC_WORD5__META_PIPE_ALIGNED_MASK 0x04000000L
3423#define SQ_IMG_RSRC_WORD5__META_RB_ALIGNED_MASK 0x08000000L
3424#define SQ_IMG_RSRC_WORD5__MAX_MIP_MASK 0xF0000000L
3425//SQ_IMG_RSRC_WORD6
3426#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN__SHIFT 0x0
3427#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID__SHIFT 0xc
3428#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN__SHIFT 0x14
3429#define SQ_IMG_RSRC_WORD6__COMPRESSION_EN__SHIFT 0x15
3430#define SQ_IMG_RSRC_WORD6__ALPHA_IS_ON_MSB__SHIFT 0x16
3431#define SQ_IMG_RSRC_WORD6__COLOR_TRANSFORM__SHIFT 0x17
3432#define SQ_IMG_RSRC_WORD6__LOST_ALPHA_BITS__SHIFT 0x18
3433#define SQ_IMG_RSRC_WORD6__LOST_COLOR_BITS__SHIFT 0x1c
3434#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN_MASK 0x00000FFFL
3435#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID_MASK 0x000FF000L
3436#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN_MASK 0x00100000L
3437#define SQ_IMG_RSRC_WORD6__COMPRESSION_EN_MASK 0x00200000L
3438#define SQ_IMG_RSRC_WORD6__ALPHA_IS_ON_MSB_MASK 0x00400000L
3439#define SQ_IMG_RSRC_WORD6__COLOR_TRANSFORM_MASK 0x00800000L
3440#define SQ_IMG_RSRC_WORD6__LOST_ALPHA_BITS_MASK 0x0F000000L
3441#define SQ_IMG_RSRC_WORD6__LOST_COLOR_BITS_MASK 0xF0000000L
3442//SQ_IMG_RSRC_WORD7
3443#define SQ_IMG_RSRC_WORD7__META_DATA_ADDRESS__SHIFT 0x0
3444#define SQ_IMG_RSRC_WORD7__META_DATA_ADDRESS_MASK 0xFFFFFFFFL
3445//SQ_IMG_SAMP_WORD0
3446#define SQ_IMG_SAMP_WORD0__CLAMP_X__SHIFT 0x0
3447#define SQ_IMG_SAMP_WORD0__CLAMP_Y__SHIFT 0x3
3448#define SQ_IMG_SAMP_WORD0__CLAMP_Z__SHIFT 0x6
3449#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO__SHIFT 0x9
3450#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC__SHIFT 0xc
3451#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED__SHIFT 0xf
3452#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD__SHIFT 0x10
3453#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC__SHIFT 0x13
3454#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA__SHIFT 0x14
3455#define SQ_IMG_SAMP_WORD0__ANISO_BIAS__SHIFT 0x15
3456#define SQ_IMG_SAMP_WORD0__TRUNC_COORD__SHIFT 0x1b
3457#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP__SHIFT 0x1c
3458#define SQ_IMG_SAMP_WORD0__FILTER_MODE__SHIFT 0x1d
3459#define SQ_IMG_SAMP_WORD0__COMPAT_MODE__SHIFT 0x1f
3460#define SQ_IMG_SAMP_WORD0__CLAMP_X_MASK 0x00000007L
3461#define SQ_IMG_SAMP_WORD0__CLAMP_Y_MASK 0x00000038L
3462#define SQ_IMG_SAMP_WORD0__CLAMP_Z_MASK 0x000001C0L
3463#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO_MASK 0x00000E00L
3464#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC_MASK 0x00007000L
3465#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED_MASK 0x00008000L
3466#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD_MASK 0x00070000L
3467#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC_MASK 0x00080000L
3468#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA_MASK 0x00100000L
3469#define SQ_IMG_SAMP_WORD0__ANISO_BIAS_MASK 0x07E00000L
3470#define SQ_IMG_SAMP_WORD0__TRUNC_COORD_MASK 0x08000000L
3471#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP_MASK 0x10000000L
3472#define SQ_IMG_SAMP_WORD0__FILTER_MODE_MASK 0x60000000L
3473#define SQ_IMG_SAMP_WORD0__COMPAT_MODE_MASK 0x80000000L
3474//SQ_IMG_SAMP_WORD1
3475#define SQ_IMG_SAMP_WORD1__MIN_LOD__SHIFT 0x0
3476#define SQ_IMG_SAMP_WORD1__MAX_LOD__SHIFT 0xc
3477#define SQ_IMG_SAMP_WORD1__PERF_MIP__SHIFT 0x18
3478#define SQ_IMG_SAMP_WORD1__PERF_Z__SHIFT 0x1c
3479#define SQ_IMG_SAMP_WORD1__MIN_LOD_MASK 0x00000FFFL
3480#define SQ_IMG_SAMP_WORD1__MAX_LOD_MASK 0x00FFF000L
3481#define SQ_IMG_SAMP_WORD1__PERF_MIP_MASK 0x0F000000L
3482#define SQ_IMG_SAMP_WORD1__PERF_Z_MASK 0xF0000000L
3483//SQ_IMG_SAMP_WORD2
3484#define SQ_IMG_SAMP_WORD2__LOD_BIAS__SHIFT 0x0
3485#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC__SHIFT 0xe
3486#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER__SHIFT 0x14
3487#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER__SHIFT 0x16
3488#define SQ_IMG_SAMP_WORD2__Z_FILTER__SHIFT 0x18
3489#define SQ_IMG_SAMP_WORD2__MIP_FILTER__SHIFT 0x1a
3490#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP__SHIFT 0x1c
3491#define SQ_IMG_SAMP_WORD2__BLEND_ZERO_PRT__SHIFT 0x1d
3492#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX__SHIFT 0x1e
3493#define SQ_IMG_SAMP_WORD2__ANISO_OVERRIDE__SHIFT 0x1f
3494#define SQ_IMG_SAMP_WORD2__LOD_BIAS_MASK 0x00003FFFL
3495#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC_MASK 0x000FC000L
3496#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER_MASK 0x00300000L
3497#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER_MASK 0x00C00000L
3498#define SQ_IMG_SAMP_WORD2__Z_FILTER_MASK 0x03000000L
3499#define SQ_IMG_SAMP_WORD2__MIP_FILTER_MASK 0x0C000000L
3500#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP_MASK 0x10000000L
3501#define SQ_IMG_SAMP_WORD2__BLEND_ZERO_PRT_MASK 0x20000000L
3502#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX_MASK 0x40000000L
3503#define SQ_IMG_SAMP_WORD2__ANISO_OVERRIDE_MASK 0x80000000L
3504//SQ_IMG_SAMP_WORD3
3505#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR__SHIFT 0x0
3506#define SQ_IMG_SAMP_WORD3__SKIP_DEGAMMA__SHIFT 0xc
3507#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE__SHIFT 0x1e
3508#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR_MASK 0x00000FFFL
3509#define SQ_IMG_SAMP_WORD3__SKIP_DEGAMMA_MASK 0x00001000L
3510#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE_MASK 0xC0000000L
3511//SQ_FLAT_SCRATCH_WORD0
3512#define SQ_FLAT_SCRATCH_WORD0__SIZE__SHIFT 0x0
3513#define SQ_FLAT_SCRATCH_WORD0__SIZE_MASK 0x0007FFFFL
3514//SQ_FLAT_SCRATCH_WORD1
3515#define SQ_FLAT_SCRATCH_WORD1__OFFSET__SHIFT 0x0
3516#define SQ_FLAT_SCRATCH_WORD1__OFFSET_MASK 0x00FFFFFFL
3517//SQ_M0_GPR_IDX_WORD
3518#define SQ_M0_GPR_IDX_WORD__INDEX__SHIFT 0x0
3519#define SQ_M0_GPR_IDX_WORD__VSRC0_REL__SHIFT 0xc
3520#define SQ_M0_GPR_IDX_WORD__VSRC1_REL__SHIFT 0xd
3521#define SQ_M0_GPR_IDX_WORD__VSRC2_REL__SHIFT 0xe
3522#define SQ_M0_GPR_IDX_WORD__VDST_REL__SHIFT 0xf
3523#define SQ_M0_GPR_IDX_WORD__INDEX_MASK 0x000000FFL
3524#define SQ_M0_GPR_IDX_WORD__VSRC0_REL_MASK 0x00001000L
3525#define SQ_M0_GPR_IDX_WORD__VSRC1_REL_MASK 0x00002000L
3526#define SQ_M0_GPR_IDX_WORD__VSRC2_REL_MASK 0x00004000L
3527#define SQ_M0_GPR_IDX_WORD__VDST_REL_MASK 0x00008000L
3528//SQC_ICACHE_UTCL1_CNTL1
3529#define SQC_ICACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
3530#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
3531#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
3532#define SQC_ICACHE_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
3533#define SQC_ICACHE_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
3534#define SQC_ICACHE_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
3535#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
3536#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
3537#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
3538#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
3539#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
3540#define SQC_ICACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
3541#define SQC_ICACHE_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
3542#define SQC_ICACHE_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
3543#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
3544#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
3545#define SQC_ICACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
3546#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
3547#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
3548#define SQC_ICACHE_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
3549#define SQC_ICACHE_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
3550#define SQC_ICACHE_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
3551#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
3552#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
3553#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
3554#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
3555#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
3556#define SQC_ICACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
3557#define SQC_ICACHE_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
3558#define SQC_ICACHE_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
3559#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
3560#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
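/* Illustrative sketch, not part of the original header: control registers
 * such as SQC_ICACHE_UTCL1_CNTL1 are normally updated with a
 * read-modify-write of a single field.  The hypothetical helper below shows
 * the idiom for CLIENTID -- clear the field with its mask, then OR in the
 * new value shifted into position.  The actual register read/write (e.g.
 * through MMIO accessors) happens outside this header. */
static inline unsigned int
sqc_icache_utcl1_cntl1_set_clientid(unsigned int reg, unsigned int clientid)
{
	reg &= ~SQC_ICACHE_UTCL1_CNTL1__CLIENTID_MASK;
	reg |= (clientid << SQC_ICACHE_UTCL1_CNTL1__CLIENTID__SHIFT) &
	       SQC_ICACHE_UTCL1_CNTL1__CLIENTID_MASK;
	return reg;
}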
3561//SQC_ICACHE_UTCL1_CNTL2
3562#define SQC_ICACHE_UTCL1_CNTL2__SPARE__SHIFT 0x0
3563#define SQC_ICACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
3564#define SQC_ICACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
3565#define SQC_ICACHE_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
3566#define SQC_ICACHE_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
3567#define SQC_ICACHE_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
3568#define SQC_ICACHE_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
3569#define SQC_ICACHE_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
3570#define SQC_ICACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
3571#define SQC_ICACHE_UTCL1_CNTL2__ARB_BURST_MODE__SHIFT 0x10
3572#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
3573#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
3574#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
3575#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
3576#define SQC_ICACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
3577#define SQC_ICACHE_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
3578#define SQC_ICACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
3579#define SQC_ICACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
3580#define SQC_ICACHE_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
3581#define SQC_ICACHE_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
3582#define SQC_ICACHE_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
3583#define SQC_ICACHE_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
3584#define SQC_ICACHE_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
3585#define SQC_ICACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
3586#define SQC_ICACHE_UTCL1_CNTL2__ARB_BURST_MODE_MASK 0x00030000L
3587#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
3588#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
3589#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
3590#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
3591#define SQC_ICACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
3592//SQC_DCACHE_UTCL1_CNTL1
3593#define SQC_DCACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
3594#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
3595#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
3596#define SQC_DCACHE_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
3597#define SQC_DCACHE_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
3598#define SQC_DCACHE_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
3599#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
3600#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
3601#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
3602#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
3603#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
3604#define SQC_DCACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
3605#define SQC_DCACHE_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
3606#define SQC_DCACHE_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
3607#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
3608#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
3609#define SQC_DCACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
3610#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
3611#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
3612#define SQC_DCACHE_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
3613#define SQC_DCACHE_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
3614#define SQC_DCACHE_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
3615#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
3616#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
3617#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
3618#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
3619#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
3620#define SQC_DCACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
3621#define SQC_DCACHE_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
3622#define SQC_DCACHE_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
3623#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
3624#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
3625//SQC_DCACHE_UTCL1_CNTL2
3626#define SQC_DCACHE_UTCL1_CNTL2__SPARE__SHIFT 0x0
3627#define SQC_DCACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
3628#define SQC_DCACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
3629#define SQC_DCACHE_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
3630#define SQC_DCACHE_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
3631#define SQC_DCACHE_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
3632#define SQC_DCACHE_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
3633#define SQC_DCACHE_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
3634#define SQC_DCACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
3635#define SQC_DCACHE_UTCL1_CNTL2__ARB_BURST_MODE__SHIFT 0x10
3636#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
3637#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
3638#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
3639#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
3640#define SQC_DCACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
3641#define SQC_DCACHE_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
3642#define SQC_DCACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
3643#define SQC_DCACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
3644#define SQC_DCACHE_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
3645#define SQC_DCACHE_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
3646#define SQC_DCACHE_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
3647#define SQC_DCACHE_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
3648#define SQC_DCACHE_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
3649#define SQC_DCACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
3650#define SQC_DCACHE_UTCL1_CNTL2__ARB_BURST_MODE_MASK 0x00030000L
3651#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
3652#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
3653#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
3654#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
3655#define SQC_DCACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
3656//SQC_ICACHE_UTCL1_STATUS
3657#define SQC_ICACHE_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
3658#define SQC_ICACHE_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
3659#define SQC_ICACHE_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
3660#define SQC_ICACHE_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
3661#define SQC_ICACHE_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
3662#define SQC_ICACHE_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
3663//SQC_DCACHE_UTCL1_STATUS
3664#define SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
3665#define SQC_DCACHE_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
3666#define SQC_DCACHE_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
3667#define SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
3668#define SQC_DCACHE_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
3669#define SQC_DCACHE_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
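/* Illustrative sketch, not part of the original header: the UTCL1 status
 * registers above are simple one-bit flags.  A hypothetical check for an
 * outstanding translation fault on the data-cache side could look like this
 * (the raw value would come from a register read done elsewhere). */
static inline int sqc_dcache_utcl1_fault_detected(unsigned int status)
{
	return (status & SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED_MASK) != 0;
}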


// addressBlock: gc_shsdec
3673//SX_DEBUG_1
3674#define SX_DEBUG_1__SX_DB_QUAD_CREDIT__SHIFT 0x0
3675#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x8
3676#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x9
3677#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0xa
3678#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT__SHIFT 0xb
3679#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT__SHIFT 0xc
3680#define SX_DEBUG_1__PC_CFG__SHIFT 0xd
3681#define SX_DEBUG_1__DEBUG_DATA__SHIFT 0xe
3682#define SX_DEBUG_1__SX_DB_QUAD_CREDIT_MASK 0x0000007FL
3683#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x00000100L
3684#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS_MASK 0x00000200L
3685#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x00000400L
3686#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT_MASK 0x00000800L
3687#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT_MASK 0x00001000L
3688#define SX_DEBUG_1__PC_CFG_MASK 0x00002000L
3689#define SX_DEBUG_1__DEBUG_DATA_MASK 0xFFFFC000L
3690//SPI_PS_MAX_WAVE_ID
3691#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
3692#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID__SHIFT 0x10
3693#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
3694#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID_MASK 0x03FF0000L
3695//SPI_START_PHASE
3696#define SPI_START_PHASE__VGPR_START_PHASE__SHIFT 0x0
3697#define SPI_START_PHASE__SGPR_START_PHASE__SHIFT 0x2
3698#define SPI_START_PHASE__WAVE_START_PHASE__SHIFT 0x4
3699#define SPI_START_PHASE__VGPR_START_PHASE_MASK 0x00000003L
3700#define SPI_START_PHASE__SGPR_START_PHASE_MASK 0x0000000CL
3701#define SPI_START_PHASE__WAVE_START_PHASE_MASK 0x00000030L
3702//SPI_GFX_CNTL
3703#define SPI_GFX_CNTL__RESET_COUNTS__SHIFT 0x0
3704#define SPI_GFX_CNTL__RESET_COUNTS_MASK 0x00000001L
3705//SPI_DSM_CNTL
3706#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
3707#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
3708#define SPI_DSM_CNTL__UNUSED__SHIFT 0x3
3709#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
3710#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
3711#define SPI_DSM_CNTL__UNUSED_MASK 0xFFFFFFF8L
3712//SPI_DSM_CNTL2
3713#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
3714#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
3715#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY__SHIFT 0x4
3716#define SPI_DSM_CNTL2__UNUSED__SHIFT 0xa
3717#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
3718#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
3719#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY_MASK 0x000003F0L
3720#define SPI_DSM_CNTL2__UNUSED_MASK 0xFFFFFC00L
3721//SPI_EDC_CNT
3722#define SPI_EDC_CNT__SPI_SR_MEM_SED_COUNT__SHIFT 0x0
3723#define SPI_EDC_CNT__SPI_SR_MEM_SED_COUNT_MASK 0x00000003L
3724//SPI_CONFIG_PS_CU_EN
3725#define SPI_CONFIG_PS_CU_EN__ENABLE__SHIFT 0x0
3726#define SPI_CONFIG_PS_CU_EN__PKR0_CU_EN__SHIFT 0x1
3727#define SPI_CONFIG_PS_CU_EN__PKR1_CU_EN__SHIFT 0x10
3728#define SPI_CONFIG_PS_CU_EN__ENABLE_MASK 0x00000001L
3729#define SPI_CONFIG_PS_CU_EN__PKR0_CU_EN_MASK 0x0000FFFEL
3730#define SPI_CONFIG_PS_CU_EN__PKR1_CU_EN_MASK 0xFFFF0000L
3731//SPI_WF_LIFETIME_CNTL
3732#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD__SHIFT 0x0
3733#define SPI_WF_LIFETIME_CNTL__EN__SHIFT 0x4
3734#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD_MASK 0x0000000FL
3735#define SPI_WF_LIFETIME_CNTL__EN_MASK 0x00000010L
3736//SPI_WF_LIFETIME_LIMIT_0
3737#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT__SHIFT 0x0
3738#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN__SHIFT 0x1f
3739#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT_MASK 0x7FFFFFFFL
3740#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN_MASK 0x80000000L
3741//SPI_WF_LIFETIME_LIMIT_1
3742#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT__SHIFT 0x0
3743#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN__SHIFT 0x1f
3744#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT_MASK 0x7FFFFFFFL
3745#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN_MASK 0x80000000L
3746//SPI_WF_LIFETIME_LIMIT_2
3747#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT__SHIFT 0x0
3748#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN__SHIFT 0x1f
3749#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT_MASK 0x7FFFFFFFL
3750#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN_MASK 0x80000000L
3751//SPI_WF_LIFETIME_LIMIT_3
3752#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT__SHIFT 0x0
3753#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN__SHIFT 0x1f
3754#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT_MASK 0x7FFFFFFFL
3755#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN_MASK 0x80000000L
3756//SPI_WF_LIFETIME_LIMIT_4
3757#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT__SHIFT 0x0
3758#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN__SHIFT 0x1f
3759#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT_MASK 0x7FFFFFFFL
3760#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN_MASK 0x80000000L
3761//SPI_WF_LIFETIME_LIMIT_5
3762#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT__SHIFT 0x0
3763#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN__SHIFT 0x1f
3764#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT_MASK 0x7FFFFFFFL
3765#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN_MASK 0x80000000L
3766//SPI_WF_LIFETIME_LIMIT_6
3767#define SPI_WF_LIFETIME_LIMIT_6__MAX_CNT__SHIFT 0x0
3768#define SPI_WF_LIFETIME_LIMIT_6__EN_WARN__SHIFT 0x1f
3769#define SPI_WF_LIFETIME_LIMIT_6__MAX_CNT_MASK 0x7FFFFFFFL
3770#define SPI_WF_LIFETIME_LIMIT_6__EN_WARN_MASK 0x80000000L
3771//SPI_WF_LIFETIME_LIMIT_7
3772#define SPI_WF_LIFETIME_LIMIT_7__MAX_CNT__SHIFT 0x0
3773#define SPI_WF_LIFETIME_LIMIT_7__EN_WARN__SHIFT 0x1f
3774#define SPI_WF_LIFETIME_LIMIT_7__MAX_CNT_MASK 0x7FFFFFFFL
3775#define SPI_WF_LIFETIME_LIMIT_7__EN_WARN_MASK 0x80000000L
3776//SPI_WF_LIFETIME_LIMIT_8
3777#define SPI_WF_LIFETIME_LIMIT_8__MAX_CNT__SHIFT 0x0
3778#define SPI_WF_LIFETIME_LIMIT_8__EN_WARN__SHIFT 0x1f
3779#define SPI_WF_LIFETIME_LIMIT_8__MAX_CNT_MASK 0x7FFFFFFFL
3780#define SPI_WF_LIFETIME_LIMIT_8__EN_WARN_MASK 0x80000000L
3781//SPI_WF_LIFETIME_LIMIT_9
3782#define SPI_WF_LIFETIME_LIMIT_9__MAX_CNT__SHIFT 0x0
3783#define SPI_WF_LIFETIME_LIMIT_9__EN_WARN__SHIFT 0x1f
3784#define SPI_WF_LIFETIME_LIMIT_9__MAX_CNT_MASK 0x7FFFFFFFL
3785#define SPI_WF_LIFETIME_LIMIT_9__EN_WARN_MASK 0x80000000L
3786//SPI_WF_LIFETIME_STATUS_0
3787#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT__SHIFT 0x0
3788#define SPI_WF_LIFETIME_STATUS_0__INT_SENT__SHIFT 0x1f
3789#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT_MASK 0x7FFFFFFFL
3790#define SPI_WF_LIFETIME_STATUS_0__INT_SENT_MASK 0x80000000L
3791//SPI_WF_LIFETIME_STATUS_1
3792#define SPI_WF_LIFETIME_STATUS_1__MAX_CNT__SHIFT 0x0
3793#define SPI_WF_LIFETIME_STATUS_1__INT_SENT__SHIFT 0x1f
3794#define SPI_WF_LIFETIME_STATUS_1__MAX_CNT_MASK 0x7FFFFFFFL
3795#define SPI_WF_LIFETIME_STATUS_1__INT_SENT_MASK 0x80000000L
3796//SPI_WF_LIFETIME_STATUS_2
3797#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT__SHIFT 0x0
3798#define SPI_WF_LIFETIME_STATUS_2__INT_SENT__SHIFT 0x1f
3799#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT_MASK 0x7FFFFFFFL
3800#define SPI_WF_LIFETIME_STATUS_2__INT_SENT_MASK 0x80000000L
3801//SPI_WF_LIFETIME_STATUS_3
3802#define SPI_WF_LIFETIME_STATUS_3__MAX_CNT__SHIFT 0x0
3803#define SPI_WF_LIFETIME_STATUS_3__INT_SENT__SHIFT 0x1f
3804#define SPI_WF_LIFETIME_STATUS_3__MAX_CNT_MASK 0x7FFFFFFFL
3805#define SPI_WF_LIFETIME_STATUS_3__INT_SENT_MASK 0x80000000L
3806//SPI_WF_LIFETIME_STATUS_4
3807#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT__SHIFT 0x0
3808#define SPI_WF_LIFETIME_STATUS_4__INT_SENT__SHIFT 0x1f
3809#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT_MASK 0x7FFFFFFFL
3810#define SPI_WF_LIFETIME_STATUS_4__INT_SENT_MASK 0x80000000L
3811//SPI_WF_LIFETIME_STATUS_5
3812#define SPI_WF_LIFETIME_STATUS_5__MAX_CNT__SHIFT 0x0
3813#define SPI_WF_LIFETIME_STATUS_5__INT_SENT__SHIFT 0x1f
3814#define SPI_WF_LIFETIME_STATUS_5__MAX_CNT_MASK 0x7FFFFFFFL
3815#define SPI_WF_LIFETIME_STATUS_5__INT_SENT_MASK 0x80000000L
3816//SPI_WF_LIFETIME_STATUS_6
3817#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT__SHIFT 0x0
3818#define SPI_WF_LIFETIME_STATUS_6__INT_SENT__SHIFT 0x1f
3819#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT_MASK 0x7FFFFFFFL
3820#define SPI_WF_LIFETIME_STATUS_6__INT_SENT_MASK 0x80000000L
3821//SPI_WF_LIFETIME_STATUS_7
3822#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT__SHIFT 0x0
3823#define SPI_WF_LIFETIME_STATUS_7__INT_SENT__SHIFT 0x1f
3824#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT_MASK 0x7FFFFFFFL
3825#define SPI_WF_LIFETIME_STATUS_7__INT_SENT_MASK 0x80000000L
3826//SPI_WF_LIFETIME_STATUS_8
3827#define SPI_WF_LIFETIME_STATUS_8__MAX_CNT__SHIFT 0x0
3828#define SPI_WF_LIFETIME_STATUS_8__INT_SENT__SHIFT 0x1f
3829#define SPI_WF_LIFETIME_STATUS_8__MAX_CNT_MASK 0x7FFFFFFFL
3830#define SPI_WF_LIFETIME_STATUS_8__INT_SENT_MASK 0x80000000L
3831//SPI_WF_LIFETIME_STATUS_9
3832#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT__SHIFT 0x0
3833#define SPI_WF_LIFETIME_STATUS_9__INT_SENT__SHIFT 0x1f
3834#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT_MASK 0x7FFFFFFFL
3835#define SPI_WF_LIFETIME_STATUS_9__INT_SENT_MASK 0x80000000L
3836//SPI_WF_LIFETIME_STATUS_10
3837#define SPI_WF_LIFETIME_STATUS_10__MAX_CNT__SHIFT 0x0
3838#define SPI_WF_LIFETIME_STATUS_10__INT_SENT__SHIFT 0x1f
3839#define SPI_WF_LIFETIME_STATUS_10__MAX_CNT_MASK 0x7FFFFFFFL
3840#define SPI_WF_LIFETIME_STATUS_10__INT_SENT_MASK 0x80000000L
3841//SPI_WF_LIFETIME_STATUS_11
3842#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT__SHIFT 0x0
3843#define SPI_WF_LIFETIME_STATUS_11__INT_SENT__SHIFT 0x1f
3844#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT_MASK 0x7FFFFFFFL
3845#define SPI_WF_LIFETIME_STATUS_11__INT_SENT_MASK 0x80000000L
3846//SPI_WF_LIFETIME_STATUS_12
3847#define SPI_WF_LIFETIME_STATUS_12__MAX_CNT__SHIFT 0x0
3848#define SPI_WF_LIFETIME_STATUS_12__INT_SENT__SHIFT 0x1f
3849#define SPI_WF_LIFETIME_STATUS_12__MAX_CNT_MASK 0x7FFFFFFFL
3850#define SPI_WF_LIFETIME_STATUS_12__INT_SENT_MASK 0x80000000L
3851//SPI_WF_LIFETIME_STATUS_13
3852#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT__SHIFT 0x0
3853#define SPI_WF_LIFETIME_STATUS_13__INT_SENT__SHIFT 0x1f
3854#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT_MASK 0x7FFFFFFFL
3855#define SPI_WF_LIFETIME_STATUS_13__INT_SENT_MASK 0x80000000L
3856//SPI_WF_LIFETIME_STATUS_14
3857#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT__SHIFT 0x0
3858#define SPI_WF_LIFETIME_STATUS_14__INT_SENT__SHIFT 0x1f
3859#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT_MASK 0x7FFFFFFFL
3860#define SPI_WF_LIFETIME_STATUS_14__INT_SENT_MASK 0x80000000L
3861//SPI_WF_LIFETIME_STATUS_15
3862#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT__SHIFT 0x0
3863#define SPI_WF_LIFETIME_STATUS_15__INT_SENT__SHIFT 0x1f
3864#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT_MASK 0x7FFFFFFFL
3865#define SPI_WF_LIFETIME_STATUS_15__INT_SENT_MASK 0x80000000L
3866//SPI_WF_LIFETIME_STATUS_16
3867#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT__SHIFT 0x0
3868#define SPI_WF_LIFETIME_STATUS_16__INT_SENT__SHIFT 0x1f
3869#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT_MASK 0x7FFFFFFFL
3870#define SPI_WF_LIFETIME_STATUS_16__INT_SENT_MASK 0x80000000L
3871//SPI_WF_LIFETIME_STATUS_17
3872#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT__SHIFT 0x0
3873#define SPI_WF_LIFETIME_STATUS_17__INT_SENT__SHIFT 0x1f
3874#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT_MASK 0x7FFFFFFFL
3875#define SPI_WF_LIFETIME_STATUS_17__INT_SENT_MASK 0x80000000L
3876//SPI_WF_LIFETIME_STATUS_18
3877#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT__SHIFT 0x0
3878#define SPI_WF_LIFETIME_STATUS_18__INT_SENT__SHIFT 0x1f
3879#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT_MASK 0x7FFFFFFFL
3880#define SPI_WF_LIFETIME_STATUS_18__INT_SENT_MASK 0x80000000L
3881//SPI_WF_LIFETIME_STATUS_19
3882#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT__SHIFT 0x0
3883#define SPI_WF_LIFETIME_STATUS_19__INT_SENT__SHIFT 0x1f
3884#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT_MASK 0x7FFFFFFFL
3885#define SPI_WF_LIFETIME_STATUS_19__INT_SENT_MASK 0x80000000L
3886//SPI_WF_LIFETIME_STATUS_20
3887#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT__SHIFT 0x0
3888#define SPI_WF_LIFETIME_STATUS_20__INT_SENT__SHIFT 0x1f
3889#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT_MASK 0x7FFFFFFFL
3890#define SPI_WF_LIFETIME_STATUS_20__INT_SENT_MASK 0x80000000L
3891//SPI_LB_CTR_CTRL
3892#define SPI_LB_CTR_CTRL__LOAD__SHIFT 0x0
3893#define SPI_LB_CTR_CTRL__WAVES_SELECT__SHIFT 0x1
3894#define SPI_LB_CTR_CTRL__CLEAR_ON_READ__SHIFT 0x3
3895#define SPI_LB_CTR_CTRL__RESET_COUNTS__SHIFT 0x4
3896#define SPI_LB_CTR_CTRL__LOAD_MASK 0x00000001L
3897#define SPI_LB_CTR_CTRL__WAVES_SELECT_MASK 0x00000006L
3898#define SPI_LB_CTR_CTRL__CLEAR_ON_READ_MASK 0x00000008L
3899#define SPI_LB_CTR_CTRL__RESET_COUNTS_MASK 0x00000010L
3900//SPI_LB_CU_MASK
3901#define SPI_LB_CU_MASK__CU_MASK__SHIFT 0x0
3902#define SPI_LB_CU_MASK__CU_MASK_MASK 0xFFFFL
3903//SPI_LB_DATA_REG
3904#define SPI_LB_DATA_REG__CNT_DATA__SHIFT 0x0
3905#define SPI_LB_DATA_REG__CNT_DATA_MASK 0xFFFFFFFFL
3906//SPI_PG_ENABLE_STATIC_CU_MASK
3907#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK__SHIFT 0x0
3908#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK_MASK 0xFFFFL
3909//SPI_GDS_CREDITS
3910#define SPI_GDS_CREDITS__DS_DATA_CREDITS__SHIFT 0x0
3911#define SPI_GDS_CREDITS__DS_CMD_CREDITS__SHIFT 0x8
3912#define SPI_GDS_CREDITS__UNUSED__SHIFT 0x10
3913#define SPI_GDS_CREDITS__DS_DATA_CREDITS_MASK 0x000000FFL
3914#define SPI_GDS_CREDITS__DS_CMD_CREDITS_MASK 0x0000FF00L
3915#define SPI_GDS_CREDITS__UNUSED_MASK 0xFFFF0000L
3916//SPI_SX_EXPORT_BUFFER_SIZES
3917#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE__SHIFT 0x0
3918#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE__SHIFT 0x10
3919#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE_MASK 0x0000FFFFL
3920#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE_MASK 0xFFFF0000L
3921//SPI_SX_SCOREBOARD_BUFFER_SIZES
3922#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE__SHIFT 0x0
3923#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE__SHIFT 0x10
3924#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE_MASK 0x0000FFFFL
3925#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE_MASK 0xFFFF0000L
3926//SPI_CSQ_WF_ACTIVE_STATUS
3927#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE__SHIFT 0x0
3928#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE_MASK 0xFFFFFFFFL
3929//SPI_CSQ_WF_ACTIVE_COUNT_0
3930#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT__SHIFT 0x0
3931#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS__SHIFT 0x10
3932#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK 0x000007FFL
3933#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS_MASK 0x07FF0000L
3934//SPI_CSQ_WF_ACTIVE_COUNT_1
3935#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT__SHIFT 0x0
3936#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS__SHIFT 0x10
3937#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT_MASK 0x000007FFL
3938#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS_MASK 0x07FF0000L
3939//SPI_CSQ_WF_ACTIVE_COUNT_2
3940#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT__SHIFT 0x0
3941#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS__SHIFT 0x10
3942#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT_MASK 0x000007FFL
3943#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS_MASK 0x07FF0000L
3944//SPI_CSQ_WF_ACTIVE_COUNT_3
3945#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT__SHIFT 0x0
3946#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS__SHIFT 0x10
3947#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT_MASK 0x000007FFL
3948#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS_MASK 0x07FF0000L
3949//SPI_CSQ_WF_ACTIVE_COUNT_4
3950#define SPI_CSQ_WF_ACTIVE_COUNT_4__COUNT__SHIFT 0x0
3951#define SPI_CSQ_WF_ACTIVE_COUNT_4__EVENTS__SHIFT 0x10
3952#define SPI_CSQ_WF_ACTIVE_COUNT_4__COUNT_MASK 0x000007FFL
3953#define SPI_CSQ_WF_ACTIVE_COUNT_4__EVENTS_MASK 0x07FF0000L
3954//SPI_CSQ_WF_ACTIVE_COUNT_5
3955#define SPI_CSQ_WF_ACTIVE_COUNT_5__COUNT__SHIFT 0x0
3956#define SPI_CSQ_WF_ACTIVE_COUNT_5__EVENTS__SHIFT 0x10
3957#define SPI_CSQ_WF_ACTIVE_COUNT_5__COUNT_MASK 0x000007FFL
3958#define SPI_CSQ_WF_ACTIVE_COUNT_5__EVENTS_MASK 0x07FF0000L
3959//SPI_CSQ_WF_ACTIVE_COUNT_6
3960#define SPI_CSQ_WF_ACTIVE_COUNT_6__COUNT__SHIFT 0x0
3961#define SPI_CSQ_WF_ACTIVE_COUNT_6__EVENTS__SHIFT 0x10
3962#define SPI_CSQ_WF_ACTIVE_COUNT_6__COUNT_MASK 0x000007FFL
3963#define SPI_CSQ_WF_ACTIVE_COUNT_6__EVENTS_MASK 0x07FF0000L
3964//SPI_CSQ_WF_ACTIVE_COUNT_7
3965#define SPI_CSQ_WF_ACTIVE_COUNT_7__COUNT__SHIFT 0x0
3966#define SPI_CSQ_WF_ACTIVE_COUNT_7__EVENTS__SHIFT 0x10
3967#define SPI_CSQ_WF_ACTIVE_COUNT_7__COUNT_MASK 0x000007FFL
3968#define SPI_CSQ_WF_ACTIVE_COUNT_7__EVENTS_MASK 0x07FF0000L
3969//SPI_LB_DATA_WAVES
3970#define SPI_LB_DATA_WAVES__COUNT0__SHIFT 0x0
3971#define SPI_LB_DATA_WAVES__COUNT1__SHIFT 0x10
3972#define SPI_LB_DATA_WAVES__COUNT0_MASK 0x0000FFFFL
3973#define SPI_LB_DATA_WAVES__COUNT1_MASK 0xFFFF0000L
3974//SPI_LB_DATA_PERCU_WAVE_HSGS
3975#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_HS__SHIFT 0x0
3976#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_GS__SHIFT 0x10
3977#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_HS_MASK 0x0000FFFFL
3978#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_GS_MASK 0xFFFF0000L
3979//SPI_LB_DATA_PERCU_WAVE_VSPS
3980#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_VS__SHIFT 0x0
3981#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_PS__SHIFT 0x10
3982#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_VS_MASK 0x0000FFFFL
3983#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_PS_MASK 0xFFFF0000L
3984//SPI_LB_DATA_PERCU_WAVE_CS
3985#define SPI_LB_DATA_PERCU_WAVE_CS__ACTIVE__SHIFT 0x0
3986#define SPI_LB_DATA_PERCU_WAVE_CS__ACTIVE_MASK 0xFFFFL
3987//SPI_P0_TRAP_SCREEN_PSBA_LO
3988#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
3989#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
3990//SPI_P0_TRAP_SCREEN_PSBA_HI
3991#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
3992#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
3993//SPI_P0_TRAP_SCREEN_PSMA_LO
3994#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
3995#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
3996//SPI_P0_TRAP_SCREEN_PSMA_HI
3997#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
3998#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
3999//SPI_P0_TRAP_SCREEN_GPR_MIN
4000#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
4001#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
4002#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
4003#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
4004//SPI_P1_TRAP_SCREEN_PSBA_LO
4005#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
4006#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
4007//SPI_P1_TRAP_SCREEN_PSBA_HI
4008#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
4009#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
4010//SPI_P1_TRAP_SCREEN_PSMA_LO
4011#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
4012#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
4013//SPI_P1_TRAP_SCREEN_PSMA_HI
4014#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
4015#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
4016//SPI_P1_TRAP_SCREEN_GPR_MIN
4017#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
4018#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
4019#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
4020#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
4021
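/*
 * Editorial sketch, not part of the generated register header: every field in
 * the SPI block above is described by a matching __SHIFT/__MASK pair, and a
 * field is read by masking the raw 32-bit register value and shifting the
 * result down.  The helper names below and the use of uint32_t from <stdint.h>
 * are illustrative assumptions (kernel code would typically use u32 from
 * <linux/types.h>); this is not an amdgpu interface.
 */
#include <stdint.h>

static inline uint32_t spi_wf_lifetime_status_2_max_cnt(uint32_t reg_val)
{
	/* Isolate bits 30:0 of SPI_WF_LIFETIME_STATUS_2, then move them to bit 0. */
	return (reg_val & SPI_WF_LIFETIME_STATUS_2__MAX_CNT_MASK) >>
	       SPI_WF_LIFETIME_STATUS_2__MAX_CNT__SHIFT;
}

static inline int spi_wf_lifetime_status_2_int_sent(uint32_t reg_val)
{
	/* Single-bit field at bit 31: non-zero when the interrupt was sent. */
	return (reg_val & SPI_WF_LIFETIME_STATUS_2__INT_SENT_MASK) != 0;
}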
4022
4023// addressBlock: gc_tpdec
4024//TD_CNTL
4025#define TD_CNTL__SYNC_PHASE_SH__SHIFT 0x0
4026#define TD_CNTL__SYNC_PHASE_VC_SMX__SHIFT 0x4
4027#define TD_CNTL__PAD_STALL_EN__SHIFT 0x8
4028#define TD_CNTL__EXTEND_LDS_STALL__SHIFT 0x9
4029#define TD_CNTL__LDS_STALL_PHASE_ADJUST__SHIFT 0xb
4030#define TD_CNTL__PRECISION_COMPATIBILITY__SHIFT 0xf
4031#define TD_CNTL__GATHER4_FLOAT_MODE__SHIFT 0x10
4032#define TD_CNTL__LD_FLOAT_MODE__SHIFT 0x12
4033#define TD_CNTL__GATHER4_DX9_MODE__SHIFT 0x13
4034#define TD_CNTL__DISABLE_POWER_THROTTLE__SHIFT 0x14
4035#define TD_CNTL__ENABLE_ROUND_TO_ZERO__SHIFT 0x15
4036#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT__SHIFT 0x17
4037#define TD_CNTL__DISABLE_MM_QNAN_COMPARE_RESULT__SHIFT 0x18
4038#define TD_CNTL__SYNC_PHASE_SH_MASK 0x00000003L
4039#define TD_CNTL__SYNC_PHASE_VC_SMX_MASK 0x00000030L
4040#define TD_CNTL__PAD_STALL_EN_MASK 0x00000100L
4041#define TD_CNTL__EXTEND_LDS_STALL_MASK 0x00000600L
4042#define TD_CNTL__LDS_STALL_PHASE_ADJUST_MASK 0x00001800L
4043#define TD_CNTL__PRECISION_COMPATIBILITY_MASK 0x00008000L
4044#define TD_CNTL__GATHER4_FLOAT_MODE_MASK 0x00010000L
4045#define TD_CNTL__LD_FLOAT_MODE_MASK 0x00040000L
4046#define TD_CNTL__GATHER4_DX9_MODE_MASK 0x00080000L
4047#define TD_CNTL__DISABLE_POWER_THROTTLE_MASK 0x00100000L
4048#define TD_CNTL__ENABLE_ROUND_TO_ZERO_MASK 0x00200000L
4049#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT_MASK 0x00800000L
4050#define TD_CNTL__DISABLE_MM_QNAN_COMPARE_RESULT_MASK 0x01000000L
4051//TD_STATUS
4052#define TD_STATUS__BUSY__SHIFT 0x1f
4053#define TD_STATUS__BUSY_MASK 0x80000000L
4054//TD_DSM_CNTL
4055#define TD_DSM_CNTL__TD_SS_FIFO_LO_DSM_IRRITATOR_DATA__SHIFT 0x0
4056#define TD_DSM_CNTL__TD_SS_FIFO_LO_ENABLE_SINGLE_WRITE__SHIFT 0x2
4057#define TD_DSM_CNTL__TD_SS_FIFO_HI_DSM_IRRITATOR_DATA__SHIFT 0x3
4058#define TD_DSM_CNTL__TD_SS_FIFO_HI_ENABLE_SINGLE_WRITE__SHIFT 0x5
4059#define TD_DSM_CNTL__TD_CS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
4060#define TD_DSM_CNTL__TD_CS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
4061#define TD_DSM_CNTL__TD_SS_FIFO_LO_DSM_IRRITATOR_DATA_MASK 0x00000003L
4062#define TD_DSM_CNTL__TD_SS_FIFO_LO_ENABLE_SINGLE_WRITE_MASK 0x00000004L
4063#define TD_DSM_CNTL__TD_SS_FIFO_HI_DSM_IRRITATOR_DATA_MASK 0x00000018L
4064#define TD_DSM_CNTL__TD_SS_FIFO_HI_ENABLE_SINGLE_WRITE_MASK 0x00000020L
4065#define TD_DSM_CNTL__TD_CS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
4066#define TD_DSM_CNTL__TD_CS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
4067//TD_DSM_CNTL2
4068#define TD_DSM_CNTL2__TD_SS_FIFO_LO_ENABLE_ERROR_INJECT__SHIFT 0x0
4069#define TD_DSM_CNTL2__TD_SS_FIFO_LO_SELECT_INJECT_DELAY__SHIFT 0x2
4070#define TD_DSM_CNTL2__TD_SS_FIFO_HI_ENABLE_ERROR_INJECT__SHIFT 0x3
4071#define TD_DSM_CNTL2__TD_SS_FIFO_HI_SELECT_INJECT_DELAY__SHIFT 0x5
4072#define TD_DSM_CNTL2__TD_CS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
4073#define TD_DSM_CNTL2__TD_CS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
4074#define TD_DSM_CNTL2__TD_INJECT_DELAY__SHIFT 0x1a
4075#define TD_DSM_CNTL2__TD_SS_FIFO_LO_ENABLE_ERROR_INJECT_MASK 0x00000003L
4076#define TD_DSM_CNTL2__TD_SS_FIFO_LO_SELECT_INJECT_DELAY_MASK 0x00000004L
4077#define TD_DSM_CNTL2__TD_SS_FIFO_HI_ENABLE_ERROR_INJECT_MASK 0x00000018L
4078#define TD_DSM_CNTL2__TD_SS_FIFO_HI_SELECT_INJECT_DELAY_MASK 0x00000020L
4079#define TD_DSM_CNTL2__TD_CS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
4080#define TD_DSM_CNTL2__TD_CS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
4081#define TD_DSM_CNTL2__TD_INJECT_DELAY_MASK 0xFC000000L
4082//TD_SCRATCH
4083#define TD_SCRATCH__SCRATCH__SHIFT 0x0
4084#define TD_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
4085//TA_CNTL
4086#define TA_CNTL__FX_XNACK_CREDIT__SHIFT 0x0
4087#define TA_CNTL__SQ_XNACK_CREDIT__SHIFT 0x9
4088#define TA_CNTL__TC_DATA_CREDIT__SHIFT 0xd
4089#define TA_CNTL__ALIGNER_CREDIT__SHIFT 0x10
4090#define TA_CNTL__TD_FIFO_CREDIT__SHIFT 0x16
4091#define TA_CNTL__FX_XNACK_CREDIT_MASK 0x0000007FL
4092#define TA_CNTL__SQ_XNACK_CREDIT_MASK 0x00001E00L
4093#define TA_CNTL__TC_DATA_CREDIT_MASK 0x0000E000L
4094#define TA_CNTL__ALIGNER_CREDIT_MASK 0x001F0000L
4095#define TA_CNTL__TD_FIFO_CREDIT_MASK 0xFFC00000L
4096//TA_CNTL_AUX
4097#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N__SHIFT 0x0
4098#define TA_CNTL_AUX__RESERVED__SHIFT 0x1
4099#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE__SHIFT 0x5
4100#define TA_CNTL_AUX__GATHERH_DST_SEL__SHIFT 0x6
4101#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE__SHIFT 0x7
4102#define TA_CNTL_AUX__NONIMG_ANISO_BYPASS__SHIFT 0x9
4103#define TA_CNTL_AUX__ANISO_HALF_THRESH__SHIFT 0xa
4104#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS__SHIFT 0xc
4105#define TA_CNTL_AUX__ANISO_STEP_ORDER__SHIFT 0xd
4106#define TA_CNTL_AUX__ANISO_STEP__SHIFT 0xe
4107#define TA_CNTL_AUX__MINMAG_UNNORM__SHIFT 0xf
4108#define TA_CNTL_AUX__ANISO_WEIGHT_MODE__SHIFT 0x10
4109#define TA_CNTL_AUX__ANISO_RATIO_LUT__SHIFT 0x11
4110#define TA_CNTL_AUX__ANISO_TAP__SHIFT 0x12
4111#define TA_CNTL_AUX__ANISO_MIP_ADJ_MODE__SHIFT 0x13
4112#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE__SHIFT 0x14
4113#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE__SHIFT 0x15
4114#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE__SHIFT 0x16
4115#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE__SHIFT 0x17
4116#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE__SHIFT 0x18
4117#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE__SHIFT 0x19
4118#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE__SHIFT 0x1a
4119#define TA_CNTL_AUX__DISABLE_DWORD_X2_COALESCE__SHIFT 0x1b
4120#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP__SHIFT 0x1c
4121#define TA_CNTL_AUX__TRUNC_SMALL_NEG__SHIFT 0x1d
4122#define TA_CNTL_AUX__ARRAY_ROUND_MODE__SHIFT 0x1e
4123#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N_MASK 0x00000001L
4124#define TA_CNTL_AUX__RESERVED_MASK 0x0000000EL
4125#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE_MASK 0x00000020L
4126#define TA_CNTL_AUX__GATHERH_DST_SEL_MASK 0x00000040L
4127#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE_MASK 0x00000080L
4128#define TA_CNTL_AUX__NONIMG_ANISO_BYPASS_MASK 0x00000200L
4129#define TA_CNTL_AUX__ANISO_HALF_THRESH_MASK 0x00000C00L
4130#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS_MASK 0x00001000L
4131#define TA_CNTL_AUX__ANISO_STEP_ORDER_MASK 0x00002000L
4132#define TA_CNTL_AUX__ANISO_STEP_MASK 0x00004000L
4133#define TA_CNTL_AUX__MINMAG_UNNORM_MASK 0x00008000L
4134#define TA_CNTL_AUX__ANISO_WEIGHT_MODE_MASK 0x00010000L
4135#define TA_CNTL_AUX__ANISO_RATIO_LUT_MASK 0x00020000L
4136#define TA_CNTL_AUX__ANISO_TAP_MASK 0x00040000L
4137#define TA_CNTL_AUX__ANISO_MIP_ADJ_MODE_MASK 0x00080000L
4138#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE_MASK 0x00100000L
4139#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE_MASK 0x00200000L
4140#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE_MASK 0x00400000L
4141#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE_MASK 0x00800000L
4142#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE_MASK 0x01000000L
4143#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE_MASK 0x02000000L
4144#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE_MASK 0x04000000L
4145#define TA_CNTL_AUX__DISABLE_DWORD_X2_COALESCE_MASK 0x08000000L
4146#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP_MASK 0x10000000L
4147#define TA_CNTL_AUX__TRUNC_SMALL_NEG_MASK 0x20000000L
4148#define TA_CNTL_AUX__ARRAY_ROUND_MODE_MASK 0xC0000000L
4149//TA_RESERVED_010C
4150#define TA_RESERVED_010C__Unused__SHIFT 0x0
4151#define TA_RESERVED_010C__Unused_MASK 0xFFFFFFFFL
4152//TA_STATUS
4153#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
4154#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
4155#define TA_STATUS__FG_SFIFO_EMPTYB__SHIFT 0xe
4156#define TA_STATUS__FL_PFIFO_EMPTYB__SHIFT 0x10
4157#define TA_STATUS__FL_LFIFO_EMPTYB__SHIFT 0x11
4158#define TA_STATUS__FL_SFIFO_EMPTYB__SHIFT 0x12
4159#define TA_STATUS__FA_PFIFO_EMPTYB__SHIFT 0x14
4160#define TA_STATUS__FA_LFIFO_EMPTYB__SHIFT 0x15
4161#define TA_STATUS__FA_SFIFO_EMPTYB__SHIFT 0x16
4162#define TA_STATUS__IN_BUSY__SHIFT 0x18
4163#define TA_STATUS__FG_BUSY__SHIFT 0x19
4164#define TA_STATUS__LA_BUSY__SHIFT 0x1a
4165#define TA_STATUS__FL_BUSY__SHIFT 0x1b
4166#define TA_STATUS__TA_BUSY__SHIFT 0x1c
4167#define TA_STATUS__FA_BUSY__SHIFT 0x1d
4168#define TA_STATUS__AL_BUSY__SHIFT 0x1e
4169#define TA_STATUS__BUSY__SHIFT 0x1f
4170#define TA_STATUS__FG_PFIFO_EMPTYB_MASK 0x00001000L
4171#define TA_STATUS__FG_LFIFO_EMPTYB_MASK 0x00002000L
4172#define TA_STATUS__FG_SFIFO_EMPTYB_MASK 0x00004000L
4173#define TA_STATUS__FL_PFIFO_EMPTYB_MASK 0x00010000L
4174#define TA_STATUS__FL_LFIFO_EMPTYB_MASK 0x00020000L
4175#define TA_STATUS__FL_SFIFO_EMPTYB_MASK 0x00040000L
4176#define TA_STATUS__FA_PFIFO_EMPTYB_MASK 0x00100000L
4177#define TA_STATUS__FA_LFIFO_EMPTYB_MASK 0x00200000L
4178#define TA_STATUS__FA_SFIFO_EMPTYB_MASK 0x00400000L
4179#define TA_STATUS__IN_BUSY_MASK 0x01000000L
4180#define TA_STATUS__FG_BUSY_MASK 0x02000000L
4181#define TA_STATUS__LA_BUSY_MASK 0x04000000L
4182#define TA_STATUS__FL_BUSY_MASK 0x08000000L
4183#define TA_STATUS__TA_BUSY_MASK 0x10000000L
4184#define TA_STATUS__FA_BUSY_MASK 0x20000000L
4185#define TA_STATUS__AL_BUSY_MASK 0x40000000L
4186#define TA_STATUS__BUSY_MASK 0x80000000L
4187//TA_SCRATCH
4188#define TA_SCRATCH__SCRATCH__SHIFT 0x0
4189#define TA_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
4190
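/*
 * Editorial sketch, not part of the generated header: the TD/TA status
 * registers above expose per-unit busy flags plus an aggregate BUSY bit.  A
 * hedged example of testing that bit on an already-read TA_STATUS value
 * follows; how the value is read from the hardware is driver-specific and
 * deliberately left out.  uint32_t is reused from the earlier sketch's
 * <stdint.h> include.
 */
static inline int ta_is_busy(uint32_t ta_status_val)
{
	/* TA_STATUS bit 31 summarises the per-block busy bits below it. */
	return (ta_status_val & TA_STATUS__BUSY_MASK) != 0;
}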
4191
4192// addressBlock: gc_gdsdec
4193//GDS_CONFIG
4194#define GDS_CONFIG__SH0_GPR_PHASE_SEL__SHIFT 0x1
4195#define GDS_CONFIG__SH1_GPR_PHASE_SEL__SHIFT 0x3
4196#define GDS_CONFIG__SH2_GPR_PHASE_SEL__SHIFT 0x5
4197#define GDS_CONFIG__SH3_GPR_PHASE_SEL__SHIFT 0x7
4198#define GDS_CONFIG__SH0_GPR_PHASE_SEL_MASK 0x00000006L
4199#define GDS_CONFIG__SH1_GPR_PHASE_SEL_MASK 0x00000018L
4200#define GDS_CONFIG__SH2_GPR_PHASE_SEL_MASK 0x00000060L
4201#define GDS_CONFIG__SH3_GPR_PHASE_SEL_MASK 0x00000180L
4202//GDS_CNTL_STATUS
4203#define GDS_CNTL_STATUS__GDS_BUSY__SHIFT 0x0
4204#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY__SHIFT 0x1
4205#define GDS_CNTL_STATUS__ORD_APP_BUSY__SHIFT 0x2
4206#define GDS_CNTL_STATUS__DS_BANK_CONFLICT__SHIFT 0x3
4207#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT__SHIFT 0x4
4208#define GDS_CNTL_STATUS__DS_WR_CLAMP__SHIFT 0x5
4209#define GDS_CNTL_STATUS__DS_RD_CLAMP__SHIFT 0x6
4210#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY__SHIFT 0x7
4211#define GDS_CNTL_STATUS__DS_BUSY__SHIFT 0x8
4212#define GDS_CNTL_STATUS__GWS_BUSY__SHIFT 0x9
4213#define GDS_CNTL_STATUS__ORD_FIFO_BUSY__SHIFT 0xa
4214#define GDS_CNTL_STATUS__CREDIT_BUSY0__SHIFT 0xb
4215#define GDS_CNTL_STATUS__CREDIT_BUSY1__SHIFT 0xc
4216#define GDS_CNTL_STATUS__CREDIT_BUSY2__SHIFT 0xd
4217#define GDS_CNTL_STATUS__CREDIT_BUSY3__SHIFT 0xe
4218#define GDS_CNTL_STATUS__GDS_BUSY_MASK 0x00000001L
4219#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY_MASK 0x00000002L
4220#define GDS_CNTL_STATUS__ORD_APP_BUSY_MASK 0x00000004L
4221#define GDS_CNTL_STATUS__DS_BANK_CONFLICT_MASK 0x00000008L
4222#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT_MASK 0x00000010L
4223#define GDS_CNTL_STATUS__DS_WR_CLAMP_MASK 0x00000020L
4224#define GDS_CNTL_STATUS__DS_RD_CLAMP_MASK 0x00000040L
4225#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY_MASK 0x00000080L
4226#define GDS_CNTL_STATUS__DS_BUSY_MASK 0x00000100L
4227#define GDS_CNTL_STATUS__GWS_BUSY_MASK 0x00000200L
4228#define GDS_CNTL_STATUS__ORD_FIFO_BUSY_MASK 0x00000400L
4229#define GDS_CNTL_STATUS__CREDIT_BUSY0_MASK 0x00000800L
4230#define GDS_CNTL_STATUS__CREDIT_BUSY1_MASK 0x00001000L
4231#define GDS_CNTL_STATUS__CREDIT_BUSY2_MASK 0x00002000L
4232#define GDS_CNTL_STATUS__CREDIT_BUSY3_MASK 0x00004000L
4233//GDS_ENHANCE2
4234#define GDS_ENHANCE2__MISC__SHIFT 0x0
4235#define GDS_ENHANCE2__UNUSED__SHIFT 0x10
4236#define GDS_ENHANCE2__MISC_MASK 0x0000FFFFL
4237#define GDS_ENHANCE2__UNUSED_MASK 0xFFFF0000L
4238//GDS_PROTECTION_FAULT
4239#define GDS_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
4240#define GDS_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
4241#define GDS_PROTECTION_FAULT__GRBM__SHIFT 0x2
4242#define GDS_PROTECTION_FAULT__SH_ID__SHIFT 0x3
4243#define GDS_PROTECTION_FAULT__CU_ID__SHIFT 0x6
4244#define GDS_PROTECTION_FAULT__SIMD_ID__SHIFT 0xa
4245#define GDS_PROTECTION_FAULT__WAVE_ID__SHIFT 0xc
4246#define GDS_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
4247#define GDS_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
4248#define GDS_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
4249#define GDS_PROTECTION_FAULT__GRBM_MASK 0x00000004L
4250#define GDS_PROTECTION_FAULT__SH_ID_MASK 0x00000038L
4251#define GDS_PROTECTION_FAULT__CU_ID_MASK 0x000003C0L
4252#define GDS_PROTECTION_FAULT__SIMD_ID_MASK 0x00000C00L
4253#define GDS_PROTECTION_FAULT__WAVE_ID_MASK 0x0000F000L
4254#define GDS_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
4255//GDS_VM_PROTECTION_FAULT
4256#define GDS_VM_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
4257#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
4258#define GDS_VM_PROTECTION_FAULT__GWS__SHIFT 0x2
4259#define GDS_VM_PROTECTION_FAULT__OA__SHIFT 0x3
4260#define GDS_VM_PROTECTION_FAULT__GRBM__SHIFT 0x4
4261#define GDS_VM_PROTECTION_FAULT__TMZ__SHIFT 0x5
4262#define GDS_VM_PROTECTION_FAULT__VMID__SHIFT 0x8
4263#define GDS_VM_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
4264#define GDS_VM_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
4265#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
4266#define GDS_VM_PROTECTION_FAULT__GWS_MASK 0x00000004L
4267#define GDS_VM_PROTECTION_FAULT__OA_MASK 0x00000008L
4268#define GDS_VM_PROTECTION_FAULT__GRBM_MASK 0x00000010L
4269#define GDS_VM_PROTECTION_FAULT__TMZ_MASK 0x00000020L
4270#define GDS_VM_PROTECTION_FAULT__VMID_MASK 0x00000F00L
4271#define GDS_VM_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
4272//GDS_EDC_CNT
4273#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0
4274#define GDS_EDC_CNT__GDS_INPUT_QUEUE_SED__SHIFT 0x2
4275#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4
4276#define GDS_EDC_CNT__UNUSED__SHIFT 0x6
4277#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L
4278#define GDS_EDC_CNT__GDS_INPUT_QUEUE_SED_MASK 0x0000000CL
4279#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L
4280#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L
4281//GDS_EDC_GRBM_CNT
4282#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0
4283#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2
4284#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4
4285#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L
4286#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL
4287#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L
4288//GDS_EDC_OA_DED
4289#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0
4290#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1
4291#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2
4292#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3
4293#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4
4294#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5
4295#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6
4296#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7
4297#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8
4298#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9
4299#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa
4300#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb
4301#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xc
4302#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L
4303#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L
4304#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L
4305#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L
4306#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L
4307#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L
4308#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L
4309#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L
4310#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L
4311#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L
4312#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L
4313#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L
4314#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFF000L
4315//GDS_DSM_CNTL
4316#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0__SHIFT 0x0
4317#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1__SHIFT 0x1
4318#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
4319#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0__SHIFT 0x3
4320#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1__SHIFT 0x4
4321#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE__SHIFT 0x5
4322#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0__SHIFT 0x6
4323#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1__SHIFT 0x7
4324#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x8
4325#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0__SHIFT 0x9
4326#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1__SHIFT 0xa
4327#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
4328#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0__SHIFT 0xc
4329#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1__SHIFT 0xd
4330#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
4331#define GDS_DSM_CNTL__UNUSED__SHIFT 0xf
4332#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0_MASK 0x00000001L
4333#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1_MASK 0x00000002L
4334#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
4335#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0_MASK 0x00000008L
4336#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1_MASK 0x00000010L
4337#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE_MASK 0x00000020L
4338#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0_MASK 0x00000040L
4339#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1_MASK 0x00000080L
4340#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
4341#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0_MASK 0x00000200L
4342#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1_MASK 0x00000400L
4343#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
4344#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0_MASK 0x00001000L
4345#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1_MASK 0x00002000L
4346#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
4347#define GDS_DSM_CNTL__UNUSED_MASK 0xFFFF8000L
4348//GDS_EDC_OA_PHY_CNT
4349#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0
4350#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2
4351#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4
4352#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6
4353#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SED__SHIFT 0x8
4354#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xa
4355#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L
4356#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL
4357#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L
4358#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L
4359#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SED_MASK 0x00000300L
4360#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFFC00L
4361//GDS_EDC_OA_PIPE_CNT
4362#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0
4363#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2
4364#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4
4365#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6
4366#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8
4367#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa
4368#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc
4369#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe
4370#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10
4371#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L
4372#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL
4373#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L
4374#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L
4375#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L
4376#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L
4377#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L
4378#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L
4379#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L
4380//GDS_DSM_CNTL2
4381#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
4382#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
4383#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT__SHIFT 0x3
4384#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY__SHIFT 0x5
4385#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT__SHIFT 0x6
4386#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY__SHIFT 0x8
4387#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
4388#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
4389#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
4390#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY__SHIFT 0xe
4391#define GDS_DSM_CNTL2__UNUSED__SHIFT 0xf
4392#define GDS_DSM_CNTL2__GDS_INJECT_DELAY__SHIFT 0x1a
4393#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
4394#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
4395#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT_MASK 0x00000018L
4396#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY_MASK 0x00000020L
4397#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
4398#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L
4399#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
4400#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
4401#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
4402#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
4403#define GDS_DSM_CNTL2__UNUSED_MASK 0x03FF8000L
4404#define GDS_DSM_CNTL2__GDS_INJECT_DELAY_MASK 0xFC000000L
4405//GDS_WD_GDS_CSB
4406#define GDS_WD_GDS_CSB__COUNTER__SHIFT 0x0
4407#define GDS_WD_GDS_CSB__UNUSED__SHIFT 0xd
4408#define GDS_WD_GDS_CSB__COUNTER_MASK 0x00001FFFL
4409#define GDS_WD_GDS_CSB__UNUSED_MASK 0xFFFFE000L
4410
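/*
 * Editorial sketch, not part of the generated header: fault registers such as
 * GDS_PROTECTION_FAULT pack several small fields into one dword.  The decode
 * below is an illustrative assumption of how the __SHIFT/__MASK pairs above
 * would be used together; the struct and function names are ours, not amdgpu
 * API.
 */
struct gds_fault_info {
	uint32_t sh_id;
	uint32_t cu_id;
	uint32_t simd_id;
	uint32_t wave_id;
	uint32_t address;
};

static inline struct gds_fault_info gds_decode_protection_fault(uint32_t val)
{
	struct gds_fault_info f;

	/* Each field: mask off its bits, then shift down to bit 0. */
	f.sh_id   = (val & GDS_PROTECTION_FAULT__SH_ID_MASK)   >> GDS_PROTECTION_FAULT__SH_ID__SHIFT;
	f.cu_id   = (val & GDS_PROTECTION_FAULT__CU_ID_MASK)   >> GDS_PROTECTION_FAULT__CU_ID__SHIFT;
	f.simd_id = (val & GDS_PROTECTION_FAULT__SIMD_ID_MASK) >> GDS_PROTECTION_FAULT__SIMD_ID__SHIFT;
	f.wave_id = (val & GDS_PROTECTION_FAULT__WAVE_ID_MASK) >> GDS_PROTECTION_FAULT__WAVE_ID__SHIFT;
	f.address = (val & GDS_PROTECTION_FAULT__ADDRESS_MASK) >> GDS_PROTECTION_FAULT__ADDRESS__SHIFT;
	return f;
}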
4411
4412// addressBlock: gc_rbdec
4413//DB_DEBUG
4414#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE__SHIFT 0x0
4415#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE__SHIFT 0x1
4416#define DB_DEBUG__FETCH_FULL_Z_TILE__SHIFT 0x2
4417#define DB_DEBUG__FETCH_FULL_STENCIL_TILE__SHIFT 0x3
4418#define DB_DEBUG__FORCE_Z_MODE__SHIFT 0x4
4419#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ__SHIFT 0x6
4420#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ__SHIFT 0x7
4421#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE__SHIFT 0x8
4422#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0__SHIFT 0xa
4423#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1__SHIFT 0xc
4424#define DB_DEBUG__DEBUG_FAST_Z_DISABLE__SHIFT 0xe
4425#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE__SHIFT 0xf
4426#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE__SHIFT 0x10
4427#define DB_DEBUG__DISABLE_SUMM_SQUADS__SHIFT 0x11
4428#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS__SHIFT 0x12
4429#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE__SHIFT 0x13
4430#define DB_DEBUG__NEVER_FREE_Z_ONLY__SHIFT 0x15
4431#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS__SHIFT 0x16
4432#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION__SHIFT 0x17
4433#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES__SHIFT 0x18
4434#define DB_DEBUG__ONE_FREE_IN_FLIGHT__SHIFT 0x1c
4435#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT__SHIFT 0x1d
4436#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC__SHIFT 0x1e
4437#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC__SHIFT 0x1f
4438#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE_MASK 0x00000001L
4439#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE_MASK 0x00000002L
4440#define DB_DEBUG__FETCH_FULL_Z_TILE_MASK 0x00000004L
4441#define DB_DEBUG__FETCH_FULL_STENCIL_TILE_MASK 0x00000008L
4442#define DB_DEBUG__FORCE_Z_MODE_MASK 0x00000030L
4443#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ_MASK 0x00000040L
4444#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ_MASK 0x00000080L
4445#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE_MASK 0x00000300L
4446#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0_MASK 0x00000C00L
4447#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1_MASK 0x00003000L
4448#define DB_DEBUG__DEBUG_FAST_Z_DISABLE_MASK 0x00004000L
4449#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE_MASK 0x00008000L
4450#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE_MASK 0x00010000L
4451#define DB_DEBUG__DISABLE_SUMM_SQUADS_MASK 0x00020000L
4452#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS_MASK 0x00040000L
4453#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE_MASK 0x00180000L
4454#define DB_DEBUG__NEVER_FREE_Z_ONLY_MASK 0x00200000L
4455#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS_MASK 0x00400000L
4456#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION_MASK 0x00800000L
4457#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES_MASK 0x0F000000L
4458#define DB_DEBUG__ONE_FREE_IN_FLIGHT_MASK 0x10000000L
4459#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT_MASK 0x20000000L
4460#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC_MASK 0x40000000L
4461#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC_MASK 0x80000000L
4462//DB_DEBUG2
4463#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING__SHIFT 0x0
4464#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE__SHIFT 0x1
4465#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE__SHIFT 0x2
4466#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB__SHIFT 0x3
4467#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM__SHIFT 0x4
4468#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL__SHIFT 0x5
4469#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ__SHIFT 0x6
4470#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL__SHIFT 0x7
4471#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE__SHIFT 0x8
4472#define DB_DEBUG2__CLK_OFF_DELAY__SHIFT 0x9
4473#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER__SHIFT 0xe
4474#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING__SHIFT 0xf
4475#define DB_DEBUG2__RESERVED__SHIFT 0x10
4476#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING__SHIFT 0x11
4477#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING__SHIFT 0x12
4478#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL__SHIFT 0x13
4479#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM__SHIFT 0x1c
4480#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL__SHIFT 0x1d
4481#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM__SHIFT 0x1e
4482#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT__SHIFT 0x1f
4483#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING_MASK 0x00000001L
4484#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE_MASK 0x00000002L
4485#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE_MASK 0x00000004L
4486#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB_MASK 0x00000008L
4487#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM_MASK 0x00000010L
4488#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_MASK 0x00000020L
4489#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ_MASK 0x00000040L
4490#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL_MASK 0x00000080L
4491#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE_MASK 0x00000100L
4492#define DB_DEBUG2__CLK_OFF_DELAY_MASK 0x00003E00L
4493#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER_MASK 0x00004000L
4494#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING_MASK 0x00008000L
4495#define DB_DEBUG2__RESERVED_MASK 0x00010000L
4496#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING_MASK 0x00020000L
4497#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING_MASK 0x00040000L
4498#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL_MASK 0x00080000L
4499#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM_MASK 0x10000000L
4500#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL_MASK 0x20000000L
4501#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM_MASK 0x40000000L
4502#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT_MASK 0x80000000L
4503//DB_DEBUG3
4504#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION__SHIFT 0x0
4505#define DB_DEBUG3__ROUND_ZRANGE_CORRECTION__SHIFT 0x1
4506#define DB_DEBUG3__FORCE_DB_IS_GOOD__SHIFT 0x2
4507#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION__SHIFT 0x3
4508#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP__SHIFT 0x4
4509#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z__SHIFT 0x5
4510#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z__SHIFT 0x6
4511#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS__SHIFT 0x7
4512#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION__SHIFT 0x8
4513#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT__SHIFT 0x9
4514#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP__SHIFT 0xa
4515#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS__SHIFT 0xb
4516#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING__SHIFT 0xc
4517#define DB_DEBUG3__DISABLE_OP_DF_BYPASS__SHIFT 0xd
4518#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE__SHIFT 0xe
4519#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK__SHIFT 0xf
4520#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION__SHIFT 0x10
4521#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE__SHIFT 0x11
4522#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING__SHIFT 0x12
4523#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE__SHIFT 0x13
4524#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE__SHIFT 0x14
4525#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT__SHIFT 0x15
4526#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB__SHIFT 0x16
4527#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD__SHIFT 0x17
4528#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT__SHIFT 0x18
4529#define DB_DEBUG3__DISABLE_DI_DT_STALL__SHIFT 0x19
4530#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET__SHIFT 0x1a
4531#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX__SHIFT 0x1b
4532#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND__SHIFT 0x1c
4533#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND__SHIFT 0x1d
4534#define DB_DEBUG3__DISABLE_4XAA_2P_DELAYED_WRITE__SHIFT 0x1e
4535#define DB_DEBUG3__DISABLE_4XAA_2P_INTERLEAVED_PMASK__SHIFT 0x1f
4536#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION_MASK 0x00000001L
4537#define DB_DEBUG3__ROUND_ZRANGE_CORRECTION_MASK 0x00000002L
4538#define DB_DEBUG3__FORCE_DB_IS_GOOD_MASK 0x00000004L
4539#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION_MASK 0x00000008L
4540#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP_MASK 0x00000010L
4541#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z_MASK 0x00000020L
4542#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z_MASK 0x00000040L
4543#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS_MASK 0x00000080L
4544#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION_MASK 0x00000100L
4545#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT_MASK 0x00000200L
4546#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP_MASK 0x00000400L
4547#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS_MASK 0x00000800L
4548#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING_MASK 0x00001000L
4549#define DB_DEBUG3__DISABLE_OP_DF_BYPASS_MASK 0x00002000L
4550#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE_MASK 0x00004000L
4551#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK_MASK 0x00008000L
4552#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION_MASK 0x00010000L
4553#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE_MASK 0x00020000L
4554#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING_MASK 0x00040000L
4555#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE_MASK 0x00080000L
4556#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE_MASK 0x00100000L
4557#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT_MASK 0x00200000L
4558#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB_MASK 0x00400000L
4559#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD_MASK 0x00800000L
4560#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT_MASK 0x01000000L
4561#define DB_DEBUG3__DISABLE_DI_DT_STALL_MASK 0x02000000L
4562#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET_MASK 0x04000000L
4563#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX_MASK 0x08000000L
4564#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND_MASK 0x10000000L
4565#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND_MASK 0x20000000L
4566#define DB_DEBUG3__DISABLE_4XAA_2P_DELAYED_WRITE_MASK 0x40000000L
4567#define DB_DEBUG3__DISABLE_4XAA_2P_INTERLEAVED_PMASK_MASK 0x80000000L
4568//DB_DEBUG4
4569#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION__SHIFT 0x0
4570#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION__SHIFT 0x1
4571#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL__SHIFT 0x2
4572#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL__SHIFT 0x3
4573#define DB_DEBUG4__DISABLE_4XAA_2P_ZD_HOLDOFF__SHIFT 0x4
4574#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION__SHIFT 0x5
4575#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE__SHIFT 0x6
4576#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN__SHIFT 0x7
4577#define DB_DEBUG4__DFSM_CONVERT_PASSTHROUGH_TO_BYPASS__SHIFT 0x8
4578#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR__SHIFT 0x9
4579#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR__SHIFT 0xa
4580#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR__SHIFT 0xb
4581#define DB_DEBUG4__DISABLE_SEPARATE_DFSM_CLK__SHIFT 0xc
4582#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP__SHIFT 0xd
4583#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION__SHIFT 0xe
4584#define DB_DEBUG4__DISABLE_TS_WRITE_L0__SHIFT 0xf
4585#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE__SHIFT 0x10
4586#define DB_DEBUG4__DISABLE_HIZ_Q1_TS_COLLISION_DETECT__SHIFT 0x11
4587#define DB_DEBUG4__DISABLE_HIZ_Q2_TS_COLLISION_DETECT__SHIFT 0x12
4588#define DB_DEBUG4__DB_EXTRA_DEBUG4__SHIFT 0x13
4589#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION_MASK 0x00000001L
4590#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION_MASK 0x00000002L
4591#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL_MASK 0x00000004L
4592#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL_MASK 0x00000008L
4593#define DB_DEBUG4__DISABLE_4XAA_2P_ZD_HOLDOFF_MASK 0x00000010L
4594#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION_MASK 0x00000020L
4595#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE_MASK 0x00000040L
4596#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN_MASK 0x00000080L
4597#define DB_DEBUG4__DFSM_CONVERT_PASSTHROUGH_TO_BYPASS_MASK 0x00000100L
4598#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR_MASK 0x00000200L
4599#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR_MASK 0x00000400L
4600#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR_MASK 0x00000800L
4601#define DB_DEBUG4__DISABLE_SEPARATE_DFSM_CLK_MASK 0x00001000L
4602#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP_MASK 0x00002000L
4603#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION_MASK 0x00004000L
4604#define DB_DEBUG4__DISABLE_TS_WRITE_L0_MASK 0x00008000L
4605#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE_MASK 0x00010000L
4606#define DB_DEBUG4__DISABLE_HIZ_Q1_TS_COLLISION_DETECT_MASK 0x00020000L
4607#define DB_DEBUG4__DISABLE_HIZ_Q2_TS_COLLISION_DETECT_MASK 0x00040000L
4608#define DB_DEBUG4__DB_EXTRA_DEBUG4_MASK 0xFFF80000L
4609//DB_CREDIT_LIMIT
4610#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS__SHIFT 0x0
4611#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS__SHIFT 0x5
4612#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS__SHIFT 0xa
4613#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS__SHIFT 0x18
4614#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS_MASK 0x0000001FL
4615#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS_MASK 0x000003E0L
4616#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS_MASK 0x00001C00L
4617#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS_MASK 0x7F000000L
4618//DB_WATERMARKS
4619#define DB_WATERMARKS__DEPTH_FREE__SHIFT 0x0
4620#define DB_WATERMARKS__DEPTH_FLUSH__SHIFT 0x5
4621#define DB_WATERMARKS__FORCE_SUMMARIZE__SHIFT 0xb
4622#define DB_WATERMARKS__DEPTH_PENDING_FREE__SHIFT 0xf
4623#define DB_WATERMARKS__DEPTH_CACHELINE_FREE__SHIFT 0x14
4624#define DB_WATERMARKS__AUTO_FLUSH_HTILE__SHIFT 0x1e
4625#define DB_WATERMARKS__AUTO_FLUSH_QUAD__SHIFT 0x1f
4626#define DB_WATERMARKS__DEPTH_FREE_MASK 0x0000001FL
4627#define DB_WATERMARKS__DEPTH_FLUSH_MASK 0x000007E0L
4628#define DB_WATERMARKS__FORCE_SUMMARIZE_MASK 0x00007800L
4629#define DB_WATERMARKS__DEPTH_PENDING_FREE_MASK 0x000F8000L
4630#define DB_WATERMARKS__DEPTH_CACHELINE_FREE_MASK 0x0FF00000L
4631#define DB_WATERMARKS__AUTO_FLUSH_HTILE_MASK 0x40000000L
4632#define DB_WATERMARKS__AUTO_FLUSH_QUAD_MASK 0x80000000L
4633//DB_SUBTILE_CONTROL
4634#define DB_SUBTILE_CONTROL__MSAA1_X__SHIFT 0x0
4635#define DB_SUBTILE_CONTROL__MSAA1_Y__SHIFT 0x2
4636#define DB_SUBTILE_CONTROL__MSAA2_X__SHIFT 0x4
4637#define DB_SUBTILE_CONTROL__MSAA2_Y__SHIFT 0x6
4638#define DB_SUBTILE_CONTROL__MSAA4_X__SHIFT 0x8
4639#define DB_SUBTILE_CONTROL__MSAA4_Y__SHIFT 0xa
4640#define DB_SUBTILE_CONTROL__MSAA8_X__SHIFT 0xc
4641#define DB_SUBTILE_CONTROL__MSAA8_Y__SHIFT 0xe
4642#define DB_SUBTILE_CONTROL__MSAA16_X__SHIFT 0x10
4643#define DB_SUBTILE_CONTROL__MSAA16_Y__SHIFT 0x12
4644#define DB_SUBTILE_CONTROL__MSAA1_X_MASK 0x00000003L
4645#define DB_SUBTILE_CONTROL__MSAA1_Y_MASK 0x0000000CL
4646#define DB_SUBTILE_CONTROL__MSAA2_X_MASK 0x00000030L
4647#define DB_SUBTILE_CONTROL__MSAA2_Y_MASK 0x000000C0L
4648#define DB_SUBTILE_CONTROL__MSAA4_X_MASK 0x00000300L
4649#define DB_SUBTILE_CONTROL__MSAA4_Y_MASK 0x00000C00L
4650#define DB_SUBTILE_CONTROL__MSAA8_X_MASK 0x00003000L
4651#define DB_SUBTILE_CONTROL__MSAA8_Y_MASK 0x0000C000L
4652#define DB_SUBTILE_CONTROL__MSAA16_X_MASK 0x00030000L
4653#define DB_SUBTILE_CONTROL__MSAA16_Y_MASK 0x000C0000L
4654//DB_FREE_CACHELINES
4655#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH__SHIFT 0x0
4656#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH__SHIFT 0x7
4657#define DB_FREE_CACHELINES__FREE_Z_DEPTH__SHIFT 0xe
4658#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH__SHIFT 0x14
4659#define DB_FREE_CACHELINES__QUAD_READ_REQS__SHIFT 0x18
4660#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH_MASK 0x0000007FL
4661#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH_MASK 0x00003F80L
4662#define DB_FREE_CACHELINES__FREE_Z_DEPTH_MASK 0x000FC000L
4663#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH_MASK 0x00F00000L
4664#define DB_FREE_CACHELINES__QUAD_READ_REQS_MASK 0xFF000000L
4665//DB_FIFO_DEPTH1
4666#define DB_FIFO_DEPTH1__DB_RMI_RDREQ_CREDITS__SHIFT 0x0
4667#define DB_FIFO_DEPTH1__DB_RMI_WRREQ_CREDITS__SHIFT 0x5
4668#define DB_FIFO_DEPTH1__MCC_DEPTH__SHIFT 0xa
4669#define DB_FIFO_DEPTH1__QC_DEPTH__SHIFT 0x10
4670#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH__SHIFT 0x15
4671#define DB_FIFO_DEPTH1__DB_RMI_RDREQ_CREDITS_MASK 0x0000001FL
4672#define DB_FIFO_DEPTH1__DB_RMI_WRREQ_CREDITS_MASK 0x000003E0L
4673#define DB_FIFO_DEPTH1__MCC_DEPTH_MASK 0x0000FC00L
4674#define DB_FIFO_DEPTH1__QC_DEPTH_MASK 0x001F0000L
4675#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH_MASK 0x1FE00000L
4676//DB_FIFO_DEPTH2
4677#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH__SHIFT 0x0
4678#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH__SHIFT 0x8
4679#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH__SHIFT 0xf
4680#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH__SHIFT 0x19
4681#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH_MASK 0x000000FFL
4682#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH_MASK 0x00007F00L
4683#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH_MASK 0x01FF8000L
4684#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH_MASK 0xFE000000L
4685//DB_EXCEPTION_CONTROL
4686#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE__SHIFT 0x0
4687#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE__SHIFT 0x1
4688#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE__SHIFT 0x2
4689#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK 0x00000001L
4690#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK 0x00000002L
4691#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE_MASK 0x00000004L
4692//DB_RING_CONTROL
4693#define DB_RING_CONTROL__COUNTER_CONTROL__SHIFT 0x0
4694#define DB_RING_CONTROL__COUNTER_CONTROL_MASK 0x00000003L
4695//DB_MEM_ARB_WATERMARKS
4696#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK__SHIFT 0x0
4697#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK__SHIFT 0x8
4698#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK__SHIFT 0x10
4699#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK__SHIFT 0x18
4700#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK_MASK 0x00000007L
4701#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK_MASK 0x00000700L
4702#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK_MASK 0x00070000L
4703#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK_MASK 0x07000000L
4704//DB_RMI_CACHE_POLICY
4705#define DB_RMI_CACHE_POLICY__Z_RD__SHIFT 0x0
4706#define DB_RMI_CACHE_POLICY__S_RD__SHIFT 0x1
4707#define DB_RMI_CACHE_POLICY__HTILE_RD__SHIFT 0x2
4708#define DB_RMI_CACHE_POLICY__Z_WR__SHIFT 0x8
4709#define DB_RMI_CACHE_POLICY__S_WR__SHIFT 0x9
4710#define DB_RMI_CACHE_POLICY__HTILE_WR__SHIFT 0xa
4711#define DB_RMI_CACHE_POLICY__ZPCPSD_WR__SHIFT 0xb
4712#define DB_RMI_CACHE_POLICY__CC_RD__SHIFT 0x10
4713#define DB_RMI_CACHE_POLICY__FMASK_RD__SHIFT 0x11
4714#define DB_RMI_CACHE_POLICY__CMASK_RD__SHIFT 0x12
4715#define DB_RMI_CACHE_POLICY__DCC_RD__SHIFT 0x13
4716#define DB_RMI_CACHE_POLICY__CC_WR__SHIFT 0x18
4717#define DB_RMI_CACHE_POLICY__FMASK_WR__SHIFT 0x19
4718#define DB_RMI_CACHE_POLICY__CMASK_WR__SHIFT 0x1a
4719#define DB_RMI_CACHE_POLICY__DCC_WR__SHIFT 0x1b
4720#define DB_RMI_CACHE_POLICY__Z_RD_MASK 0x00000001L
4721#define DB_RMI_CACHE_POLICY__S_RD_MASK 0x00000002L
4722#define DB_RMI_CACHE_POLICY__HTILE_RD_MASK 0x00000004L
4723#define DB_RMI_CACHE_POLICY__Z_WR_MASK 0x00000100L
4724#define DB_RMI_CACHE_POLICY__S_WR_MASK 0x00000200L
4725#define DB_RMI_CACHE_POLICY__HTILE_WR_MASK 0x00000400L
4726#define DB_RMI_CACHE_POLICY__ZPCPSD_WR_MASK 0x00000800L
4727#define DB_RMI_CACHE_POLICY__CC_RD_MASK 0x00010000L
4728#define DB_RMI_CACHE_POLICY__FMASK_RD_MASK 0x00020000L
4729#define DB_RMI_CACHE_POLICY__CMASK_RD_MASK 0x00040000L
4730#define DB_RMI_CACHE_POLICY__DCC_RD_MASK 0x00080000L
4731#define DB_RMI_CACHE_POLICY__CC_WR_MASK 0x01000000L
4732#define DB_RMI_CACHE_POLICY__FMASK_WR_MASK 0x02000000L
4733#define DB_RMI_CACHE_POLICY__CMASK_WR_MASK 0x04000000L
4734#define DB_RMI_CACHE_POLICY__DCC_WR_MASK 0x08000000L
4735//DB_DFSM_CONFIG
4736#define DB_DFSM_CONFIG__BYPASS_DFSM__SHIFT 0x0
4737#define DB_DFSM_CONFIG__DISABLE_PUNCHOUT__SHIFT 0x1
4738#define DB_DFSM_CONFIG__DISABLE_POPS__SHIFT 0x2
4739#define DB_DFSM_CONFIG__FORCE_FLUSH__SHIFT 0x3
4740#define DB_DFSM_CONFIG__MIDDLE_PIPE_MAX_DEPTH__SHIFT 0x8
4741#define DB_DFSM_CONFIG__BYPASS_DFSM_MASK 0x00000001L
4742#define DB_DFSM_CONFIG__DISABLE_PUNCHOUT_MASK 0x00000002L
4743#define DB_DFSM_CONFIG__DISABLE_POPS_MASK 0x00000004L
4744#define DB_DFSM_CONFIG__FORCE_FLUSH_MASK 0x00000008L
4745#define DB_DFSM_CONFIG__MIDDLE_PIPE_MAX_DEPTH_MASK 0x00007F00L
4746//DB_DFSM_WATERMARK
4747#define DB_DFSM_WATERMARK__DFSM_HIGH_WATERMARK__SHIFT 0x0
4748#define DB_DFSM_WATERMARK__POPS_HIGH_WATERMARK__SHIFT 0x10
4749#define DB_DFSM_WATERMARK__DFSM_HIGH_WATERMARK_MASK 0x0000FFFFL
4750#define DB_DFSM_WATERMARK__POPS_HIGH_WATERMARK_MASK 0xFFFF0000L
4751//DB_DFSM_TILES_IN_FLIGHT
4752#define DB_DFSM_TILES_IN_FLIGHT__HIGH_WATERMARK__SHIFT 0x0
4753#define DB_DFSM_TILES_IN_FLIGHT__HARD_LIMIT__SHIFT 0x10
4754#define DB_DFSM_TILES_IN_FLIGHT__HIGH_WATERMARK_MASK 0x0000FFFFL
4755#define DB_DFSM_TILES_IN_FLIGHT__HARD_LIMIT_MASK 0xFFFF0000L
4756//DB_DFSM_PRIMS_IN_FLIGHT
4757#define DB_DFSM_PRIMS_IN_FLIGHT__HIGH_WATERMARK__SHIFT 0x0
4758#define DB_DFSM_PRIMS_IN_FLIGHT__HARD_LIMIT__SHIFT 0x10
4759#define DB_DFSM_PRIMS_IN_FLIGHT__HIGH_WATERMARK_MASK 0x0000FFFFL
4760#define DB_DFSM_PRIMS_IN_FLIGHT__HARD_LIMIT_MASK 0xFFFF0000L
4761//DB_DFSM_WATCHDOG
4762#define DB_DFSM_WATCHDOG__TIMER_TARGET__SHIFT 0x0
4763#define DB_DFSM_WATCHDOG__TIMER_TARGET_MASK 0xFFFFFFFFL
4764//DB_DFSM_FLUSH_ENABLE
4765#define DB_DFSM_FLUSH_ENABLE__PRIMARY_EVENTS__SHIFT 0x0
4766#define DB_DFSM_FLUSH_ENABLE__AUX_FORCE_PASSTHRU__SHIFT 0x18
4767#define DB_DFSM_FLUSH_ENABLE__AUX_EVENTS__SHIFT 0x1c
4768#define DB_DFSM_FLUSH_ENABLE__PRIMARY_EVENTS_MASK 0x000003FFL
4769#define DB_DFSM_FLUSH_ENABLE__AUX_FORCE_PASSTHRU_MASK 0x0F000000L
4770#define DB_DFSM_FLUSH_ENABLE__AUX_EVENTS_MASK 0xF0000000L
4771//DB_DFSM_FLUSH_AUX_EVENT
4772#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_A__SHIFT 0x0
4773#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_B__SHIFT 0x8
4774#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_C__SHIFT 0x10
4775#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_D__SHIFT 0x18
4776#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_A_MASK 0x000000FFL
4777#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_B_MASK 0x0000FF00L
4778#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_C_MASK 0x00FF0000L
4779#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_D_MASK 0xFF000000L
4780//CC_RB_REDUNDANCY
4781#define CC_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
4782#define CC_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
4783#define CC_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
4784#define CC_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
4785#define CC_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
4786#define CC_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
4787#define CC_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
4788#define CC_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
4789//CC_RB_BACKEND_DISABLE
4790#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x10
4791#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00FF0000L
4792//GB_ADDR_CONFIG
4793#define GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
4794#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
4795#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
4796#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
4797#define GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
4798#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x10
4799#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
4800#define GB_ADDR_CONFIG__NUM_GPUS__SHIFT 0x15
4801#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x18
4802#define GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
4803#define GB_ADDR_CONFIG__ROW_SIZE__SHIFT 0x1c
4804#define GB_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x1e
4805#define GB_ADDR_CONFIG__SE_ENABLE__SHIFT 0x1f
4806#define GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
4807#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
4808#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
4809#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
4810#define GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
4811#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
4812#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
4813#define GB_ADDR_CONFIG__NUM_GPUS_MASK 0x00E00000L
4814#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
4815#define GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
4816#define GB_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
4817#define GB_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
4818#define GB_ADDR_CONFIG__SE_ENABLE_MASK 0x80000000L
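/*
 * Editorial sketch, not part of the generated header: the write-side use of
 * the GB_ADDR_CONFIG pairs above shifts an encoded value into position and
 * masks it.  The helper below is an illustrative assumption covering just two
 * fields; real driver code programs the full register from the ASIC's
 * configuration data.
 */
static inline uint32_t gb_addr_config_set_pipes_banks(uint32_t reg_val,
						      uint32_t num_pipes,
						      uint32_t num_banks)
{
	/* Clear both fields, then insert the new encodings, clamped by the masks. */
	reg_val &= ~(GB_ADDR_CONFIG__NUM_PIPES_MASK | GB_ADDR_CONFIG__NUM_BANKS_MASK);
	reg_val |= (num_pipes << GB_ADDR_CONFIG__NUM_PIPES__SHIFT) & GB_ADDR_CONFIG__NUM_PIPES_MASK;
	reg_val |= (num_banks << GB_ADDR_CONFIG__NUM_BANKS__SHIFT) & GB_ADDR_CONFIG__NUM_BANKS_MASK;
	return reg_val;
}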
4819//GB_BACKEND_MAP
4820#define GB_BACKEND_MAP__BACKEND_MAP__SHIFT 0x0
4821#define GB_BACKEND_MAP__BACKEND_MAP_MASK 0xFFFFFFFFL
4822//GB_GPU_ID
4823#define GB_GPU_ID__GPU_ID__SHIFT 0x0
4824#define GB_GPU_ID__GPU_ID_MASK 0x0000000FL
4825//CC_RB_DAISY_CHAIN
4826#define CC_RB_DAISY_CHAIN__RB_0__SHIFT 0x0
4827#define CC_RB_DAISY_CHAIN__RB_1__SHIFT 0x4
4828#define CC_RB_DAISY_CHAIN__RB_2__SHIFT 0x8
4829#define CC_RB_DAISY_CHAIN__RB_3__SHIFT 0xc
4830#define CC_RB_DAISY_CHAIN__RB_4__SHIFT 0x10
4831#define CC_RB_DAISY_CHAIN__RB_5__SHIFT 0x14
4832#define CC_RB_DAISY_CHAIN__RB_6__SHIFT 0x18
4833#define CC_RB_DAISY_CHAIN__RB_7__SHIFT 0x1c
4834#define CC_RB_DAISY_CHAIN__RB_0_MASK 0x0000000FL
4835#define CC_RB_DAISY_CHAIN__RB_1_MASK 0x000000F0L
4836#define CC_RB_DAISY_CHAIN__RB_2_MASK 0x00000F00L
4837#define CC_RB_DAISY_CHAIN__RB_3_MASK 0x0000F000L
4838#define CC_RB_DAISY_CHAIN__RB_4_MASK 0x000F0000L
4839#define CC_RB_DAISY_CHAIN__RB_5_MASK 0x00F00000L
4840#define CC_RB_DAISY_CHAIN__RB_6_MASK 0x0F000000L
4841#define CC_RB_DAISY_CHAIN__RB_7_MASK 0xF0000000L
4842//GB_ADDR_CONFIG_READ
4843#define GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
4844#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
4845#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
4846#define GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
4847#define GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
4848#define GB_ADDR_CONFIG_READ__SHADER_ENGINE_TILE_SIZE__SHIFT 0x10
4849#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
4850#define GB_ADDR_CONFIG_READ__NUM_GPUS__SHIFT 0x15
4851#define GB_ADDR_CONFIG_READ__MULTI_GPU_TILE_SIZE__SHIFT 0x18
4852#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
4853#define GB_ADDR_CONFIG_READ__ROW_SIZE__SHIFT 0x1c
4854#define GB_ADDR_CONFIG_READ__NUM_LOWER_PIPES__SHIFT 0x1e
4855#define GB_ADDR_CONFIG_READ__SE_ENABLE__SHIFT 0x1f
4856#define GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
4857#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
4858#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
4859#define GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
4860#define GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
4861#define GB_ADDR_CONFIG_READ__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
4862#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
4863#define GB_ADDR_CONFIG_READ__NUM_GPUS_MASK 0x00E00000L
4864#define GB_ADDR_CONFIG_READ__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
4865#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
4866#define GB_ADDR_CONFIG_READ__ROW_SIZE_MASK 0x30000000L
4867#define GB_ADDR_CONFIG_READ__NUM_LOWER_PIPES_MASK 0x40000000L
4868#define GB_ADDR_CONFIG_READ__SE_ENABLE_MASK 0x80000000L
4869//GB_TILE_MODE0
4870#define GB_TILE_MODE0__ARRAY_MODE__SHIFT 0x2
4871#define GB_TILE_MODE0__PIPE_CONFIG__SHIFT 0x6
4872#define GB_TILE_MODE0__TILE_SPLIT__SHIFT 0xb
4873#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT 0x16
4874#define GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT 0x19
4875#define GB_TILE_MODE0__ARRAY_MODE_MASK 0x0000003CL
4876#define GB_TILE_MODE0__PIPE_CONFIG_MASK 0x000007C0L
4877#define GB_TILE_MODE0__TILE_SPLIT_MASK 0x00003800L
4878#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4879#define GB_TILE_MODE0__SAMPLE_SPLIT_MASK 0x06000000L
4880//GB_TILE_MODE1
4881#define GB_TILE_MODE1__ARRAY_MODE__SHIFT 0x2
4882#define GB_TILE_MODE1__PIPE_CONFIG__SHIFT 0x6
4883#define GB_TILE_MODE1__TILE_SPLIT__SHIFT 0xb
4884#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW__SHIFT 0x16
4885#define GB_TILE_MODE1__SAMPLE_SPLIT__SHIFT 0x19
4886#define GB_TILE_MODE1__ARRAY_MODE_MASK 0x0000003CL
4887#define GB_TILE_MODE1__PIPE_CONFIG_MASK 0x000007C0L
4888#define GB_TILE_MODE1__TILE_SPLIT_MASK 0x00003800L
4889#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4890#define GB_TILE_MODE1__SAMPLE_SPLIT_MASK 0x06000000L
4891//GB_TILE_MODE2
4892#define GB_TILE_MODE2__ARRAY_MODE__SHIFT 0x2
4893#define GB_TILE_MODE2__PIPE_CONFIG__SHIFT 0x6
4894#define GB_TILE_MODE2__TILE_SPLIT__SHIFT 0xb
4895#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW__SHIFT 0x16
4896#define GB_TILE_MODE2__SAMPLE_SPLIT__SHIFT 0x19
4897#define GB_TILE_MODE2__ARRAY_MODE_MASK 0x0000003CL
4898#define GB_TILE_MODE2__PIPE_CONFIG_MASK 0x000007C0L
4899#define GB_TILE_MODE2__TILE_SPLIT_MASK 0x00003800L
4900#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4901#define GB_TILE_MODE2__SAMPLE_SPLIT_MASK 0x06000000L
4902//GB_TILE_MODE3
4903#define GB_TILE_MODE3__ARRAY_MODE__SHIFT 0x2
4904#define GB_TILE_MODE3__PIPE_CONFIG__SHIFT 0x6
4905#define GB_TILE_MODE3__TILE_SPLIT__SHIFT 0xb
4906#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW__SHIFT 0x16
4907#define GB_TILE_MODE3__SAMPLE_SPLIT__SHIFT 0x19
4908#define GB_TILE_MODE3__ARRAY_MODE_MASK 0x0000003CL
4909#define GB_TILE_MODE3__PIPE_CONFIG_MASK 0x000007C0L
4910#define GB_TILE_MODE3__TILE_SPLIT_MASK 0x00003800L
4911#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4912#define GB_TILE_MODE3__SAMPLE_SPLIT_MASK 0x06000000L
4913//GB_TILE_MODE4
4914#define GB_TILE_MODE4__ARRAY_MODE__SHIFT 0x2
4915#define GB_TILE_MODE4__PIPE_CONFIG__SHIFT 0x6
4916#define GB_TILE_MODE4__TILE_SPLIT__SHIFT 0xb
4917#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW__SHIFT 0x16
4918#define GB_TILE_MODE4__SAMPLE_SPLIT__SHIFT 0x19
4919#define GB_TILE_MODE4__ARRAY_MODE_MASK 0x0000003CL
4920#define GB_TILE_MODE4__PIPE_CONFIG_MASK 0x000007C0L
4921#define GB_TILE_MODE4__TILE_SPLIT_MASK 0x00003800L
4922#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4923#define GB_TILE_MODE4__SAMPLE_SPLIT_MASK 0x06000000L
4924//GB_TILE_MODE5
4925#define GB_TILE_MODE5__ARRAY_MODE__SHIFT 0x2
4926#define GB_TILE_MODE5__PIPE_CONFIG__SHIFT 0x6
4927#define GB_TILE_MODE5__TILE_SPLIT__SHIFT 0xb
4928#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW__SHIFT 0x16
4929#define GB_TILE_MODE5__SAMPLE_SPLIT__SHIFT 0x19
4930#define GB_TILE_MODE5__ARRAY_MODE_MASK 0x0000003CL
4931#define GB_TILE_MODE5__PIPE_CONFIG_MASK 0x000007C0L
4932#define GB_TILE_MODE5__TILE_SPLIT_MASK 0x00003800L
4933#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4934#define GB_TILE_MODE5__SAMPLE_SPLIT_MASK 0x06000000L
4935//GB_TILE_MODE6
4936#define GB_TILE_MODE6__ARRAY_MODE__SHIFT 0x2
4937#define GB_TILE_MODE6__PIPE_CONFIG__SHIFT 0x6
4938#define GB_TILE_MODE6__TILE_SPLIT__SHIFT 0xb
4939#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW__SHIFT 0x16
4940#define GB_TILE_MODE6__SAMPLE_SPLIT__SHIFT 0x19
4941#define GB_TILE_MODE6__ARRAY_MODE_MASK 0x0000003CL
4942#define GB_TILE_MODE6__PIPE_CONFIG_MASK 0x000007C0L
4943#define GB_TILE_MODE6__TILE_SPLIT_MASK 0x00003800L
4944#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4945#define GB_TILE_MODE6__SAMPLE_SPLIT_MASK 0x06000000L
4946//GB_TILE_MODE7
4947#define GB_TILE_MODE7__ARRAY_MODE__SHIFT 0x2
4948#define GB_TILE_MODE7__PIPE_CONFIG__SHIFT 0x6
4949#define GB_TILE_MODE7__TILE_SPLIT__SHIFT 0xb
4950#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW__SHIFT 0x16
4951#define GB_TILE_MODE7__SAMPLE_SPLIT__SHIFT 0x19
4952#define GB_TILE_MODE7__ARRAY_MODE_MASK 0x0000003CL
4953#define GB_TILE_MODE7__PIPE_CONFIG_MASK 0x000007C0L
4954#define GB_TILE_MODE7__TILE_SPLIT_MASK 0x00003800L
4955#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4956#define GB_TILE_MODE7__SAMPLE_SPLIT_MASK 0x06000000L
4957//GB_TILE_MODE8
4958#define GB_TILE_MODE8__ARRAY_MODE__SHIFT 0x2
4959#define GB_TILE_MODE8__PIPE_CONFIG__SHIFT 0x6
4960#define GB_TILE_MODE8__TILE_SPLIT__SHIFT 0xb
4961#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW__SHIFT 0x16
4962#define GB_TILE_MODE8__SAMPLE_SPLIT__SHIFT 0x19
4963#define GB_TILE_MODE8__ARRAY_MODE_MASK 0x0000003CL
4964#define GB_TILE_MODE8__PIPE_CONFIG_MASK 0x000007C0L
4965#define GB_TILE_MODE8__TILE_SPLIT_MASK 0x00003800L
4966#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4967#define GB_TILE_MODE8__SAMPLE_SPLIT_MASK 0x06000000L
4968//GB_TILE_MODE9
4969#define GB_TILE_MODE9__ARRAY_MODE__SHIFT 0x2
4970#define GB_TILE_MODE9__PIPE_CONFIG__SHIFT 0x6
4971#define GB_TILE_MODE9__TILE_SPLIT__SHIFT 0xb
4972#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW__SHIFT 0x16
4973#define GB_TILE_MODE9__SAMPLE_SPLIT__SHIFT 0x19
4974#define GB_TILE_MODE9__ARRAY_MODE_MASK 0x0000003CL
4975#define GB_TILE_MODE9__PIPE_CONFIG_MASK 0x000007C0L
4976#define GB_TILE_MODE9__TILE_SPLIT_MASK 0x00003800L
4977#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4978#define GB_TILE_MODE9__SAMPLE_SPLIT_MASK 0x06000000L
4979//GB_TILE_MODE10
4980#define GB_TILE_MODE10__ARRAY_MODE__SHIFT 0x2
4981#define GB_TILE_MODE10__PIPE_CONFIG__SHIFT 0x6
4982#define GB_TILE_MODE10__TILE_SPLIT__SHIFT 0xb
4983#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW__SHIFT 0x16
4984#define GB_TILE_MODE10__SAMPLE_SPLIT__SHIFT 0x19
4985#define GB_TILE_MODE10__ARRAY_MODE_MASK 0x0000003CL
4986#define GB_TILE_MODE10__PIPE_CONFIG_MASK 0x000007C0L
4987#define GB_TILE_MODE10__TILE_SPLIT_MASK 0x00003800L
4988#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
4989#define GB_TILE_MODE10__SAMPLE_SPLIT_MASK 0x06000000L
4990//GB_TILE_MODE11
4991#define GB_TILE_MODE11__ARRAY_MODE__SHIFT 0x2
4992#define GB_TILE_MODE11__PIPE_CONFIG__SHIFT 0x6
4993#define GB_TILE_MODE11__TILE_SPLIT__SHIFT 0xb
4994#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW__SHIFT 0x16
4995#define GB_TILE_MODE11__SAMPLE_SPLIT__SHIFT 0x19
4996#define GB_TILE_MODE11__ARRAY_MODE_MASK 0x0000003CL
4997#define GB_TILE_MODE11__PIPE_CONFIG_MASK 0x000007C0L
4998#define GB_TILE_MODE11__TILE_SPLIT_MASK 0x00003800L
4999#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5000#define GB_TILE_MODE11__SAMPLE_SPLIT_MASK 0x06000000L
5001//GB_TILE_MODE12
5002#define GB_TILE_MODE12__ARRAY_MODE__SHIFT 0x2
5003#define GB_TILE_MODE12__PIPE_CONFIG__SHIFT 0x6
5004#define GB_TILE_MODE12__TILE_SPLIT__SHIFT 0xb
5005#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW__SHIFT 0x16
5006#define GB_TILE_MODE12__SAMPLE_SPLIT__SHIFT 0x19
5007#define GB_TILE_MODE12__ARRAY_MODE_MASK 0x0000003CL
5008#define GB_TILE_MODE12__PIPE_CONFIG_MASK 0x000007C0L
5009#define GB_TILE_MODE12__TILE_SPLIT_MASK 0x00003800L
5010#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5011#define GB_TILE_MODE12__SAMPLE_SPLIT_MASK 0x06000000L
5012//GB_TILE_MODE13
5013#define GB_TILE_MODE13__ARRAY_MODE__SHIFT 0x2
5014#define GB_TILE_MODE13__PIPE_CONFIG__SHIFT 0x6
5015#define GB_TILE_MODE13__TILE_SPLIT__SHIFT 0xb
5016#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW__SHIFT 0x16
5017#define GB_TILE_MODE13__SAMPLE_SPLIT__SHIFT 0x19
5018#define GB_TILE_MODE13__ARRAY_MODE_MASK 0x0000003CL
5019#define GB_TILE_MODE13__PIPE_CONFIG_MASK 0x000007C0L
5020#define GB_TILE_MODE13__TILE_SPLIT_MASK 0x00003800L
5021#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5022#define GB_TILE_MODE13__SAMPLE_SPLIT_MASK 0x06000000L
5023//GB_TILE_MODE14
5024#define GB_TILE_MODE14__ARRAY_MODE__SHIFT 0x2
5025#define GB_TILE_MODE14__PIPE_CONFIG__SHIFT 0x6
5026#define GB_TILE_MODE14__TILE_SPLIT__SHIFT 0xb
5027#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW__SHIFT 0x16
5028#define GB_TILE_MODE14__SAMPLE_SPLIT__SHIFT 0x19
5029#define GB_TILE_MODE14__ARRAY_MODE_MASK 0x0000003CL
5030#define GB_TILE_MODE14__PIPE_CONFIG_MASK 0x000007C0L
5031#define GB_TILE_MODE14__TILE_SPLIT_MASK 0x00003800L
5032#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5033#define GB_TILE_MODE14__SAMPLE_SPLIT_MASK 0x06000000L
5034//GB_TILE_MODE15
5035#define GB_TILE_MODE15__ARRAY_MODE__SHIFT 0x2
5036#define GB_TILE_MODE15__PIPE_CONFIG__SHIFT 0x6
5037#define GB_TILE_MODE15__TILE_SPLIT__SHIFT 0xb
5038#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW__SHIFT 0x16
5039#define GB_TILE_MODE15__SAMPLE_SPLIT__SHIFT 0x19
5040#define GB_TILE_MODE15__ARRAY_MODE_MASK 0x0000003CL
5041#define GB_TILE_MODE15__PIPE_CONFIG_MASK 0x000007C0L
5042#define GB_TILE_MODE15__TILE_SPLIT_MASK 0x00003800L
5043#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5044#define GB_TILE_MODE15__SAMPLE_SPLIT_MASK 0x06000000L
5045//GB_TILE_MODE16
5046#define GB_TILE_MODE16__ARRAY_MODE__SHIFT 0x2
5047#define GB_TILE_MODE16__PIPE_CONFIG__SHIFT 0x6
5048#define GB_TILE_MODE16__TILE_SPLIT__SHIFT 0xb
5049#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW__SHIFT 0x16
5050#define GB_TILE_MODE16__SAMPLE_SPLIT__SHIFT 0x19
5051#define GB_TILE_MODE16__ARRAY_MODE_MASK 0x0000003CL
5052#define GB_TILE_MODE16__PIPE_CONFIG_MASK 0x000007C0L
5053#define GB_TILE_MODE16__TILE_SPLIT_MASK 0x00003800L
5054#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5055#define GB_TILE_MODE16__SAMPLE_SPLIT_MASK 0x06000000L
5056//GB_TILE_MODE17
5057#define GB_TILE_MODE17__ARRAY_MODE__SHIFT 0x2
5058#define GB_TILE_MODE17__PIPE_CONFIG__SHIFT 0x6
5059#define GB_TILE_MODE17__TILE_SPLIT__SHIFT 0xb
5060#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW__SHIFT 0x16
5061#define GB_TILE_MODE17__SAMPLE_SPLIT__SHIFT 0x19
5062#define GB_TILE_MODE17__ARRAY_MODE_MASK 0x0000003CL
5063#define GB_TILE_MODE17__PIPE_CONFIG_MASK 0x000007C0L
5064#define GB_TILE_MODE17__TILE_SPLIT_MASK 0x00003800L
5065#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5066#define GB_TILE_MODE17__SAMPLE_SPLIT_MASK 0x06000000L
5067//GB_TILE_MODE18
5068#define GB_TILE_MODE18__ARRAY_MODE__SHIFT 0x2
5069#define GB_TILE_MODE18__PIPE_CONFIG__SHIFT 0x6
5070#define GB_TILE_MODE18__TILE_SPLIT__SHIFT 0xb
5071#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW__SHIFT 0x16
5072#define GB_TILE_MODE18__SAMPLE_SPLIT__SHIFT 0x19
5073#define GB_TILE_MODE18__ARRAY_MODE_MASK 0x0000003CL
5074#define GB_TILE_MODE18__PIPE_CONFIG_MASK 0x000007C0L
5075#define GB_TILE_MODE18__TILE_SPLIT_MASK 0x00003800L
5076#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5077#define GB_TILE_MODE18__SAMPLE_SPLIT_MASK 0x06000000L
5078//GB_TILE_MODE19
5079#define GB_TILE_MODE19__ARRAY_MODE__SHIFT 0x2
5080#define GB_TILE_MODE19__PIPE_CONFIG__SHIFT 0x6
5081#define GB_TILE_MODE19__TILE_SPLIT__SHIFT 0xb
5082#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW__SHIFT 0x16
5083#define GB_TILE_MODE19__SAMPLE_SPLIT__SHIFT 0x19
5084#define GB_TILE_MODE19__ARRAY_MODE_MASK 0x0000003CL
5085#define GB_TILE_MODE19__PIPE_CONFIG_MASK 0x000007C0L
5086#define GB_TILE_MODE19__TILE_SPLIT_MASK 0x00003800L
5087#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5088#define GB_TILE_MODE19__SAMPLE_SPLIT_MASK 0x06000000L
5089//GB_TILE_MODE20
5090#define GB_TILE_MODE20__ARRAY_MODE__SHIFT 0x2
5091#define GB_TILE_MODE20__PIPE_CONFIG__SHIFT 0x6
5092#define GB_TILE_MODE20__TILE_SPLIT__SHIFT 0xb
5093#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW__SHIFT 0x16
5094#define GB_TILE_MODE20__SAMPLE_SPLIT__SHIFT 0x19
5095#define GB_TILE_MODE20__ARRAY_MODE_MASK 0x0000003CL
5096#define GB_TILE_MODE20__PIPE_CONFIG_MASK 0x000007C0L
5097#define GB_TILE_MODE20__TILE_SPLIT_MASK 0x00003800L
5098#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5099#define GB_TILE_MODE20__SAMPLE_SPLIT_MASK 0x06000000L
5100//GB_TILE_MODE21
5101#define GB_TILE_MODE21__ARRAY_MODE__SHIFT 0x2
5102#define GB_TILE_MODE21__PIPE_CONFIG__SHIFT 0x6
5103#define GB_TILE_MODE21__TILE_SPLIT__SHIFT 0xb
5104#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW__SHIFT 0x16
5105#define GB_TILE_MODE21__SAMPLE_SPLIT__SHIFT 0x19
5106#define GB_TILE_MODE21__ARRAY_MODE_MASK 0x0000003CL
5107#define GB_TILE_MODE21__PIPE_CONFIG_MASK 0x000007C0L
5108#define GB_TILE_MODE21__TILE_SPLIT_MASK 0x00003800L
5109#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5110#define GB_TILE_MODE21__SAMPLE_SPLIT_MASK 0x06000000L
5111//GB_TILE_MODE22
5112#define GB_TILE_MODE22__ARRAY_MODE__SHIFT 0x2
5113#define GB_TILE_MODE22__PIPE_CONFIG__SHIFT 0x6
5114#define GB_TILE_MODE22__TILE_SPLIT__SHIFT 0xb
5115#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW__SHIFT 0x16
5116#define GB_TILE_MODE22__SAMPLE_SPLIT__SHIFT 0x19
5117#define GB_TILE_MODE22__ARRAY_MODE_MASK 0x0000003CL
5118#define GB_TILE_MODE22__PIPE_CONFIG_MASK 0x000007C0L
5119#define GB_TILE_MODE22__TILE_SPLIT_MASK 0x00003800L
5120#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5121#define GB_TILE_MODE22__SAMPLE_SPLIT_MASK 0x06000000L
5122//GB_TILE_MODE23
5123#define GB_TILE_MODE23__ARRAY_MODE__SHIFT 0x2
5124#define GB_TILE_MODE23__PIPE_CONFIG__SHIFT 0x6
5125#define GB_TILE_MODE23__TILE_SPLIT__SHIFT 0xb
5126#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW__SHIFT 0x16
5127#define GB_TILE_MODE23__SAMPLE_SPLIT__SHIFT 0x19
5128#define GB_TILE_MODE23__ARRAY_MODE_MASK 0x0000003CL
5129#define GB_TILE_MODE23__PIPE_CONFIG_MASK 0x000007C0L
5130#define GB_TILE_MODE23__TILE_SPLIT_MASK 0x00003800L
5131#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5132#define GB_TILE_MODE23__SAMPLE_SPLIT_MASK 0x06000000L
5133//GB_TILE_MODE24
5134#define GB_TILE_MODE24__ARRAY_MODE__SHIFT 0x2
5135#define GB_TILE_MODE24__PIPE_CONFIG__SHIFT 0x6
5136#define GB_TILE_MODE24__TILE_SPLIT__SHIFT 0xb
5137#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW__SHIFT 0x16
5138#define GB_TILE_MODE24__SAMPLE_SPLIT__SHIFT 0x19
5139#define GB_TILE_MODE24__ARRAY_MODE_MASK 0x0000003CL
5140#define GB_TILE_MODE24__PIPE_CONFIG_MASK 0x000007C0L
5141#define GB_TILE_MODE24__TILE_SPLIT_MASK 0x00003800L
5142#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5143#define GB_TILE_MODE24__SAMPLE_SPLIT_MASK 0x06000000L
5144//GB_TILE_MODE25
5145#define GB_TILE_MODE25__ARRAY_MODE__SHIFT 0x2
5146#define GB_TILE_MODE25__PIPE_CONFIG__SHIFT 0x6
5147#define GB_TILE_MODE25__TILE_SPLIT__SHIFT 0xb
5148#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW__SHIFT 0x16
5149#define GB_TILE_MODE25__SAMPLE_SPLIT__SHIFT 0x19
5150#define GB_TILE_MODE25__ARRAY_MODE_MASK 0x0000003CL
5151#define GB_TILE_MODE25__PIPE_CONFIG_MASK 0x000007C0L
5152#define GB_TILE_MODE25__TILE_SPLIT_MASK 0x00003800L
5153#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5154#define GB_TILE_MODE25__SAMPLE_SPLIT_MASK 0x06000000L
5155//GB_TILE_MODE26
5156#define GB_TILE_MODE26__ARRAY_MODE__SHIFT 0x2
5157#define GB_TILE_MODE26__PIPE_CONFIG__SHIFT 0x6
5158#define GB_TILE_MODE26__TILE_SPLIT__SHIFT 0xb
5159#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW__SHIFT 0x16
5160#define GB_TILE_MODE26__SAMPLE_SPLIT__SHIFT 0x19
5161#define GB_TILE_MODE26__ARRAY_MODE_MASK 0x0000003CL
5162#define GB_TILE_MODE26__PIPE_CONFIG_MASK 0x000007C0L
5163#define GB_TILE_MODE26__TILE_SPLIT_MASK 0x00003800L
5164#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5165#define GB_TILE_MODE26__SAMPLE_SPLIT_MASK 0x06000000L
5166//GB_TILE_MODE27
5167#define GB_TILE_MODE27__ARRAY_MODE__SHIFT 0x2
5168#define GB_TILE_MODE27__PIPE_CONFIG__SHIFT 0x6
5169#define GB_TILE_MODE27__TILE_SPLIT__SHIFT 0xb
5170#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW__SHIFT 0x16
5171#define GB_TILE_MODE27__SAMPLE_SPLIT__SHIFT 0x19
5172#define GB_TILE_MODE27__ARRAY_MODE_MASK 0x0000003CL
5173#define GB_TILE_MODE27__PIPE_CONFIG_MASK 0x000007C0L
5174#define GB_TILE_MODE27__TILE_SPLIT_MASK 0x00003800L
5175#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5176#define GB_TILE_MODE27__SAMPLE_SPLIT_MASK 0x06000000L
5177//GB_TILE_MODE28
5178#define GB_TILE_MODE28__ARRAY_MODE__SHIFT 0x2
5179#define GB_TILE_MODE28__PIPE_CONFIG__SHIFT 0x6
5180#define GB_TILE_MODE28__TILE_SPLIT__SHIFT 0xb
5181#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW__SHIFT 0x16
5182#define GB_TILE_MODE28__SAMPLE_SPLIT__SHIFT 0x19
5183#define GB_TILE_MODE28__ARRAY_MODE_MASK 0x0000003CL
5184#define GB_TILE_MODE28__PIPE_CONFIG_MASK 0x000007C0L
5185#define GB_TILE_MODE28__TILE_SPLIT_MASK 0x00003800L
5186#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5187#define GB_TILE_MODE28__SAMPLE_SPLIT_MASK 0x06000000L
5188//GB_TILE_MODE29
5189#define GB_TILE_MODE29__ARRAY_MODE__SHIFT 0x2
5190#define GB_TILE_MODE29__PIPE_CONFIG__SHIFT 0x6
5191#define GB_TILE_MODE29__TILE_SPLIT__SHIFT 0xb
5192#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW__SHIFT 0x16
5193#define GB_TILE_MODE29__SAMPLE_SPLIT__SHIFT 0x19
5194#define GB_TILE_MODE29__ARRAY_MODE_MASK 0x0000003CL
5195#define GB_TILE_MODE29__PIPE_CONFIG_MASK 0x000007C0L
5196#define GB_TILE_MODE29__TILE_SPLIT_MASK 0x00003800L
5197#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5198#define GB_TILE_MODE29__SAMPLE_SPLIT_MASK 0x06000000L
5199//GB_TILE_MODE30
5200#define GB_TILE_MODE30__ARRAY_MODE__SHIFT 0x2
5201#define GB_TILE_MODE30__PIPE_CONFIG__SHIFT 0x6
5202#define GB_TILE_MODE30__TILE_SPLIT__SHIFT 0xb
5203#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW__SHIFT 0x16
5204#define GB_TILE_MODE30__SAMPLE_SPLIT__SHIFT 0x19
5205#define GB_TILE_MODE30__ARRAY_MODE_MASK 0x0000003CL
5206#define GB_TILE_MODE30__PIPE_CONFIG_MASK 0x000007C0L
5207#define GB_TILE_MODE30__TILE_SPLIT_MASK 0x00003800L
5208#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5209#define GB_TILE_MODE30__SAMPLE_SPLIT_MASK 0x06000000L
5210//GB_TILE_MODE31
5211#define GB_TILE_MODE31__ARRAY_MODE__SHIFT 0x2
5212#define GB_TILE_MODE31__PIPE_CONFIG__SHIFT 0x6
5213#define GB_TILE_MODE31__TILE_SPLIT__SHIFT 0xb
5214#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW__SHIFT 0x16
5215#define GB_TILE_MODE31__SAMPLE_SPLIT__SHIFT 0x19
5216#define GB_TILE_MODE31__ARRAY_MODE_MASK 0x0000003CL
5217#define GB_TILE_MODE31__PIPE_CONFIG_MASK 0x000007C0L
5218#define GB_TILE_MODE31__TILE_SPLIT_MASK 0x00003800L
5219#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
5220#define GB_TILE_MODE31__SAMPLE_SPLIT_MASK 0x06000000L
5221//GB_MACROTILE_MODE0
5222#define GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT 0x0
5223#define GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT 0x2
5224#define GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT 0x4
5225#define GB_MACROTILE_MODE0__NUM_BANKS__SHIFT 0x6
5226#define GB_MACROTILE_MODE0__BANK_WIDTH_MASK 0x00000003L
5227#define GB_MACROTILE_MODE0__BANK_HEIGHT_MASK 0x0000000CL
5228#define GB_MACROTILE_MODE0__MACRO_TILE_ASPECT_MASK 0x00000030L
5229#define GB_MACROTILE_MODE0__NUM_BANKS_MASK 0x000000C0L
5230//GB_MACROTILE_MODE1
5231#define GB_MACROTILE_MODE1__BANK_WIDTH__SHIFT 0x0
5232#define GB_MACROTILE_MODE1__BANK_HEIGHT__SHIFT 0x2
5233#define GB_MACROTILE_MODE1__MACRO_TILE_ASPECT__SHIFT 0x4
5234#define GB_MACROTILE_MODE1__NUM_BANKS__SHIFT 0x6
5235#define GB_MACROTILE_MODE1__BANK_WIDTH_MASK 0x00000003L
5236#define GB_MACROTILE_MODE1__BANK_HEIGHT_MASK 0x0000000CL
5237#define GB_MACROTILE_MODE1__MACRO_TILE_ASPECT_MASK 0x00000030L
5238#define GB_MACROTILE_MODE1__NUM_BANKS_MASK 0x000000C0L
5239//GB_MACROTILE_MODE2
5240#define GB_MACROTILE_MODE2__BANK_WIDTH__SHIFT 0x0
5241#define GB_MACROTILE_MODE2__BANK_HEIGHT__SHIFT 0x2
5242#define GB_MACROTILE_MODE2__MACRO_TILE_ASPECT__SHIFT 0x4
5243#define GB_MACROTILE_MODE2__NUM_BANKS__SHIFT 0x6
5244#define GB_MACROTILE_MODE2__BANK_WIDTH_MASK 0x00000003L
5245#define GB_MACROTILE_MODE2__BANK_HEIGHT_MASK 0x0000000CL
5246#define GB_MACROTILE_MODE2__MACRO_TILE_ASPECT_MASK 0x00000030L
5247#define GB_MACROTILE_MODE2__NUM_BANKS_MASK 0x000000C0L
5248//GB_MACROTILE_MODE3
5249#define GB_MACROTILE_MODE3__BANK_WIDTH__SHIFT 0x0
5250#define GB_MACROTILE_MODE3__BANK_HEIGHT__SHIFT 0x2
5251#define GB_MACROTILE_MODE3__MACRO_TILE_ASPECT__SHIFT 0x4
5252#define GB_MACROTILE_MODE3__NUM_BANKS__SHIFT 0x6
5253#define GB_MACROTILE_MODE3__BANK_WIDTH_MASK 0x00000003L
5254#define GB_MACROTILE_MODE3__BANK_HEIGHT_MASK 0x0000000CL
5255#define GB_MACROTILE_MODE3__MACRO_TILE_ASPECT_MASK 0x00000030L
5256#define GB_MACROTILE_MODE3__NUM_BANKS_MASK 0x000000C0L
5257//GB_MACROTILE_MODE4
5258#define GB_MACROTILE_MODE4__BANK_WIDTH__SHIFT 0x0
5259#define GB_MACROTILE_MODE4__BANK_HEIGHT__SHIFT 0x2
5260#define GB_MACROTILE_MODE4__MACRO_TILE_ASPECT__SHIFT 0x4
5261#define GB_MACROTILE_MODE4__NUM_BANKS__SHIFT 0x6
5262#define GB_MACROTILE_MODE4__BANK_WIDTH_MASK 0x00000003L
5263#define GB_MACROTILE_MODE4__BANK_HEIGHT_MASK 0x0000000CL
5264#define GB_MACROTILE_MODE4__MACRO_TILE_ASPECT_MASK 0x00000030L
5265#define GB_MACROTILE_MODE4__NUM_BANKS_MASK 0x000000C0L
5266//GB_MACROTILE_MODE5
5267#define GB_MACROTILE_MODE5__BANK_WIDTH__SHIFT 0x0
5268#define GB_MACROTILE_MODE5__BANK_HEIGHT__SHIFT 0x2
5269#define GB_MACROTILE_MODE5__MACRO_TILE_ASPECT__SHIFT 0x4
5270#define GB_MACROTILE_MODE5__NUM_BANKS__SHIFT 0x6
5271#define GB_MACROTILE_MODE5__BANK_WIDTH_MASK 0x00000003L
5272#define GB_MACROTILE_MODE5__BANK_HEIGHT_MASK 0x0000000CL
5273#define GB_MACROTILE_MODE5__MACRO_TILE_ASPECT_MASK 0x00000030L
5274#define GB_MACROTILE_MODE5__NUM_BANKS_MASK 0x000000C0L
5275//GB_MACROTILE_MODE6
5276#define GB_MACROTILE_MODE6__BANK_WIDTH__SHIFT 0x0
5277#define GB_MACROTILE_MODE6__BANK_HEIGHT__SHIFT 0x2
5278#define GB_MACROTILE_MODE6__MACRO_TILE_ASPECT__SHIFT 0x4
5279#define GB_MACROTILE_MODE6__NUM_BANKS__SHIFT 0x6
5280#define GB_MACROTILE_MODE6__BANK_WIDTH_MASK 0x00000003L
5281#define GB_MACROTILE_MODE6__BANK_HEIGHT_MASK 0x0000000CL
5282#define GB_MACROTILE_MODE6__MACRO_TILE_ASPECT_MASK 0x00000030L
5283#define GB_MACROTILE_MODE6__NUM_BANKS_MASK 0x000000C0L
5284//GB_MACROTILE_MODE7
5285#define GB_MACROTILE_MODE7__BANK_WIDTH__SHIFT 0x0
5286#define GB_MACROTILE_MODE7__BANK_HEIGHT__SHIFT 0x2
5287#define GB_MACROTILE_MODE7__MACRO_TILE_ASPECT__SHIFT 0x4
5288#define GB_MACROTILE_MODE7__NUM_BANKS__SHIFT 0x6
5289#define GB_MACROTILE_MODE7__BANK_WIDTH_MASK 0x00000003L
5290#define GB_MACROTILE_MODE7__BANK_HEIGHT_MASK 0x0000000CL
5291#define GB_MACROTILE_MODE7__MACRO_TILE_ASPECT_MASK 0x00000030L
5292#define GB_MACROTILE_MODE7__NUM_BANKS_MASK 0x000000C0L
5293//GB_MACROTILE_MODE8
5294#define GB_MACROTILE_MODE8__BANK_WIDTH__SHIFT 0x0
5295#define GB_MACROTILE_MODE8__BANK_HEIGHT__SHIFT 0x2
5296#define GB_MACROTILE_MODE8__MACRO_TILE_ASPECT__SHIFT 0x4
5297#define GB_MACROTILE_MODE8__NUM_BANKS__SHIFT 0x6
5298#define GB_MACROTILE_MODE8__BANK_WIDTH_MASK 0x00000003L
5299#define GB_MACROTILE_MODE8__BANK_HEIGHT_MASK 0x0000000CL
5300#define GB_MACROTILE_MODE8__MACRO_TILE_ASPECT_MASK 0x00000030L
5301#define GB_MACROTILE_MODE8__NUM_BANKS_MASK 0x000000C0L
5302//GB_MACROTILE_MODE9
5303#define GB_MACROTILE_MODE9__BANK_WIDTH__SHIFT 0x0
5304#define GB_MACROTILE_MODE9__BANK_HEIGHT__SHIFT 0x2
5305#define GB_MACROTILE_MODE9__MACRO_TILE_ASPECT__SHIFT 0x4
5306#define GB_MACROTILE_MODE9__NUM_BANKS__SHIFT 0x6
5307#define GB_MACROTILE_MODE9__BANK_WIDTH_MASK 0x00000003L
5308#define GB_MACROTILE_MODE9__BANK_HEIGHT_MASK 0x0000000CL
5309#define GB_MACROTILE_MODE9__MACRO_TILE_ASPECT_MASK 0x00000030L
5310#define GB_MACROTILE_MODE9__NUM_BANKS_MASK 0x000000C0L
5311//GB_MACROTILE_MODE10
5312#define GB_MACROTILE_MODE10__BANK_WIDTH__SHIFT 0x0
5313#define GB_MACROTILE_MODE10__BANK_HEIGHT__SHIFT 0x2
5314#define GB_MACROTILE_MODE10__MACRO_TILE_ASPECT__SHIFT 0x4
5315#define GB_MACROTILE_MODE10__NUM_BANKS__SHIFT 0x6
5316#define GB_MACROTILE_MODE10__BANK_WIDTH_MASK 0x00000003L
5317#define GB_MACROTILE_MODE10__BANK_HEIGHT_MASK 0x0000000CL
5318#define GB_MACROTILE_MODE10__MACRO_TILE_ASPECT_MASK 0x00000030L
5319#define GB_MACROTILE_MODE10__NUM_BANKS_MASK 0x000000C0L
5320//GB_MACROTILE_MODE11
5321#define GB_MACROTILE_MODE11__BANK_WIDTH__SHIFT 0x0
5322#define GB_MACROTILE_MODE11__BANK_HEIGHT__SHIFT 0x2
5323#define GB_MACROTILE_MODE11__MACRO_TILE_ASPECT__SHIFT 0x4
5324#define GB_MACROTILE_MODE11__NUM_BANKS__SHIFT 0x6
5325#define GB_MACROTILE_MODE11__BANK_WIDTH_MASK 0x00000003L
5326#define GB_MACROTILE_MODE11__BANK_HEIGHT_MASK 0x0000000CL
5327#define GB_MACROTILE_MODE11__MACRO_TILE_ASPECT_MASK 0x00000030L
5328#define GB_MACROTILE_MODE11__NUM_BANKS_MASK 0x000000C0L
5329//GB_MACROTILE_MODE12
5330#define GB_MACROTILE_MODE12__BANK_WIDTH__SHIFT 0x0
5331#define GB_MACROTILE_MODE12__BANK_HEIGHT__SHIFT 0x2
5332#define GB_MACROTILE_MODE12__MACRO_TILE_ASPECT__SHIFT 0x4
5333#define GB_MACROTILE_MODE12__NUM_BANKS__SHIFT 0x6
5334#define GB_MACROTILE_MODE12__BANK_WIDTH_MASK 0x00000003L
5335#define GB_MACROTILE_MODE12__BANK_HEIGHT_MASK 0x0000000CL
5336#define GB_MACROTILE_MODE12__MACRO_TILE_ASPECT_MASK 0x00000030L
5337#define GB_MACROTILE_MODE12__NUM_BANKS_MASK 0x000000C0L
5338//GB_MACROTILE_MODE13
5339#define GB_MACROTILE_MODE13__BANK_WIDTH__SHIFT 0x0
5340#define GB_MACROTILE_MODE13__BANK_HEIGHT__SHIFT 0x2
5341#define GB_MACROTILE_MODE13__MACRO_TILE_ASPECT__SHIFT 0x4
5342#define GB_MACROTILE_MODE13__NUM_BANKS__SHIFT 0x6
5343#define GB_MACROTILE_MODE13__BANK_WIDTH_MASK 0x00000003L
5344#define GB_MACROTILE_MODE13__BANK_HEIGHT_MASK 0x0000000CL
5345#define GB_MACROTILE_MODE13__MACRO_TILE_ASPECT_MASK 0x00000030L
5346#define GB_MACROTILE_MODE13__NUM_BANKS_MASK 0x000000C0L
5347//GB_MACROTILE_MODE14
5348#define GB_MACROTILE_MODE14__BANK_WIDTH__SHIFT 0x0
5349#define GB_MACROTILE_MODE14__BANK_HEIGHT__SHIFT 0x2
5350#define GB_MACROTILE_MODE14__MACRO_TILE_ASPECT__SHIFT 0x4
5351#define GB_MACROTILE_MODE14__NUM_BANKS__SHIFT 0x6
5352#define GB_MACROTILE_MODE14__BANK_WIDTH_MASK 0x00000003L
5353#define GB_MACROTILE_MODE14__BANK_HEIGHT_MASK 0x0000000CL
5354#define GB_MACROTILE_MODE14__MACRO_TILE_ASPECT_MASK 0x00000030L
5355#define GB_MACROTILE_MODE14__NUM_BANKS_MASK 0x000000C0L
5356//GB_MACROTILE_MODE15
5357#define GB_MACROTILE_MODE15__BANK_WIDTH__SHIFT 0x0
5358#define GB_MACROTILE_MODE15__BANK_HEIGHT__SHIFT 0x2
5359#define GB_MACROTILE_MODE15__MACRO_TILE_ASPECT__SHIFT 0x4
5360#define GB_MACROTILE_MODE15__NUM_BANKS__SHIFT 0x6
5361#define GB_MACROTILE_MODE15__BANK_WIDTH_MASK 0x00000003L
5362#define GB_MACROTILE_MODE15__BANK_HEIGHT_MASK 0x0000000CL
5363#define GB_MACROTILE_MODE15__MACRO_TILE_ASPECT_MASK 0x00000030L
5364#define GB_MACROTILE_MODE15__NUM_BANKS_MASK 0x000000C0L
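/*
 * Illustrative note (sketch only): the 32 GB_TILE_MODEn registers share one
 * field layout among themselves, and the 16 GB_MACROTILE_MODEn registers
 * share another, so a decode written against the MODE0 masks applies to a
 * value read from any entry of the respective register array. The array
 * parameter here is an assumption: it is taken to hold raw GB_MACROTILE_MODE
 * values indexed 0..15.
 */
static inline unsigned int example_macrotile_num_banks(const unsigned int *macrotile_modes,
						       unsigned int index)
{
	/* NUM_BANKS occupies the same bits in every GB_MACROTILE_MODEn. */
	return (macrotile_modes[index] & GB_MACROTILE_MODE0__NUM_BANKS_MASK) >>
	       GB_MACROTILE_MODE0__NUM_BANKS__SHIFT;
}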
5365//CB_HW_CONTROL
5366#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT__SHIFT 0x0
5367#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT__SHIFT 0x6
5368#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT__SHIFT 0xc
5369#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x10
5370#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING__SHIFT 0x12
5371#define CB_HW_CONTROL__FORCE_NEEDS_DST__SHIFT 0x13
5372#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE__SHIFT 0x14
5373#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST__SHIFT 0x15
5374#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK__SHIFT 0x16
5375#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG__SHIFT 0x17
5376#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x18
5377#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x19
5378#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0x1a
5379#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED__SHIFT 0x1b
5380#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT__SHIFT 0x1c
5381#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT__SHIFT 0x1d
5382#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e
5383#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f
5384#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT_MASK 0x0000000FL
5385#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT_MASK 0x000003C0L
5386#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT_MASK 0x0000F000L
5387#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00010000L
5388#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING_MASK 0x00040000L
5389#define CB_HW_CONTROL__FORCE_NEEDS_DST_MASK 0x00080000L
5390#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE_MASK 0x00100000L
5391#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST_MASK 0x00200000L
5392#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK_MASK 0x00400000L
5393#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG_MASK 0x00800000L
5394#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x01000000L
5395#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS_MASK 0x02000000L
5396#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x04000000L
5397#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED_MASK 0x08000000L
5398#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT_MASK 0x10000000L
5399#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT_MASK 0x20000000L
5400#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT_MASK 0x40000000L
5401#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE_MASK 0x80000000L
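/*
 * Illustrative sketch (hypothetical helper, not taken from this header):
 * updating a single field of CB_HW_CONTROL follows the usual
 * read-modify-write pattern, clearing the field with its _MASK and OR-ing in
 * the new value shifted by its __SHIFT. The evict-point field is used here
 * purely as an example.
 */
static inline unsigned int example_set_cm_cache_evict_point(unsigned int reg_val,
							    unsigned int point)
{
	/* Clear the old CM_CACHE_EVICT_POINT bits. */
	reg_val &= ~CB_HW_CONTROL__CM_CACHE_EVICT_POINT_MASK;
	/* Insert the new value, keeping it confined to the field. */
	reg_val |= (point << CB_HW_CONTROL__CM_CACHE_EVICT_POINT__SHIFT) &
		   CB_HW_CONTROL__CM_CACHE_EVICT_POINT_MASK;
	return reg_val;
}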
5402//CB_HW_CONTROL_1
5403#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS__SHIFT 0x0
5404#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS__SHIFT 0x5
5405#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS__SHIFT 0xb
5406#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH__SHIFT 0x11
5407#define CB_HW_CONTROL_1__RMI_CREDITS__SHIFT 0x1a
5408#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS_MASK 0x0000001FL
5409#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS_MASK 0x000007E0L
5410#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS_MASK 0x0001F800L
5411#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH_MASK 0x03FE0000L
5412#define CB_HW_CONTROL_1__RMI_CREDITS_MASK 0xFC000000L
5413//CB_HW_CONTROL_2
5414#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH__SHIFT 0x0
5415#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH__SHIFT 0x8
5416#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH__SHIFT 0xf
5417#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8__SHIFT 0x18
5418#define CB_HW_CONTROL_2__CHICKEN_BITS__SHIFT 0x1c
5419#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH_MASK 0x000000FFL
5420#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH_MASK 0x00007F00L
5421#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH_MASK 0x007F8000L
5422#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8_MASK 0x0F000000L
5423#define CB_HW_CONTROL_2__CHICKEN_BITS_MASK 0xF0000000L
5424//CB_HW_CONTROL_3
5425#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL__SHIFT 0x0
5426#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED__SHIFT 0x1
5427#define CB_HW_CONTROL_3__DISABLE_FAST_CLEAR_FETCH_OPT__SHIFT 0x2
5428#define CB_HW_CONTROL_3__DISABLE_QUAD_MARKER_DROP_STOP__SHIFT 0x3
5429#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_CAM_CLR__SHIFT 0x4
5430#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM__SHIFT 0x5
5431#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_KEY_MOD__SHIFT 0x6
5432#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING__SHIFT 0x7
5433#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_TARGET_MASK_VALIDATION__SHIFT 0x8
5434#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS__SHIFT 0x9
5435#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS__SHIFT 0xa
5436#define CB_HW_CONTROL_3__DISABLE_CMASK_LAST_QUAD_INSERTION__SHIFT 0xb
5437#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_511967__SHIFT 0xc
5438#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_520657__SHIFT 0xd
5439#define CB_HW_CONTROL_3__DISABLE_OC_FIXES_OF_BUG_522542__SHIFT 0xe
5440#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH__SHIFT 0xf
5441#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH__SHIFT 0x10
5442#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC__SHIFT 0x11
5443#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_FC__SHIFT 0x12
5444#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC__SHIFT 0x13
5445#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CM__SHIFT 0x14
5446#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC__SHIFT 0x15
5447#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_FC__SHIFT 0x16
5448#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC__SHIFT 0x17
5449#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM__SHIFT 0x18
5450#define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT__SHIFT 0x19
5451#define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING__SHIFT 0x1a
5452#define CB_HW_CONTROL_3__DISABLE_DUALSRC_WITH_OBJPRIMID_FIX__SHIFT 0x1b
5453#define CB_HW_CONTROL_3__COLOR_CACHE_PREFETCH_NUM_CLS__SHIFT 0x1c
5454#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK 0x00000001L
5455#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED_MASK 0x00000002L
5456#define CB_HW_CONTROL_3__DISABLE_FAST_CLEAR_FETCH_OPT_MASK 0x00000004L
5457#define CB_HW_CONTROL_3__DISABLE_QUAD_MARKER_DROP_STOP_MASK 0x00000008L
5458#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_CAM_CLR_MASK 0x00000010L
5459#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM_MASK 0x00000020L
5460#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_KEY_MOD_MASK 0x00000040L
5461#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING_MASK 0x00000080L
5462#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_TARGET_MASK_VALIDATION_MASK 0x00000100L
5463#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS_MASK 0x00000200L
5464#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS_MASK 0x00000400L
5465#define CB_HW_CONTROL_3__DISABLE_CMASK_LAST_QUAD_INSERTION_MASK 0x00000800L
5466#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_511967_MASK 0x00001000L
5467#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_520657_MASK 0x00002000L
5468#define CB_HW_CONTROL_3__DISABLE_OC_FIXES_OF_BUG_522542_MASK 0x00004000L
5469#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH_MASK 0x00008000L
5470#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH_MASK 0x00010000L
5471#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC_MASK 0x00020000L
5472#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_FC_MASK 0x00040000L
5473#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC_MASK 0x00080000L
5474#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CM_MASK 0x00100000L
5475#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC_MASK 0x00200000L
5476#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_FC_MASK 0x00400000L
5477#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC_MASK 0x00800000L
5478#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM_MASK 0x01000000L
5479#define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT_MASK 0x02000000L
5480#define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING_MASK 0x04000000L
5481#define CB_HW_CONTROL_3__DISABLE_DUALSRC_WITH_OBJPRIMID_FIX_MASK 0x08000000L
5482#define CB_HW_CONTROL_3__COLOR_CACHE_PREFETCH_NUM_CLS_MASK 0x30000000L
5483//CB_HW_MEM_ARBITER_RD
5484#define CB_HW_MEM_ARBITER_RD__MODE__SHIFT 0x0
5485#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE__SHIFT 0x2
5486#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE__SHIFT 0x6
5487#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC__SHIFT 0xa
5488#define CB_HW_MEM_ARBITER_RD__WEIGHT_FC__SHIFT 0xc
5489#define CB_HW_MEM_ARBITER_RD__WEIGHT_CM__SHIFT 0xe
5490#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC__SHIFT 0x10
5491#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS__SHIFT 0x12
5492#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS__SHIFT 0x14
5493#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS__SHIFT 0x16
5494#define CB_HW_MEM_ARBITER_RD__SCALE_AGE__SHIFT 0x17
5495#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT__SHIFT 0x1a
5496#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x1d
5497#define CB_HW_MEM_ARBITER_RD__MODE_MASK 0x00000003L
5498#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE_MASK 0x0000003CL
5499#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE_MASK 0x000003C0L
5500#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC_MASK 0x00000C00L
5501#define CB_HW_MEM_ARBITER_RD__WEIGHT_FC_MASK 0x00003000L
5502#define CB_HW_MEM_ARBITER_RD__WEIGHT_CM_MASK 0x0000C000L
5503#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC_MASK 0x00030000L
5504#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS_MASK 0x000C0000L
5505#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS_MASK 0x00300000L
5506#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS_MASK 0x00400000L
5507#define CB_HW_MEM_ARBITER_RD__SCALE_AGE_MASK 0x03800000L
5508#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT_MASK 0x1C000000L
5509#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS_MASK 0x20000000L
5510//CB_HW_MEM_ARBITER_WR
5511#define CB_HW_MEM_ARBITER_WR__MODE__SHIFT 0x0
5512#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE__SHIFT 0x2
5513#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE__SHIFT 0x6
5514#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC__SHIFT 0xa
5515#define CB_HW_MEM_ARBITER_WR__WEIGHT_FC__SHIFT 0xc
5516#define CB_HW_MEM_ARBITER_WR__WEIGHT_CM__SHIFT 0xe
5517#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC__SHIFT 0x10
5518#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS__SHIFT 0x12
5519#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS__SHIFT 0x14
5520#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK__SHIFT 0x16
5521#define CB_HW_MEM_ARBITER_WR__SCALE_AGE__SHIFT 0x17
5522#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT__SHIFT 0x1a
5523#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x1d
5524#define CB_HW_MEM_ARBITER_WR__MODE_MASK 0x00000003L
5525#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE_MASK 0x0000003CL
5526#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE_MASK 0x000003C0L
5527#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC_MASK 0x00000C00L
5528#define CB_HW_MEM_ARBITER_WR__WEIGHT_FC_MASK 0x00003000L
5529#define CB_HW_MEM_ARBITER_WR__WEIGHT_CM_MASK 0x0000C000L
5530#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC_MASK 0x00030000L
5531#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS_MASK 0x000C0000L
5532#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS_MASK 0x00300000L
5533#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK_MASK 0x00400000L
5534#define CB_HW_MEM_ARBITER_WR__SCALE_AGE_MASK 0x03800000L
5535#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT_MASK 0x1C000000L
5536#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS_MASK 0x20000000L
5537//CB_DCC_CONFIG
5538#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DEPTH__SHIFT 0x0
5539#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DISABLE__SHIFT 0x5
5540#define CB_DCC_CONFIG__OVERWRITE_COMBINER_CC_POP_DISABLE__SHIFT 0x6
5541#define CB_DCC_CONFIG__FC_RDLAT_KEYID_FIFO_DEPTH__SHIFT 0x8
5542#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH__SHIFT 0x10
5543#define CB_DCC_CONFIG__DCC_CACHE_EVICT_POINT__SHIFT 0x18
5544#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS__SHIFT 0x1c
5545#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DEPTH_MASK 0x0000001FL
5546#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DISABLE_MASK 0x00000020L
5547#define CB_DCC_CONFIG__OVERWRITE_COMBINER_CC_POP_DISABLE_MASK 0x00000040L
5548#define CB_DCC_CONFIG__FC_RDLAT_KEYID_FIFO_DEPTH_MASK 0x0000FF00L
5549#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH_MASK 0x007F0000L
5550#define CB_DCC_CONFIG__DCC_CACHE_EVICT_POINT_MASK 0x0F000000L
5551#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS_MASK 0xF0000000L
5552//GC_USER_RB_REDUNDANCY
5553#define GC_USER_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
5554#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
5555#define GC_USER_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
5556#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
5557#define GC_USER_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
5558#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
5559#define GC_USER_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
5560#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
5561//GC_USER_RB_BACKEND_DISABLE
5562#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x10
5563#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00FF0000L
5564
5565
5566// addressBlock: gc_ea_gceadec2
5567//GCEA_EDC_CNT
5568#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
5569#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
5570#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
5571#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
5572#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
5573#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
5574#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
5575#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
5576#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
5577#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
5578#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
5579#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
5580#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
5581#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
5582#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
5583#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
5584#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
5585#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
5586#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
5587#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
5588#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
5589#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
5590#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
5591#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
5592#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
5593#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
5594#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
5595#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
5596#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
5597#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
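/*
 * Illustrative sketch (hypothetical helper): GCEA_EDC_CNT packs several 2-bit
 * correctable (SEC) and uncorrectable (DED) error counters into one register;
 * each counter is recovered with its matching _MASK/__SHIFT pair, shown here
 * for the DRAMRD command-memory counters.
 */
static inline void example_decode_dramrd_cmdmem_edc(unsigned int cnt,
						    unsigned int *sec,
						    unsigned int *ded)
{
	/* Single-error-corrected count, bits [1:0]. */
	*sec = (cnt & GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK) >>
	       GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT;
	/* Double-error-detected count, bits [3:2]. */
	*ded = (cnt & GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK) >>
	       GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT;
}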
5598//GCEA_EDC_CNT2
5599#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
5600#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
5601#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
5602#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
5603#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
5604#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
5605#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
5606#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
5607#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
5608#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
5609#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
5610#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
5611#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
5612#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
5613#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
5614#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
5615//GCEA_DSM_CNTL
5616#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
5617#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
5618#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
5619#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
5620#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
5621#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
5622#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
5623#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
5624#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
5625#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
5626#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
5627#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
5628#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
5629#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
5630#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
5631#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
5632#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
5633#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
5634#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
5635#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
5636#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
5637#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
5638#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
5639#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
5640#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
5641#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
5642#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
5643#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
5644#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
5645#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
5646#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
5647#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
5648//GCEA_DSM_CNTLA
5649#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
5650#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
5651#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
5652#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
5653#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
5654#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
5655#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
5656#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
5657#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
5658#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
5659#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
5660#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
5661#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
5662#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
5663#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
5664#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
5665#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
5666#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
5667#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
5668#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
5669#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
5670#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
5671#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
5672#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
5673#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
5674#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
5675#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
5676#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
5677//GCEA_DSM_CNTLB
5678//GCEA_DSM_CNTL2
5679#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
5680#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
5681#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
5682#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
5683#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
5684#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
5685#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
5686#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
5687#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
5688#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
5689#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
5690#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
5691#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
5692#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
5693#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
5694#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
5695#define GCEA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
5696#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
5697#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
5698#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
5699#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
5700#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
5701#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
5702#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
5703#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
5704#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
5705#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
5706#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
5707#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
5708#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
5709#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
5710#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
5711#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
5712#define GCEA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
5713//GCEA_DSM_CNTL2A
5714#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
5715#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
5716#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
5717#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
5718#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
5719#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
5720#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
5721#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
5722#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
5723#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
5724#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
5725#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
5726#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
5727#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
5728#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
5729#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
5730#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
5731#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
5732#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
5733#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
5734#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
5735#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
5736#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
5737#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
5738#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
5739#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
5740#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
5741#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
5742//GCEA_DSM_CNTL2B
5743//GCEA_TCC_XBR_CREDITS
5744#define GCEA_TCC_XBR_CREDITS__DRAM_RD_LIMIT__SHIFT 0x0
5745#define GCEA_TCC_XBR_CREDITS__DRAM_RD_RESERVE__SHIFT 0x6
5746#define GCEA_TCC_XBR_CREDITS__IO_RD_LIMIT__SHIFT 0x8
5747#define GCEA_TCC_XBR_CREDITS__IO_RD_RESERVE__SHIFT 0xe
5748#define GCEA_TCC_XBR_CREDITS__DRAM_WR_LIMIT__SHIFT 0x10
5749#define GCEA_TCC_XBR_CREDITS__DRAM_WR_RESERVE__SHIFT 0x16
5750#define GCEA_TCC_XBR_CREDITS__IO_WR_LIMIT__SHIFT 0x18
5751#define GCEA_TCC_XBR_CREDITS__IO_WR_RESERVE__SHIFT 0x1e
5752#define GCEA_TCC_XBR_CREDITS__DRAM_RD_LIMIT_MASK 0x0000003FL
5753#define GCEA_TCC_XBR_CREDITS__DRAM_RD_RESERVE_MASK 0x000000C0L
5754#define GCEA_TCC_XBR_CREDITS__IO_RD_LIMIT_MASK 0x00003F00L
5755#define GCEA_TCC_XBR_CREDITS__IO_RD_RESERVE_MASK 0x0000C000L
5756#define GCEA_TCC_XBR_CREDITS__DRAM_WR_LIMIT_MASK 0x003F0000L
5757#define GCEA_TCC_XBR_CREDITS__DRAM_WR_RESERVE_MASK 0x00C00000L
5758#define GCEA_TCC_XBR_CREDITS__IO_WR_LIMIT_MASK 0x3F000000L
5759#define GCEA_TCC_XBR_CREDITS__IO_WR_RESERVE_MASK 0xC0000000L
5760//GCEA_TCC_XBR_MAXBURST
5761#define GCEA_TCC_XBR_MAXBURST__DRAM_RD__SHIFT 0x0
5762#define GCEA_TCC_XBR_MAXBURST__IO_RD__SHIFT 0x4
5763#define GCEA_TCC_XBR_MAXBURST__DRAM_WR__SHIFT 0x8
5764#define GCEA_TCC_XBR_MAXBURST__IO_WR__SHIFT 0xc
5765#define GCEA_TCC_XBR_MAXBURST__DRAM_RD_MASK 0x0000000FL
5766#define GCEA_TCC_XBR_MAXBURST__IO_RD_MASK 0x000000F0L
5767#define GCEA_TCC_XBR_MAXBURST__DRAM_WR_MASK 0x00000F00L
5768#define GCEA_TCC_XBR_MAXBURST__IO_WR_MASK 0x0000F000L
5769//GCEA_PROBE_CNTL
5770#define GCEA_PROBE_CNTL__REQ2RSP_DELAY__SHIFT 0x0
5771#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE__SHIFT 0x5
5772#define GCEA_PROBE_CNTL__REQ2RSP_DELAY_MASK 0x0000001FL
5773#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE_MASK 0x00000020L
5774//GCEA_PROBE_MAP
5775#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTTCC__SHIFT 0x0
5776#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTTCC__SHIFT 0x1
5777#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTTCC__SHIFT 0x2
5778#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTTCC__SHIFT 0x3
5779#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTTCC__SHIFT 0x4
5780#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTTCC__SHIFT 0x5
5781#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTTCC__SHIFT 0x6
5782#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTTCC__SHIFT 0x7
5783#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTTCC__SHIFT 0x8
5784#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTTCC__SHIFT 0x9
5785#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTTCC__SHIFT 0xa
5786#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTTCC__SHIFT 0xb
5787#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTTCC__SHIFT 0xc
5788#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTTCC__SHIFT 0xd
5789#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTTCC__SHIFT 0xe
5790#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTTCC__SHIFT 0xf
5791#define GCEA_PROBE_MAP__INTLV_SIZE__SHIFT 0x10
5792#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTTCC_MASK 0x00000001L
5793#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTTCC_MASK 0x00000002L
5794#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTTCC_MASK 0x00000004L
5795#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTTCC_MASK 0x00000008L
5796#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTTCC_MASK 0x00000010L
5797#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTTCC_MASK 0x00000020L
5798#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTTCC_MASK 0x00000040L
5799#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTTCC_MASK 0x00000080L
5800#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTTCC_MASK 0x00000100L
5801#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTTCC_MASK 0x00000200L
5802#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTTCC_MASK 0x00000400L
5803#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTTCC_MASK 0x00000800L
5804#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTTCC_MASK 0x00001000L
5805#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTTCC_MASK 0x00002000L
5806#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTTCC_MASK 0x00004000L
5807#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTTCC_MASK 0x00008000L
5808#define GCEA_PROBE_MAP__INTLV_SIZE_MASK 0x00030000L
5809//GCEA_ERR_STATUS
5810#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
5811#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
5812#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0x8
5813#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0x9
5814#define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xa
5815#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
5816#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
5817#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000100L
5818#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000200L
5819#define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00000400L
5820//GCEA_MISC2
5821#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
5822#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
5823#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
5824#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
5825#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
5826#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
5827#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
5828#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
5829//GCEA_SDP_BACKDOOR_CMDCREDITS0
5830#define GCEA_SDP_BACKDOOR_CMDCREDITS0__CREDITS_RECEIVED__SHIFT 0x0
5831#define GCEA_SDP_BACKDOOR_CMDCREDITS0__CREDITS_RECEIVED_MASK 0xFFFFFFFFL
5832//GCEA_SDP_BACKDOOR_CMDCREDITS1
5833#define GCEA_SDP_BACKDOOR_CMDCREDITS1__CREDITS_RECEIVED__SHIFT 0x0
5834#define GCEA_SDP_BACKDOOR_CMDCREDITS1__CREDITS_RECEIVED_MASK 0x7FFFFFFFL
5835//GCEA_SDP_BACKDOOR_DATACREDITS0
5836#define GCEA_SDP_BACKDOOR_DATACREDITS0__CREDITS_RECEIVED__SHIFT 0x0
5837#define GCEA_SDP_BACKDOOR_DATACREDITS0__CREDITS_RECEIVED_MASK 0xFFFFFFFFL
5838//GCEA_SDP_BACKDOOR_DATACREDITS1
5839#define GCEA_SDP_BACKDOOR_DATACREDITS1__CREDITS_RECEIVED__SHIFT 0x0
5840#define GCEA_SDP_BACKDOOR_DATACREDITS1__CREDITS_RECEIVED_MASK 0x7FFFFFFFL
5841//GCEA_SDP_BACKDOOR_MISCCREDITS
5842#define GCEA_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED__SHIFT 0x0
5843#define GCEA_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED__SHIFT 0x8
5844#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_REQ_CREDITS_RELEASED__SHIFT 0x10
5845#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED__SHIFT 0x17
5846#define GCEA_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED_MASK 0x000000FFL
5847#define GCEA_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED_MASK 0x0000FF00L
5848#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_REQ_CREDITS_RELEASED_MASK 0x007F0000L
5849#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED_MASK 0x3F800000L
5850//GCEA_SDP_ENABLE
5851#define GCEA_SDP_ENABLE__ENABLE__SHIFT 0x0
5852#define GCEA_SDP_ENABLE__ENABLE_MASK 0x00000001L
5853
5854
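/*
 * Illustrative only (not part of the generated header): the __SHIFT/_MASK
 * pairs above are intended for the usual field extract/insert pattern.
 * A minimal sketch, assuming the caller already holds the raw 32-bit
 * value of GCEA_ERR_STATUS; the helper name is made up for illustration.
 */
static inline unsigned int gcea_err_status_rdrsp(unsigned int reg_val)
{
	/* Extract the SDP_RDRSP_STATUS field (bits 3:0). */
	return (reg_val & GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK) >>
	       GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT;
}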
5855// addressBlock: gc_rmi_rmidec
5856//RMI_GENERAL_CNTL
5857#define RMI_GENERAL_CNTL__BURST_DISABLE__SHIFT 0x0
5858#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE__SHIFT 0x1
5859#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG__SHIFT 0x11
5860#define RMI_GENERAL_CNTL__RB0_HARVEST_EN__SHIFT 0x13
5861#define RMI_GENERAL_CNTL__RB1_HARVEST_EN__SHIFT 0x14
5862#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE__SHIFT 0x15
5863#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_UPDATE__SHIFT 0x19
5864#define RMI_GENERAL_CNTL__SKID_FIFO_0_OVERFLOW_ERROR_MASK__SHIFT 0x1a
5865#define RMI_GENERAL_CNTL__SKID_FIFO_0_UNDERFLOW_ERROR_MASK__SHIFT 0x1b
5866#define RMI_GENERAL_CNTL__SKID_FIFO_1_OVERFLOW_ERROR_MASK__SHIFT 0x1c
5867#define RMI_GENERAL_CNTL__SKID_FIFO_1_UNDERFLOW_ERROR_MASK__SHIFT 0x1d
5868#define RMI_GENERAL_CNTL__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK__SHIFT 0x1e
5869#define RMI_GENERAL_CNTL__BURST_DISABLE_MASK 0x00000001L
5870#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE_MASK 0x0001FFFEL
5871#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_MASK 0x00060000L
5872#define RMI_GENERAL_CNTL__RB0_HARVEST_EN_MASK 0x00080000L
5873#define RMI_GENERAL_CNTL__RB1_HARVEST_EN_MASK 0x00100000L
5874#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE_MASK 0x01E00000L
5875#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_UPDATE_MASK 0x02000000L
5876#define RMI_GENERAL_CNTL__SKID_FIFO_0_OVERFLOW_ERROR_MASK_MASK 0x04000000L
5877#define RMI_GENERAL_CNTL__SKID_FIFO_0_UNDERFLOW_ERROR_MASK_MASK 0x08000000L
5878#define RMI_GENERAL_CNTL__SKID_FIFO_1_OVERFLOW_ERROR_MASK_MASK 0x10000000L
5879#define RMI_GENERAL_CNTL__SKID_FIFO_1_UNDERFLOW_ERROR_MASK_MASK 0x20000000L
5880#define RMI_GENERAL_CNTL__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK_MASK 0x40000000L
5881//RMI_GENERAL_CNTL1
5882#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE__SHIFT 0x0
5883#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE__SHIFT 0x4
5884#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE__SHIFT 0x6
5885#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK__SHIFT 0x8
5886#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE__SHIFT 0x9
5887#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE__SHIFT 0xa
5888#define RMI_GENERAL_CNTL1__UTCL1_PROBE0_RR_ARB_BURST_HINT_EN__SHIFT 0xb
5889#define RMI_GENERAL_CNTL1__UTCL1_PROBE1_RR_ARB_BURST_HINT_EN__SHIFT 0xc
5890#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE_MASK 0x0000000FL
5891#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE_MASK 0x00000030L
5892#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE_MASK 0x000000C0L
5893#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK_MASK 0x00000100L
5894#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE_MASK 0x00000200L
5895#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_MASK 0x00000400L
5896#define RMI_GENERAL_CNTL1__UTCL1_PROBE0_RR_ARB_BURST_HINT_EN_MASK 0x00000800L
5897#define RMI_GENERAL_CNTL1__UTCL1_PROBE1_RR_ARB_BURST_HINT_EN_MASK 0x00001000L
5898//RMI_GENERAL_STATUS
5899#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED__SHIFT 0x0
5900#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR__SHIFT 0x1
5901#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR__SHIFT 0x2
5902#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR__SHIFT 0x3
5903#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR__SHIFT 0x4
5904#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY__SHIFT 0x5
5905#define RMI_GENERAL_STATUS__RMI_UTCL1_BUSY__SHIFT 0x6
5906#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY__SHIFT 0x7
5907#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY__SHIFT 0x8
5908#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY__SHIFT 0x9
5909#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY__SHIFT 0xa
5910#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xb
5911#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xc
5912#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY__SHIFT 0xd
5913#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY__SHIFT 0xe
5914#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY__SHIFT 0xf
5915#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_1_BUSY__SHIFT 0x10
5916#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_1_BUSY__SHIFT 0x11
5917#define RMI_GENERAL_STATUS__UTC_PROBE1_BUSY__SHIFT 0x12
5918#define RMI_GENERAL_STATUS__UTC_PROBE0_BUSY__SHIFT 0x13
5919#define RMI_GENERAL_STATUS__RMI_XNACK_BUSY__SHIFT 0x14
5920#define RMI_GENERAL_STATUS__XNACK_FIFO_NUM_USED__SHIFT 0x15
5921#define RMI_GENERAL_STATUS__XNACK_FIFO_EMPTY__SHIFT 0x1d
5922#define RMI_GENERAL_STATUS__XNACK_FIFO_FULL__SHIFT 0x1e
5923#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR__SHIFT 0x1f
5924#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED_MASK 0x00000001L
5925#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR_MASK 0x00000002L
5926#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR_MASK 0x00000004L
5927#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR_MASK 0x00000008L
5928#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR_MASK 0x00000010L
5929#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY_MASK 0x00000020L
5930#define RMI_GENERAL_STATUS__RMI_UTCL1_BUSY_MASK 0x00000040L
5931#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY_MASK 0x00000080L
5932#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY_MASK 0x00000100L
5933#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY_MASK 0x00000200L
5934#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY_MASK 0x00000400L
5935#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00000800L
5936#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00001000L
5937#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY_MASK 0x00002000L
5938#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY_MASK 0x00004000L
5939#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY_MASK 0x00008000L
5940#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_1_BUSY_MASK 0x00010000L
5941#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_1_BUSY_MASK 0x00020000L
5942#define RMI_GENERAL_STATUS__UTC_PROBE1_BUSY_MASK 0x00040000L
5943#define RMI_GENERAL_STATUS__UTC_PROBE0_BUSY_MASK 0x00080000L
5944#define RMI_GENERAL_STATUS__RMI_XNACK_BUSY_MASK 0x00100000L
5945#define RMI_GENERAL_STATUS__XNACK_FIFO_NUM_USED_MASK 0x1FE00000L
5946#define RMI_GENERAL_STATUS__XNACK_FIFO_EMPTY_MASK 0x20000000L
5947#define RMI_GENERAL_STATUS__XNACK_FIFO_FULL_MASK 0x40000000L
5948#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK 0x80000000L
5949//RMI_SUBBLOCK_STATUS0
5950#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0__SHIFT 0x0
5951#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0__SHIFT 0x7
5952#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0__SHIFT 0x8
5953#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1__SHIFT 0x9
5954#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1__SHIFT 0x10
5955#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1__SHIFT 0x11
5956#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT__SHIFT 0x12
5957#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0_MASK 0x0000007FL
5958#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0_MASK 0x00000080L
5959#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0_MASK 0x00000100L
5960#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1_MASK 0x0000FE00L
5961#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1_MASK 0x00010000L
5962#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1_MASK 0x00020000L
5963#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT_MASK 0x0FFC0000L
5964//RMI_SUBBLOCK_STATUS1
5965#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE__SHIFT 0x0
5966#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE__SHIFT 0xa
5967#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT__SHIFT 0x14
5968#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE_MASK 0x000003FFL
5969#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE_MASK 0x000FFC00L
5970#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT_MASK 0x3FF00000L
5971//RMI_SUBBLOCK_STATUS2
5972#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED__SHIFT 0x0
5973#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED__SHIFT 0x9
5974#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED_MASK 0x000001FFL
5975#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED_MASK 0x0003FE00L
5976//RMI_SUBBLOCK_STATUS3
5977#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL__SHIFT 0x0
5978#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL__SHIFT 0xa
5979#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL_MASK 0x000003FFL
5980#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL_MASK 0x000FFC00L
5981//RMI_XBAR_CONFIG
5982#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE__SHIFT 0x0
5983#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE__SHIFT 0x2
5984#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE__SHIFT 0x6
5985#define RMI_XBAR_CONFIG__ARBITER_DIS__SHIFT 0x7
5986#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ__SHIFT 0x8
5987#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE__SHIFT 0xc
5988#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0__SHIFT 0xd
5989#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB1__SHIFT 0xe
5990#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE_MASK 0x00000003L
5991#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE_MASK 0x0000003CL
5992#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE_MASK 0x00000040L
5993#define RMI_XBAR_CONFIG__ARBITER_DIS_MASK 0x00000080L
5994#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_MASK 0x00000F00L
5995#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE_MASK 0x00001000L
5996#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0_MASK 0x00002000L
5997#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB1_MASK 0x00004000L
5998//RMI_PROBE_POP_LOGIC_CNTL
5999#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH__SHIFT 0x0
6000#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS__SHIFT 0x7
6001#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2__SHIFT 0x8
6002#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH__SHIFT 0xa
6003#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS__SHIFT 0x11
6004#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH_MASK 0x0000007FL
6005#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS_MASK 0x00000080L
6006#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2_MASK 0x00000300L
6007#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH_MASK 0x0001FC00L
6008#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS_MASK 0x00020000L
6009//RMI_UTC_XNACK_N_MISC_CNTL
6010#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC__SHIFT 0x0
6011#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE__SHIFT 0x8
6012#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE__SHIFT 0xc
6013#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE__SHIFT 0xd
6014#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC_MASK 0x000000FFL
6015#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE_MASK 0x00000F00L
6016#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE_MASK 0x00001000L
6017#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE_MASK 0x00002000L
6018//RMI_DEMUX_CNTL
6019#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL__SHIFT 0x0
6020#define RMI_DEMUX_CNTL__DEMUX_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x1
6021#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x4
6022#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x6
6023#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE__SHIFT 0xe
6024#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL__SHIFT 0x10
6025#define RMI_DEMUX_CNTL__DEMUX_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x11
6026#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x14
6027#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x16
6028#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE__SHIFT 0x1e
6029#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_MASK 0x00000001L
6030#define RMI_DEMUX_CNTL__DEMUX_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000002L
6031#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_OVERRIDE_MASK 0x00000030L
6032#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE_MASK 0x00003FC0L
6033#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_MASK 0x0000C000L
6034#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_MASK 0x00010000L
6035#define RMI_DEMUX_CNTL__DEMUX_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00020000L
6036#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00300000L
6037#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE_MASK 0x3FC00000L
6038#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_MASK 0xC0000000L
6039//RMI_UTCL1_CNTL1
6040#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
6041#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
6042#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
6043#define RMI_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
6044#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
6045#define RMI_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
6046#define RMI_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
6047#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
6048#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
6049#define RMI_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
6050#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
6051#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
6052#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
6053#define RMI_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
6054#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
6055#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
6056#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
6057#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
6058#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
6059#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
6060#define RMI_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
6061#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
6062#define RMI_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
6063#define RMI_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
6064#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
6065#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
6066#define RMI_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
6067#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
6068#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
6069#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
6070#define RMI_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
6071#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
6072#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
6073#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
6074//RMI_UTCL1_CNTL2
6075#define RMI_UTCL1_CNTL2__UTC_SPARE__SHIFT 0x0
6076#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
6077#define RMI_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
6078#define RMI_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
6079#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
6080#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
6081#define RMI_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
6082#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
6083#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE__SHIFT 0x10
6084#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
6085#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR__SHIFT 0x13
6086#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID__SHIFT 0x14
6087#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID__SHIFT 0x15
6088#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ__SHIFT 0x19
6089#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
6090#define RMI_UTCL1_CNTL2__UTC_SPARE_MASK 0x000000FFL
6091#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
6092#define RMI_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
6093#define RMI_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
6094#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
6095#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
6096#define RMI_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
6097#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
6098#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE_MASK 0x00030000L
6099#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
6100#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR_MASK 0x00080000L
6101#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
6102#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID_MASK 0x01E00000L
6103#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ_MASK 0x02000000L
6104#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
6105//RMI_UTC_UNIT_CONFIG
6106//RMI_TCIW_FORMATTER0_CNTL
6107#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_OVERRIDE__SHIFT 0x0
6108#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_TIME_OUT_WINDOW__SHIFT 0x1
6109#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
6110#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA__SHIFT 0x13
6111#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_UPDATE__SHIFT 0x1b
6112#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_REQ_SAFE_MODE__SHIFT 0x1c
6113#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS__SHIFT 0x1d
6114#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
6115#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA__SHIFT 0x1f
6116#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_OVERRIDE_MASK 0x00000001L
6117#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_TIME_OUT_WINDOW_MASK 0x000001FEL
6118#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
6119#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_MASK 0x07F80000L
6120#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_UPDATE_MASK 0x08000000L
6121#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_REQ_SAFE_MODE_MASK 0x10000000L
6122#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS_MASK 0x20000000L
6123#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
6124#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA_MASK 0x80000000L
6125//RMI_TCIW_FORMATTER1_CNTL
6126#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE__SHIFT 0x0
6127#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW__SHIFT 0x1
6128#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
6129#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA__SHIFT 0x13
6130#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_UPDATE__SHIFT 0x1b
6131#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_REQ_SAFE_MODE__SHIFT 0x1c
6132#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS__SHIFT 0x1d
6133#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
6134#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA__SHIFT 0x1f
6135#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE_MASK 0x00000001L
6136#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW_MASK 0x000001FEL
6137#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
6138#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_MASK 0x07F80000L
6139#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_UPDATE_MASK 0x08000000L
6140#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_REQ_SAFE_MODE_MASK 0x10000000L
6141#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS_MASK 0x20000000L
6142#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
6143#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA_MASK 0x80000000L
6144//RMI_SCOREBOARD_CNTL
6145#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH__SHIFT 0x0
6146#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0__SHIFT 0x1
6147#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH__SHIFT 0x2
6148#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1__SHIFT 0x3
6149#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB1__SHIFT 0x4
6150#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN__SHIFT 0x5
6151#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE__SHIFT 0x6
6152#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB0__SHIFT 0x7
6153#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_EN__SHIFT 0x8
6154#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE__SHIFT 0x9
6155#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH_MASK 0x00000001L
6156#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0_MASK 0x00000002L
6157#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH_MASK 0x00000004L
6158#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1_MASK 0x00000008L
6159#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB1_MASK 0x00000010L
6160#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN_MASK 0x00000020L
6161#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE_MASK 0x00000040L
6162#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB0_MASK 0x00000080L
6163#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_EN_MASK 0x00000100L
6164#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE_MASK 0x001FFE00L
6165//RMI_SCOREBOARD_STATUS0
6166#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID__SHIFT 0x0
6167#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG__SHIFT 0x1
6168#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID__SHIFT 0x2
6169#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE__SHIFT 0x12
6170#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE__SHIFT 0x13
6171#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE__SHIFT 0x14
6172#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE__SHIFT 0x15
6173#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID_MASK 0x00000001L
6174#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG_MASK 0x00000002L
6175#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID_MASK 0x0003FFFCL
6176#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE_MASK 0x00040000L
6177#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE_MASK 0x00080000L
6178#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE_MASK 0x00100000L
6179#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE_MASK 0x00200000L
6180//RMI_SCOREBOARD_STATUS1
6181#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0__SHIFT 0x0
6182#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0__SHIFT 0xc
6183#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0__SHIFT 0xd
6184#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED__SHIFT 0xe
6185#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1__SHIFT 0xf
6186#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1__SHIFT 0x1b
6187#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1__SHIFT 0x1c
6188#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1__SHIFT 0x1d
6189#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0__SHIFT 0x1e
6190#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0_MASK 0x00000FFFL
6191#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0_MASK 0x00001000L
6192#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0_MASK 0x00002000L
6193#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED_MASK 0x00004000L
6194#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1_MASK 0x07FF8000L
6195#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1_MASK 0x08000000L
6196#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1_MASK 0x10000000L
6197#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1_MASK 0x20000000L
6198#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0_MASK 0x40000000L
6199//RMI_SCOREBOARD_STATUS2
6200#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0__SHIFT 0x0
6201#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0__SHIFT 0xc
6202#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1__SHIFT 0xd
6203#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1__SHIFT 0x19
6204#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1__SHIFT 0x1a
6205#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0__SHIFT 0x1b
6206#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0__SHIFT 0x1c
6207#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1__SHIFT 0x1d
6208#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0__SHIFT 0x1e
6209#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1__SHIFT 0x1f
6210#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0_MASK 0x00000FFFL
6211#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0_MASK 0x00001000L
6212#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1_MASK 0x01FFE000L
6213#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1_MASK 0x02000000L
6214#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1_MASK 0x04000000L
6215#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0_MASK 0x08000000L
6216#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0_MASK 0x10000000L
6217#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1_MASK 0x20000000L
6218#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0_MASK 0x40000000L
6219#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1_MASK 0x80000000L
6220//RMI_XBAR_ARBITER_CONFIG
6221#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE__SHIFT 0x0
6222#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x2
6223#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL__SHIFT 0x3
6224#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x4
6225#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x6
6226#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x8
6227#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE__SHIFT 0x10
6228#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x12
6229#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL__SHIFT 0x13
6230#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x14
6231#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x16
6232#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x18
6233#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_MASK 0x00000003L
6234#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00000004L
6235#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_MASK 0x00000008L
6236#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000010L
6237#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE_MASK 0x000000C0L
6238#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE_MASK 0x0000FF00L
6239#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_MASK 0x00030000L
6240#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00040000L
6241#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_MASK 0x00080000L
6242#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00100000L
6243#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00C00000L
6244#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE_MASK 0xFF000000L
6245//RMI_XBAR_ARBITER_CONFIG_1
6246#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD__SHIFT 0x0
6247#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR__SHIFT 0x8
6248#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_RD__SHIFT 0x10
6249#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_WR__SHIFT 0x18
6250#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD_MASK 0x000000FFL
6251#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR_MASK 0x0000FF00L
6252#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_RD_MASK 0x00FF0000L
6253#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_WR_MASK 0xFF000000L
6254//RMI_CLOCK_CNTRL
6255#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK__SHIFT 0x0
6256#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK__SHIFT 0x5
6257#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK__SHIFT 0xa
6258#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK__SHIFT 0xf
6259#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_BUSY_MASK__SHIFT 0x14
6260#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_WAKEUP_MASK__SHIFT 0x19
6261#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK_MASK 0x0000001FL
6262#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK_MASK 0x000003E0L
6263#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK_MASK 0x00007C00L
6264#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK_MASK 0x000F8000L
6265#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_BUSY_MASK_MASK 0x01F00000L
6266#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_WAKEUP_MASK_MASK 0x3E000000L
6267//RMI_UTCL1_STATUS
6268#define RMI_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
6269#define RMI_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
6270#define RMI_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
6271#define RMI_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
6272#define RMI_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
6273#define RMI_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
6274//RMI_SPARE
6275#define RMI_SPARE__RMI_ARBITER_STALL_TIMER_ENABLED_ALLOW_STREAMING__SHIFT 0x0
6276#define RMI_SPARE__SPARE_BIT_1__SHIFT 0x1
6277#define RMI_SPARE__SPARE_BIT_2__SHIFT 0x2
6278#define RMI_SPARE__SPARE_BIT_3__SHIFT 0x3
6279#define RMI_SPARE__SPARE_BIT_4__SHIFT 0x4
6280#define RMI_SPARE__SPARE_BIT_5__SHIFT 0x5
6281#define RMI_SPARE__SPARE_BIT_6__SHIFT 0x6
6282#define RMI_SPARE__SPARE_BIT_7__SHIFT 0x7
6283#define RMI_SPARE__SPARE_BIT_8_0__SHIFT 0x8
6284#define RMI_SPARE__SPARE_BIT_16_0__SHIFT 0x10
6285#define RMI_SPARE__RMI_ARBITER_STALL_TIMER_ENABLED_ALLOW_STREAMING_MASK 0x00000001L
6286#define RMI_SPARE__SPARE_BIT_1_MASK 0x00000002L
6287#define RMI_SPARE__SPARE_BIT_2_MASK 0x00000004L
6288#define RMI_SPARE__SPARE_BIT_3_MASK 0x00000008L
6289#define RMI_SPARE__SPARE_BIT_4_MASK 0x00000010L
6290#define RMI_SPARE__SPARE_BIT_5_MASK 0x00000020L
6291#define RMI_SPARE__SPARE_BIT_6_MASK 0x00000040L
6292#define RMI_SPARE__SPARE_BIT_7_MASK 0x00000080L
6293#define RMI_SPARE__SPARE_BIT_8_0_MASK 0x0000FF00L
6294#define RMI_SPARE__SPARE_BIT_16_0_MASK 0xFFFF0000L
6295//RMI_SPARE_1
6296#define RMI_SPARE_1__SPARE_BIT_8__SHIFT 0x0
6297#define RMI_SPARE_1__SPARE_BIT_9__SHIFT 0x1
6298#define RMI_SPARE_1__SPARE_BIT_10__SHIFT 0x2
6299#define RMI_SPARE_1__SPARE_BIT_11__SHIFT 0x3
6300#define RMI_SPARE_1__SPARE_BIT_12__SHIFT 0x4
6301#define RMI_SPARE_1__SPARE_BIT_13__SHIFT 0x5
6302#define RMI_SPARE_1__SPARE_BIT_14__SHIFT 0x6
6303#define RMI_SPARE_1__SPARE_BIT_15__SHIFT 0x7
6304#define RMI_SPARE_1__SPARE_BIT_8_1__SHIFT 0x8
6305#define RMI_SPARE_1__SPARE_BIT_16_1__SHIFT 0x10
6306#define RMI_SPARE_1__SPARE_BIT_8_MASK 0x00000001L
6307#define RMI_SPARE_1__SPARE_BIT_9_MASK 0x00000002L
6308#define RMI_SPARE_1__SPARE_BIT_10_MASK 0x00000004L
6309#define RMI_SPARE_1__SPARE_BIT_11_MASK 0x00000008L
6310#define RMI_SPARE_1__SPARE_BIT_12_MASK 0x00000010L
6311#define RMI_SPARE_1__SPARE_BIT_13_MASK 0x00000020L
6312#define RMI_SPARE_1__SPARE_BIT_14_MASK 0x00000040L
6313#define RMI_SPARE_1__SPARE_BIT_15_MASK 0x00000080L
6314#define RMI_SPARE_1__SPARE_BIT_8_1_MASK 0x0000FF00L
6315#define RMI_SPARE_1__SPARE_BIT_16_1_MASK 0xFFFF0000L
6316//RMI_SPARE_2
6317#define RMI_SPARE_2__SPARE_BIT_16__SHIFT 0x0
6318#define RMI_SPARE_2__SPARE_BIT_17__SHIFT 0x1
6319#define RMI_SPARE_2__SPARE_BIT_18__SHIFT 0x2
6320#define RMI_SPARE_2__SPARE_BIT_19__SHIFT 0x3
6321#define RMI_SPARE_2__SPARE_BIT_20__SHIFT 0x4
6322#define RMI_SPARE_2__SPARE_BIT_21__SHIFT 0x5
6323#define RMI_SPARE_2__SPARE_BIT_22__SHIFT 0x6
6324#define RMI_SPARE_2__SPARE_BIT_23__SHIFT 0x7
6325#define RMI_SPARE_2__SPARE_BIT_4_0__SHIFT 0x8
6326#define RMI_SPARE_2__SPARE_BIT_4_1__SHIFT 0xc
6327#define RMI_SPARE_2__SPARE_BIT_8_2__SHIFT 0x10
6328#define RMI_SPARE_2__SPARE_BIT_8_3__SHIFT 0x18
6329#define RMI_SPARE_2__SPARE_BIT_16_MASK 0x00000001L
6330#define RMI_SPARE_2__SPARE_BIT_17_MASK 0x00000002L
6331#define RMI_SPARE_2__SPARE_BIT_18_MASK 0x00000004L
6332#define RMI_SPARE_2__SPARE_BIT_19_MASK 0x00000008L
6333#define RMI_SPARE_2__SPARE_BIT_20_MASK 0x00000010L
6334#define RMI_SPARE_2__SPARE_BIT_21_MASK 0x00000020L
6335#define RMI_SPARE_2__SPARE_BIT_22_MASK 0x00000040L
6336#define RMI_SPARE_2__SPARE_BIT_23_MASK 0x00000080L
6337#define RMI_SPARE_2__SPARE_BIT_4_0_MASK 0x00000F00L
6338#define RMI_SPARE_2__SPARE_BIT_4_1_MASK 0x0000F000L
6339#define RMI_SPARE_2__SPARE_BIT_8_2_MASK 0x00FF0000L
6340#define RMI_SPARE_2__SPARE_BIT_8_3_MASK 0xFF000000L
6341
6342
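/*
 * Illustrative only (an assumption, not emitted by the header generator):
 * writing a field is the mirror of the read pattern - clear the mask,
 * then OR in the shifted, mask-bounded value. Sketch for the
 * RMI_GENERAL_CNTL1.POLICY_OVERRIDE_VALUE bit defined above; the helper
 * name is hypothetical.
 */
static inline unsigned int
rmi_general_cntl1_set_policy_override_value(unsigned int reg_val,
					    unsigned int v)
{
	reg_val &= ~RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE_MASK;
	reg_val |= (v << RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE__SHIFT) &
		   RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE_MASK;
	return reg_val;
}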
6343// addressBlock: gc_dbgu_gfx_dbgudec
6344//port_a_addr
6345#define port_a_addr__Index__SHIFT 0x0
6346#define port_a_addr__Reserved__SHIFT 0x8
6347#define port_a_addr__ReadEnable__SHIFT 0x1f
6348#define port_a_addr__Index_MASK 0x000000FFL
6349#define port_a_addr__Reserved_MASK 0x7FFFFF00L
6350#define port_a_addr__ReadEnable_MASK 0x80000000L
6351//port_a_data_lo
6352#define port_a_data_lo__Data__SHIFT 0x0
6353#define port_a_data_lo__Data_MASK 0xFFFFFFFFL
6354//port_a_data_hi
6355#define port_a_data_hi__Data__SHIFT 0x0
6356#define port_a_data_hi__Data_MASK 0xFFFFFFFFL
6357//port_b_addr
6358#define port_b_addr__Index__SHIFT 0x0
6359#define port_b_addr__Reserved__SHIFT 0x8
6360#define port_b_addr__ReadEnable__SHIFT 0x1f
6361#define port_b_addr__Index_MASK 0x000000FFL
6362#define port_b_addr__Reserved_MASK 0x7FFFFF00L
6363#define port_b_addr__ReadEnable_MASK 0x80000000L
6364//port_b_data_lo
6365#define port_b_data_lo__Data__SHIFT 0x0
6366#define port_b_data_lo__Data_MASK 0xFFFFFFFFL
6367//port_b_data_hi
6368#define port_b_data_hi__Data__SHIFT 0x0
6369#define port_b_data_hi__Data_MASK 0xFFFFFFFFL
6370//port_c_addr
6371#define port_c_addr__Index__SHIFT 0x0
6372#define port_c_addr__Reserved__SHIFT 0x8
6373#define port_c_addr__ReadEnable__SHIFT 0x1f
6374#define port_c_addr__Index_MASK 0x000000FFL
6375#define port_c_addr__Reserved_MASK 0x7FFFFF00L
6376#define port_c_addr__ReadEnable_MASK 0x80000000L
6377//port_c_data_lo
6378#define port_c_data_lo__Data__SHIFT 0x0
6379#define port_c_data_lo__Data_MASK 0xFFFFFFFFL
6380//port_c_data_hi
6381#define port_c_data_hi__Data__SHIFT 0x0
6382#define port_c_data_hi__Data_MASK 0xFFFFFFFFL
6383//port_d_addr
6384#define port_d_addr__Index__SHIFT 0x0
6385#define port_d_addr__Reserved__SHIFT 0x8
6386#define port_d_addr__ReadEnable__SHIFT 0x1f
6387#define port_d_addr__Index_MASK 0x000000FFL
6388#define port_d_addr__Reserved_MASK 0x7FFFFF00L
6389#define port_d_addr__ReadEnable_MASK 0x80000000L
6390//port_d_data_lo
6391#define port_d_data_lo__Data__SHIFT 0x0
6392#define port_d_data_lo__Data_MASK 0xFFFFFFFFL
6393//port_d_data_hi
6394#define port_d_data_hi__Data__SHIFT 0x0
6395#define port_d_data_hi__Data_MASK 0xFFFFFFFFL
6396
6397
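/*
 * Illustrative only (assumed usage, not part of the generated header):
 * the port_*_addr registers above pack an 8-bit Index with a ReadEnable
 * flag in bit 31. A minimal sketch of composing such a value with the
 * port_a_addr macros; the helper name is made up.
 */
static inline unsigned int dbgu_port_a_addr_value(unsigned int index)
{
	return ((index << port_a_addr__Index__SHIFT) &
		port_a_addr__Index_MASK) |
	       port_a_addr__ReadEnable_MASK;
}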
6398// addressBlock: gc_utcl2_atcl2dec
6399//ATC_L2_CNTL
6400#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
6401#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
6402#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
6403#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
6404#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x8
6405#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
6406#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
6407#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
6408#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
6409#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
6410#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00000700L
6411#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
6412//ATC_L2_CNTL2
6413#define ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
6414#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x6
6415#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x8
6416#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0x9
6417#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xc
6418#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0xf
6419#define ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
6420#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
6421#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000100L
6422#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00000E00L
6423#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00007000L
6424#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x001F8000L
6425//ATC_L2_CACHE_DATA0
6426#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
6427#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
6428#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
6429#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x17
6430#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
6431#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
6432#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x007FFFFCL
6433#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x07800000L
6434//ATC_L2_CACHE_DATA1
6435#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
6436#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
6437//ATC_L2_CACHE_DATA2
6438#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
6439#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
6440//ATC_L2_CNTL3
6441#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x0
6442#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x3
6443#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x00000007L
6444#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x000001F8L
6445//ATC_L2_STATUS
6446#define ATC_L2_STATUS__BUSY__SHIFT 0x0
6447#define ATC_L2_STATUS__PARITY_ERROR_INFO__SHIFT 0x1
6448#define ATC_L2_STATUS__BUSY_MASK 0x00000001L
6449#define ATC_L2_STATUS__PARITY_ERROR_INFO_MASK 0x3FFFFFFEL
6450//ATC_L2_STATUS2
6451#define ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0
6452#define ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8
6453#define ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL
6454#define ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L
6455//ATC_L2_MISC_CG
6456#define ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
6457#define ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
6458#define ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
6459#define ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
6460#define ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
6461#define ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
6462//ATC_L2_MEM_POWER_LS
6463#define ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
6464#define ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
6465#define ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
6466#define ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
6467//ATC_L2_CGTT_CLK_CTRL
6468#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
6469#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
6470#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
6471#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
6472#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
6473#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
6474#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
6475#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
6476#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
6477#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
6478
6479
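/*
 * Illustrative only (an assumption): composing several fields at once,
 * here an ATC_L2_MISC_CG value with clock gating enabled and a
 * hypothetical off-delay of 8. Field names come from the defines above;
 * the chosen values and helper name are for illustration only.
 */
static inline unsigned int atc_l2_misc_cg_value(void)
{
	return ((8u << ATC_L2_MISC_CG__OFFDLY__SHIFT) &
		ATC_L2_MISC_CG__OFFDLY_MASK) |
	       ATC_L2_MISC_CG__ENABLE_MASK;
}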
6480// addressBlock: gc_utcl2_vml2pfdec
6481//VM_L2_CNTL
6482#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
6483#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
6484#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
6485#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
6486#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
6487#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
6488#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
6489#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
6490#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
6491#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
6492#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
6493#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
6494#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
6495#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
6496#define VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
6497#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
6498#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
6499#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
6500#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
6501#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
6502#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
6503#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
6504#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
6505#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
6506#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
6507#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
6508#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
6509#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
6510//VM_L2_CNTL2
6511#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
6512#define VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
6513#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
6514#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
6515#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
6516#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
6517#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
6518#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
6519#define VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
6520#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
6521#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
6522#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
6523#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
6524#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
6525//VM_L2_CNTL3
6526#define VM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
6527#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
6528#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
6529#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
6530#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
6531#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
6532#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
6533#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
6534#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
6535#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
6536#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
6537#define VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
6538#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
6539#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
6540#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
6541#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
6542#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
6543#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
6544#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
6545#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
6546#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
6547#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
6548//VM_L2_STATUS
6549#define VM_L2_STATUS__L2_BUSY__SHIFT 0x0
6550#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
6551#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
6552#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
6553#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
6554#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
6555#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
6556#define VM_L2_STATUS__L2_BUSY_MASK 0x00000001L
6557#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
6558#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
6559#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
6560#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
6561#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
6562#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
6563//VM_DUMMY_PAGE_FAULT_CNTL
6564#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
6565#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
6566#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
6567#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
6568#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
6569#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
6570//VM_DUMMY_PAGE_FAULT_ADDR_LO32
6571#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
6572#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
6573//VM_DUMMY_PAGE_FAULT_ADDR_HI32
6574#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
6575#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
6576//VM_L2_PROTECTION_FAULT_CNTL
6577#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
6578#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
6579#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
6580#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
6581#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
6582#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
6583#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
6584#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
6585#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
6586#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
6587#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
6588#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
6589#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
6590#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
6591#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
6592#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
6593#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
6594#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
6595#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
6596#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
6597#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
6598#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
6599#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
6600#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
6601#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
6602#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
6603#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
6604#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
6605#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
6606#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
6607#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
6608#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
6609#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
6610#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
6611//VM_L2_PROTECTION_FAULT_CNTL2
6612#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
6613#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
6614#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
6615#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
6616#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
6617#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
6618#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
6619#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
6620#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
6621#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
6622//VM_L2_PROTECTION_FAULT_MM_CNTL3
6623#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
6624#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
6625//VM_L2_PROTECTION_FAULT_MM_CNTL4
6626#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
6627#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
6628//VM_L2_PROTECTION_FAULT_STATUS
6629#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
6630#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
6631#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
6632#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
6633#define VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
6634#define VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
6635#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
6636#define VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
6637#define VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
6638#define VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
6639#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
6640#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
6641#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
6642#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
6643#define VM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
6644#define VM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
6645#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
6646#define VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
6647#define VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
6648#define VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
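/*
 * Illustrative only (a sketch, not generated content): a fault handler
 * would typically pull the client id, vmid and read/write direction out
 * of a raw VM_L2_PROTECTION_FAULT_STATUS value with the masks defined
 * above; the helper below is hypothetical.
 */
static inline void vm_l2_fault_status_decode(unsigned int status,
					     unsigned int *cid,
					     unsigned int *vmid,
					     unsigned int *rw)
{
	*cid  = (status & VM_L2_PROTECTION_FAULT_STATUS__CID_MASK) >>
		VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT;
	*vmid = (status & VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK) >>
		VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT;
	*rw   = (status & VM_L2_PROTECTION_FAULT_STATUS__RW_MASK) >>
		VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT;
}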
6649//VM_L2_PROTECTION_FAULT_ADDR_LO32
6650#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
6651#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
6652//VM_L2_PROTECTION_FAULT_ADDR_HI32
6653#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
6654#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
6655//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
6656#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
6657#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
6658//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
6659#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
6660#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
6661//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
6662#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
6663#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
6664//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
6665#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
6666#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
6667//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
6668#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
6669#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
6670//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
6671#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
6672#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
6673//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
6674#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
6675#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
6676//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
6677#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
6678#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
6679//VM_L2_CNTL4
6680#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
6681#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
6682#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
6683#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
6684#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
6685#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
6686#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
6687#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
6688#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
6689#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
6690#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
6691#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
6692//VM_L2_MM_GROUP_RT_CLASSES
6693#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
6694#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
6695#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
6696#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
6697#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
6698#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
6699#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
6700#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
6701#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
6702#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
6703#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
6704#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
6705#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
6706#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
6707#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
6708#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
6709#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
6710#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
6711#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
6712#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
6713#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
6714#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
6715#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
6716#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
6717#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
6718#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
6719#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
6720#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
6721#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
6722#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
6723#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
6724#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
6725#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
6726#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
6727#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
6728#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
6729#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
6730#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
6731#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
6732#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
6733#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
6734#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
6735#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
6736#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
6737#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
6738#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
6739#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
6740#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
6741#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
6742#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
6743#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
6744#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
6745#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
6746#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
6747#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
6748#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
6749#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
6750#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
6751#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
6752#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
6753#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
6754#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
6755#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
6756#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
6757//VM_L2_BANK_SELECT_RESERVED_CID
6758#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
6759#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
6760#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
6761#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
6762#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
6763#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
6764#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
6765#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
6766#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
6767#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
6768//VM_L2_BANK_SELECT_RESERVED_CID2
6769#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
6770#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
6771#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
6772#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
6773#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
6774#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
6775#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
6776#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
6777#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
6778#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
6779//VM_L2_CACHE_PARITY_CNTL
6780#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
6781#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
6782#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
6783#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
6784#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
6785#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
6786#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
6787#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
6788#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
6789#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
6790#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
6791#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
6792#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
6793#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
6794#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
6795#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
6796#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
6797#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
6798//VM_L2_CGTT_CLK_CTRL
6799#define VM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
6800#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
6801#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
6802#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
6803#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
6804#define VM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
6805#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
6806#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
6807#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
6808#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
6809
6810
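/*
 * Illustrative sketch, not part of the original register header: the
 * __SHIFT/_MASK pairs in the block above are consumed by packing a field
 * value into a 32-bit register word before it is written.  The helper names
 * below are hypothetical and exist only to show the pattern; real driver
 * code typically goes through REG_SET_FIELD()-style macros instead.
 */
#include <stdint.h>

/* Pack a field: clear the field's bits, then OR in the shifted value. */
static inline uint32_t vm_reg_set_field(uint32_t reg, uint32_t mask,
					uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: limit non-real-time MM transactions in VM_L2_CNTL4 to 16. */
static inline uint32_t vm_l2_cntl4_set_nonrt_limit(uint32_t cntl4)
{
	return vm_reg_set_field(cntl4,
				VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK,
				VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT,
				16);
}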
6811// addressBlock: gc_utcl2_vml2vcdec
6812//VM_CONTEXT0_CNTL
6813#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
6814#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
6815#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
6816#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
6817#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
6818#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
6819#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
6820#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
6821#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
6822#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
6823#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
6824#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
6825#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
6826#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
6827#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
6828#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
6829#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
6830#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
6831#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
6832#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
6833#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
6834#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
6835#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
6836#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
6837#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
6838#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
6839#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
6840#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
6841#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
6842#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
6843#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
6844#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
6845#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
6846#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
6847#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
6848#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
6849#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
6850#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
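/*
 * Illustrative sketch, not part of the original header: a VM_CONTEXT0_CNTL
 * value is normally built by OR-ing shifted field values together - enable
 * the context, select the page-table depth and block size, and pick which
 * protection faults raise an interrupt.  The function below is a
 * hypothetical stand-in for the real gmc setup code, shown only to make the
 * field layout above concrete.
 */
#include <stdint.h>

static inline uint32_t vm_context0_cntl_value(uint32_t page_table_depth,
					      uint32_t block_size)
{
	uint32_t cntl = 0;

	cntl |= 1 << VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT;
	cntl |= (page_table_depth << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) &
		VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK;
	cntl |= (block_size << VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) &
		VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK;
	/* Raise an interrupt on out-of-range accesses and invalid PTEs. */
	cntl |= VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
	cntl |= VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	return cntl;
}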
6851//VM_CONTEXT1_CNTL
6852#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
6853#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
6854#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
6855#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
6856#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
6857#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
6858#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
6859#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
6860#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
6861#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
6862#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
6863#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
6864#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
6865#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
6866#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
6867#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
6868#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
6869#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
6870#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
6871#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
6872#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
6873#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
6874#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
6875#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
6876#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
6877#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
6878#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
6879#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
6880#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
6881#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
6882#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
6883#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
6884#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
6885#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
6886#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
6887#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
6888#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
6889#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
6890//VM_CONTEXT2_CNTL
6891#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
6892#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
6893#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
6894#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
6895#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
6896#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
6897#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
6898#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
6899#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
6900#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
6901#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
6902#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
6903#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
6904#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
6905#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
6906#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
6907#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
6908#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
6909#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
6910#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
6911#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
6912#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
6913#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
6914#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
6915#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
6916#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
6917#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
6918#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
6919#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
6920#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
6921#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
6922#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
6923#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
6924#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
6925#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
6926#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
6927#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
6928#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
6929//VM_CONTEXT3_CNTL
6930#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
6931#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
6932#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
6933#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
6934#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
6935#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
6936#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
6937#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
6938#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
6939#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
6940#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
6941#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
6942#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
6943#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
6944#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
6945#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
6946#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
6947#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
6948#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
6949#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
6950#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
6951#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
6952#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
6953#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
6954#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
6955#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
6956#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
6957#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
6958#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
6959#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
6960#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
6961#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
6962#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
6963#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
6964#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
6965#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
6966#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
6967#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
6968//VM_CONTEXT4_CNTL
6969#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
6970#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
6971#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
6972#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
6973#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
6974#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
6975#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
6976#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
6977#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
6978#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
6979#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
6980#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
6981#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
6982#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
6983#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
6984#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
6985#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
6986#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
6987#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
6988#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
6989#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
6990#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
6991#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
6992#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
6993#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
6994#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
6995#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
6996#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
6997#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
6998#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
6999#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7000#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7001#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
7002#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
7003#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
7004#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
7005#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
7006#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
7007//VM_CONTEXT5_CNTL
7008#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
7009#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
7010#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
7011#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
7012#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
7013#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
7014#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
7015#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
7016#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
7017#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
7018#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
7019#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
7020#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
7021#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
7022#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
7023#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
7024#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
7025#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
7026#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
7027#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
7028#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
7029#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
7030#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
7031#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
7032#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
7033#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
7034#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
7035#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
7036#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
7037#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
7038#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7039#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7040#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
7041#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
7042#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
7043#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
7044#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
7045#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
7046//VM_CONTEXT6_CNTL
7047#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
7048#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
7049#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
7050#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
7051#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
7052#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
7053#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
7054#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
7055#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
7056#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
7057#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
7058#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
7059#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
7060#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
7061#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
7062#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
7063#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
7064#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
7065#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
7066#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
7067#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
7068#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
7069#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
7070#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
7071#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
7072#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
7073#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
7074#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
7075#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
7076#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
7077#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7078#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7079#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
7080#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
7081#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
7082#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
7083#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
7084#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
7085//VM_CONTEXT7_CNTL
7086#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
7087#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
7088#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
7089#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
7090#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
7091#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
7092#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
7093#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
7094#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
7095#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
7096#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
7097#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
7098#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
7099#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
7100#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
7101#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
7102#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
7103#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
7104#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
7105#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
7106#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
7107#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
7108#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
7109#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
7110#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
7111#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
7112#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
7113#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
7114#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
7115#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
7116#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7117#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7118#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
7119#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
7120#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
7121#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
7122#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
7123#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
7124//VM_CONTEXT8_CNTL
7125#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
7126#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
7127#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
7128#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
7129#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
7130#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
7131#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
7132#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
7133#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
7134#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
7135#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
7136#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
7137#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
7138#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
7139#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
7140#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
7141#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
7142#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
7143#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
7144#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
7145#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
7146#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
7147#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
7148#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
7149#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
7150#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
7151#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
7152#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
7153#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
7154#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
7155#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7156#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7157#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
7158#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
7159#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
7160#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
7161#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
7162#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
7163//VM_CONTEXT9_CNTL
7164#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
7165#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
7166#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
7167#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
7168#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
7169#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
7170#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
7171#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
7172#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
7173#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
7174#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
7175#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
7176#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
7177#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
7178#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
7179#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
7180#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
7181#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
7182#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
7183#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
7184#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
7185#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
7186#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
7187#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
7188#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
7189#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
7190#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
7191#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
7192#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
7193#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
7194#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7195#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7196#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
7197#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
7198#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
7199#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
7200#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
7201#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
7202//VM_CONTEXT10_CNTL
7203#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
7204#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
7205#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
7206#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
7207#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
7208#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
7209#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
7210#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
7211#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
7212#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
7213#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
7214#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
7215#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
7216#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
7217#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
7218#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
7219#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
7220#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
7221#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
7222#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
7223#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
7224#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
7225#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
7226#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
7227#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
7228#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
7229#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
7230#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
7231#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
7232#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
7233#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7234#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7235#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
7236#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
7237#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
7238#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
7239#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
7240#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
7241//VM_CONTEXT11_CNTL
7242#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
7243#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
7244#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
7245#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
7246#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
7247#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
7248#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
7249#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
7250#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
7251#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
7252#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
7253#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
7254#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
7255#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
7256#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
7257#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
7258#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
7259#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
7260#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
7261#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
7262#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
7263#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
7264#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
7265#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
7266#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
7267#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
7268#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
7269#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
7270#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
7271#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
7272#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7273#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7274#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
7275#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
7276#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
7277#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
7278#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
7279#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
7280//VM_CONTEXT12_CNTL
7281#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
7282#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
7283#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
7284#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
7285#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
7286#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
7287#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
7288#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
7289#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
7290#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
7291#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
7292#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
7293#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
7294#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
7295#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
7296#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
7297#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
7298#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
7299#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
7300#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
7301#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
7302#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
7303#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
7304#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
7305#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
7306#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
7307#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
7308#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
7309#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
7310#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
7311#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7312#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7313#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
7314#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
7315#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
7316#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
7317#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
7318#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
7319//VM_CONTEXT13_CNTL
7320#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
7321#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
7322#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
7323#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
7324#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
7325#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
7326#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
7327#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
7328#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
7329#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
7330#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
7331#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
7332#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
7333#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
7334#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
7335#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
7336#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
7337#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
7338#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
7339#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
7340#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
7341#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
7342#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
7343#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
7344#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
7345#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
7346#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
7347#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
7348#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
7349#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
7350#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7351#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7352#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
7353#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
7354#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
7355#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
7356#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
7357#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
7358//VM_CONTEXT14_CNTL
7359#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
7360#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
7361#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
7362#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
7363#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
7364#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
7365#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
7366#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
7367#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
7368#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
7369#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
7370#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
7371#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
7372#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
7373#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
7374#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
7375#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
7376#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
7377#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
7378#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
7379#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
7380#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
7381#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
7382#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
7383#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
7384#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
7385#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
7386#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
7387#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
7388#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
7389#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
7390#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
7391#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//VM_CONTEXT15_CNTL
#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//VM_CONTEXTS_DISABLE
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
//VM_INVALIDATE_ENG0_SEM
#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG1_SEM
#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG2_SEM
#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG3_SEM
#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG4_SEM
#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG5_SEM
#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG6_SEM
#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG7_SEM
#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG8_SEM
#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG9_SEM
#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG10_SEM
#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG11_SEM
#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG12_SEM
#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG13_SEM
#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG14_SEM
#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG15_SEM
#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG16_SEM
#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG17_SEM
#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
//VM_INVALIDATE_ENG0_REQ
#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
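// Illustrative sketch, not part of the original register header: the *_REQ
// SHIFT/MASK pairs above are meant to be combined into a single 32-bit request
// word by shifting each field value into place and masking it. The helper below
// uses only the ENG0 macros defined above; the function name and the choice of
// which invalidate bits to set are assumptions for demonstration, and no field
// semantics beyond the bit layout are asserted here.
#include <stdint.h> /* for uint32_t in this sketch */

static inline uint32_t vm_invalidate_eng0_build_req(uint32_t vmid_bits,
						    uint32_t flush_type)
{
	uint32_t req = 0;

	/* one request bit per VMID, bits 15:0 */
	req |= (vmid_bits << VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT) &
	       VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK;
	/* 2-bit flush type, bits 17:16 */
	req |= (flush_type << VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT) &
	       VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK;
	/* example: also drop cached L2 and L1 PTEs as part of the invalidation */
	req |= VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK;
	req |= VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK;

	return req;
}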
//VM_INVALIDATE_ENG1_REQ
#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG2_REQ
#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG3_REQ
#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG4_REQ
#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG5_REQ
#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG6_REQ
#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG7_REQ
#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG8_REQ
#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG9_REQ
#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG10_REQ
#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG11_REQ
#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG12_REQ
#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG13_REQ
#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG14_REQ
#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG15_REQ
#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG16_REQ
#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG17_REQ
#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00030000L
#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
//VM_INVALIDATE_ENG0_ACK
#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG1_ACK
#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG2_ACK
#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG3_ACK
#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG4_ACK
#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG5_ACK
#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG6_ACK
#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG7_ACK
#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG8_ACK
#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG9_ACK
#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG10_ACK
#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG11_ACK
#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG12_ACK
#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG13_ACK
#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG14_ACK
#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG15_ACK
#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG16_ACK
#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG17_ACK
#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
//VM_INVALIDATE_ENG0_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG0_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
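// Illustrative sketch, not part of the original register header: the LO32/HI32
// address-range pair above splits a logical page number across two registers --
// the low 31 bits land in LOGI_PAGE_ADDR_RANGE_LO31 (bits 31:1 of the LO32
// register, with S_BIT at bit 0) and the next 5 bits in LOGI_PAGE_ADDR_RANGE_HI5.
// The helper below (an assumed name, uint32_t/uint64_t from <stdint.h> as in the
// earlier sketch) only demonstrates that bit packing; the meaning of S_BIT is not
// asserted here.
static inline void vm_invalidate_eng0_pack_addr_range(uint64_t logical_page,
						       uint32_t s_bit,
						       uint32_t *lo32,
						       uint32_t *hi32)
{
	/* bits 30:0 of the page number go into bits 31:1 of the LO32 register */
	*lo32 = ((uint32_t)(logical_page << VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT) &
		 VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK) |
		(s_bit & VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK);
	/* the remaining 5 high bits go into the HI32 register */
	*hi32 = (uint32_t)(logical_page >> 31) &
		VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK;
}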
//VM_INVALIDATE_ENG1_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG1_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG2_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG2_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG3_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG3_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG4_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG4_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG5_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG5_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG6_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG6_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG7_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG7_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG8_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG8_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG9_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG9_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG10_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG10_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG11_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG11_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG12_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG12_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG13_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG13_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG14_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG14_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG15_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG15_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG16_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG16_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_INVALIDATE_ENG17_ADDR_RANGE_LO32
#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//VM_INVALIDATE_ENG17_ADDR_RANGE_HI32
#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
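// Illustrative sketch, not part of the original register header: each per-context
// page-table base above is a full 64-bit page directory entry split across a
// LO32/HI32 register pair, so programming it is a plain 64-bit split. The helper
// below (an assumed name, types from <stdint.h> as in the earlier sketches) shows
// that split for context 0 using only the macros defined above.
static inline void vm_context0_split_pt_base(uint64_t pd_entry,
					     uint32_t *lo32, uint32_t *hi32)
{
	/* low 32 bits of the page directory entry */
	*lo32 = (uint32_t)(pd_entry &
			   VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK);
	/* high 32 bits of the page directory entry */
	*hi32 = (uint32_t)(pd_entry >> 32) &
		VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK;
}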
//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
8316#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
8317#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
8318//VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
8319#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
8320#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
8321//VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
8322#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
8323#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
8324//VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
8325#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
8326#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
8327//VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
8328#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
8329#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
8330//VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
8331#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
8332#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
8333//VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
8334#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
8335#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
8336//VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
8337#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
8338#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
8339//VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
8340#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
8341#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
8342//VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
8343#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
8344#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
8345//VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
8346#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
8347#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
8348//VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
8349#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
8350#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
8351
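/*
 * Illustrative sketch, not part of the generated register header: the
 * *_LO32/*_HI4 pairs above split one 36-bit logical page number across two
 * 32-bit registers.  Assuming <stdint.h> (or the kernel's u32/u64 types), a
 * reader could recombine the value with the shift/mask macros roughly as
 * below; the helper name is hypothetical.
 */
static inline uint64_t example_ctx0_pt_end_page(uint32_t lo32, uint32_t hi32)
{
	uint64_t page;

	/* Low 32 bits of the logical page number. */
	page = (lo32 & VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK) >>
	       VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT;
	/* Upper 4 bits come from the matching _HI32 register. */
	page |= (uint64_t)((hi32 & VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK) >>
			   VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT) << 32;

	return page;
}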
8352
8353// addressBlock: gc_utcl2_vmsharedpfdec
8354//MC_VM_NB_MMIOBASE
8355#define MC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
8356#define MC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
8357//MC_VM_NB_MMIOLIMIT
8358#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
8359#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
8360//MC_VM_NB_PCI_CTRL
8361#define MC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
8362#define MC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
8363//MC_VM_NB_PCI_ARB
8364#define MC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
8365#define MC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
8366//MC_VM_NB_TOP_OF_DRAM_SLOT1
8367#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
8368#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
8369//MC_VM_NB_LOWER_TOP_OF_DRAM2
8370#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
8371#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
8372#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
8373#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
8374//MC_VM_NB_UPPER_TOP_OF_DRAM2
8375#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
8376#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL
8377//MC_VM_FB_OFFSET
8378#define MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
8379#define MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
8380//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
8381#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
8382#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
8383//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
8384#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
8385#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
8386//MC_VM_STEERING
8387#define MC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
8388#define MC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
8389//MC_SHARED_VIRT_RESET_REQ
8390#define MC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
8391#define MC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
8392#define MC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
8393#define MC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
8394//MC_MEM_POWER_LS
8395#define MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
8396#define MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
8397#define MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
8398#define MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
8399//MC_VM_CACHEABLE_DRAM_ADDRESS_START
8400#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
8401#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
8402//MC_VM_CACHEABLE_DRAM_ADDRESS_END
8403#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
8404#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
8405//MC_VM_APT_CNTL
8406#define MC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
8407#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
8408#define MC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
8409#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
8410//MC_VM_LOCAL_HBM_ADDRESS_START
8411#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0
8412#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
8413//MC_VM_LOCAL_HBM_ADDRESS_END
8414#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0
8415#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
8416//MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
8417#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
8418#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
8419
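/*
 * Illustrative sketch only (not generated content): every field in this
 * block follows the same _MASK/__SHIFT convention, so decoding a register
 * value is a mask-then-shift, and single-bit flags can be tested with the
 * mask alone.  The helpers below are hypothetical examples over the
 * MC_VM_FB_OFFSET and MC_VM_APT_CNTL fields defined above, assuming
 * <stdint.h> or the kernel's fixed-width types.
 */
static inline uint32_t example_fb_offset(uint32_t mc_vm_fb_offset)
{
	/* Extract the 24-bit FB_OFFSET field. */
	return (mc_vm_fb_offset & MC_VM_FB_OFFSET__FB_OFFSET_MASK) >>
	       MC_VM_FB_OFFSET__FB_OFFSET__SHIFT;
}

static inline int example_apt_direct_system_en(uint32_t mc_vm_apt_cntl)
{
	/* Test the single-bit DIRECT_SYSTEM_EN flag. */
	return (mc_vm_apt_cntl & MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK) != 0;
}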
8420
8421// addressBlock: gc_utcl2_vmsharedvcdec
8422//MC_VM_FB_LOCATION_BASE
8423#define MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
8424#define MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
8425//MC_VM_FB_LOCATION_TOP
8426#define MC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
8427#define MC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
8428//MC_VM_AGP_TOP
8429#define MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
8430#define MC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
8431//MC_VM_AGP_BOT
8432#define MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
8433#define MC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
8434//MC_VM_AGP_BASE
8435#define MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
8436#define MC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
8437//MC_VM_SYSTEM_APERTURE_LOW_ADDR
8438#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
8439#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
8440//MC_VM_SYSTEM_APERTURE_HIGH_ADDR
8441#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
8442#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
8443//MC_VM_MX_L1_TLB_CNTL
8444#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
8445#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
8446#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
8447#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
8448#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
8449#define MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
8450#define MC_VM_MX_L1_TLB_CNTL__ATC_EN__SHIFT 0xd
8451#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
8452#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
8453#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
8454#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
8455#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
8456#define MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00001800L
8457#define MC_VM_MX_L1_TLB_CNTL__ATC_EN_MASK 0x00002000L
8458
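/*
 * Illustrative sketch only: fields are programmed by shifting the value
 * into position, clamping it with the matching _MASK, and OR-ing the fields
 * together.  The helper below is hypothetical and uses the
 * MC_VM_MX_L1_TLB_CNTL fields defined above, assuming <stdint.h> or the
 * kernel's fixed-width types.
 */
static inline uint32_t example_mx_l1_tlb_cntl(uint32_t system_access_mode,
					      uint32_t mtype)
{
	uint32_t val = 0;

	/* Single-bit enable. */
	val |= 1u << MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT;
	/* Two-bit access mode field. */
	val |= (system_access_mode << MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT) &
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK;
	/* Two-bit memory type field. */
	val |= (mtype << MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT) &
	       MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK;

	return val;
}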
8459
8460// addressBlock: gc_ea_gceadec
8461//GCEA_DRAM_RD_CLI2GRP_MAP0
8462#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
8463#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
8464#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
8465#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
8466#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
8467#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
8468#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
8469#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
8470#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
8471#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
8472#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
8473#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
8474#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
8475#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
8476#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
8477#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
8478#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
8479#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
8480#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
8481#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
8482#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
8483#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
8484#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
8485#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
8486#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
8487#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
8488#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
8489#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
8490#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
8491#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
8492#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
8493#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
8494//GCEA_DRAM_RD_CLI2GRP_MAP1
8495#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
8496#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
8497#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
8498#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
8499#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
8500#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
8501#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
8502#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
8503#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
8504#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
8505#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
8506#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
8507#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
8508#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
8509#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
8510#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
8511#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
8512#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
8513#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
8514#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
8515#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
8516#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
8517#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
8518#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
8519#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
8520#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
8521#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
8522#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
8523#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
8524#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
8525#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
8526#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
8527//GCEA_DRAM_WR_CLI2GRP_MAP0
8528#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
8529#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
8530#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
8531#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
8532#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
8533#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
8534#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
8535#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
8536#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
8537#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
8538#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
8539#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
8540#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
8541#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
8542#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
8543#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
8544#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
8545#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
8546#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
8547#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
8548#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
8549#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
8550#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
8551#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
8552#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
8553#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
8554#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
8555#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
8556#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
8557#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
8558#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
8559#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
8560//GCEA_DRAM_WR_CLI2GRP_MAP1
8561#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
8562#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
8563#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
8564#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
8565#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
8566#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
8567#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
8568#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
8569#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
8570#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
8571#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
8572#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
8573#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
8574#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
8575#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
8576#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
8577#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
8578#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
8579#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
8580#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
8581#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
8582#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
8583#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
8584#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
8585#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
8586#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
8587#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
8588#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
8589#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
8590#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
8591#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
8592#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
8593//GCEA_DRAM_RD_GRP2VC_MAP
8594#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
8595#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
8596#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
8597#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
8598#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
8599#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
8600#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
8601#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
8602//GCEA_DRAM_WR_GRP2VC_MAP
8603#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
8604#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
8605#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
8606#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
8607#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
8608#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
8609#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
8610#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
8611//GCEA_DRAM_RD_LAZY
8612#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
8613#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
8614#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
8615#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
8616#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
8617#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
8618#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
8619#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
8620//GCEA_DRAM_WR_LAZY
8621#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
8622#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
8623#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
8624#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
8625#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
8626#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
8627#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
8628#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
8629//GCEA_DRAM_RD_CAM_CNTL
8630#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
8631#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
8632#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
8633#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
8634#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
8635#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
8636#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
8637#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
8638#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
8639#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
8640#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
8641#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
8642#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
8643#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
8644#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
8645#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
8646//GCEA_DRAM_WR_CAM_CNTL
8647#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
8648#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
8649#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
8650#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
8651#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
8652#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
8653#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
8654#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
8655#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
8656#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
8657#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
8658#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
8659#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
8660#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
8661#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
8662#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
8663//GCEA_DRAM_PAGE_BURST
8664#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
8665#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
8666#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
8667#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
8668#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
8669#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
8670#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
8671#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
8672//GCEA_DRAM_RD_PRI_AGE
8673#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
8674#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
8675#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
8676#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
8677#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
8678#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
8679#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
8680#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
8681#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
8682#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
8683#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
8684#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
8685#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
8686#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
8687#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
8688#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
8689//GCEA_DRAM_WR_PRI_AGE
8690#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
8691#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
8692#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
8693#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
8694#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
8695#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
8696#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
8697#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
8698#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
8699#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
8700#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
8701#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
8702#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
8703#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
8704#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
8705#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
8706//GCEA_DRAM_RD_PRI_QUEUING
8707#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
8708#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
8709#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
8710#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
8711#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
8712#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
8713#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
8714#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
8715//GCEA_DRAM_WR_PRI_QUEUING
8716#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
8717#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
8718#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
8719#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
8720#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
8721#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
8722#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
8723#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
8724//GCEA_DRAM_RD_PRI_FIXED
8725#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
8726#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
8727#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
8728#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
8729#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
8730#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
8731#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
8732#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
8733//GCEA_DRAM_WR_PRI_FIXED
8734#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
8735#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
8736#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
8737#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
8738#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
8739#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
8740#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
8741#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
8742//GCEA_DRAM_RD_PRI_URGENCY
8743#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
8744#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
8745#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
8746#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
8747#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
8748#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
8749#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
8750#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
8751#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
8752#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
8753#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
8754#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
8755#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
8756#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
8757#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
8758#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
8759//GCEA_DRAM_WR_PRI_URGENCY
8760#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
8761#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
8762#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
8763#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
8764#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
8765#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
8766#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
8767#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
8768#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
8769#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
8770#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
8771#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
8772#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
8773#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
8774#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
8775#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
8776//GCEA_DRAM_RD_PRI_QUANT_PRI1
8777#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
8778#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
8779#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
8780#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
8781#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
8782#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
8783#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
8784#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
8785//GCEA_DRAM_RD_PRI_QUANT_PRI2
8786#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
8787#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
8788#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
8789#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
8790#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
8791#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
8792#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
8793#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
8794//GCEA_DRAM_RD_PRI_QUANT_PRI3
8795#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
8796#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
8797#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
8798#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
8799#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
8800#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
8801#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
8802#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
8803//GCEA_DRAM_WR_PRI_QUANT_PRI1
8804#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
8805#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
8806#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
8807#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
8808#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
8809#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
8810#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
8811#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
8812//GCEA_DRAM_WR_PRI_QUANT_PRI2
8813#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
8814#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
8815#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
8816#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
8817#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
8818#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
8819#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
8820#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
8821//GCEA_DRAM_WR_PRI_QUANT_PRI3
8822#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
8823#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
8824#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
8825#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
8826#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
8827#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
8828#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
8829#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
8830//GCEA_ADDRNORM_BASE_ADDR0
8831#define GCEA_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
8832#define GCEA_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
8833#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x4
8834#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x8
8835#define GCEA_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
8836#define GCEA_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
8837#define GCEA_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
8838#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x000000F0L
8839#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000700L
8840#define GCEA_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
8841//GCEA_ADDRNORM_LIMIT_ADDR0
8842#define GCEA_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
8843#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
8844#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_DIES__SHIFT 0xa
8845#define GCEA_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
8846#define GCEA_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000000FL
8847#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
8848#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_DIES_MASK 0x00000C00L
8849#define GCEA_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
8850//GCEA_ADDRNORM_BASE_ADDR1
8851#define GCEA_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
8852#define GCEA_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
8853#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x4
8854#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x8
8855#define GCEA_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
8856#define GCEA_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
8857#define GCEA_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
8858#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x000000F0L
8859#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000700L
8860#define GCEA_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
8861//GCEA_ADDRNORM_LIMIT_ADDR1
8862#define GCEA_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
8863#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
8864#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_DIES__SHIFT 0xa
8865#define GCEA_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
8866#define GCEA_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000000FL
8867#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
8868#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_DIES_MASK 0x00000C00L
8869#define GCEA_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
8870//GCEA_ADDRNORM_OFFSET_ADDR1
8871#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
8872#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
8873#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
8874#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
8875//GCEA_ADDRNORM_HOLE_CNTL
8876#define GCEA_ADDRNORM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
8877#define GCEA_ADDRNORM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
8878#define GCEA_ADDRNORM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
8879#define GCEA_ADDRNORM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
8880//GCEA_ADDRDEC_BANK_CFG
8881#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
8882#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x5
8883#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xa
8884#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xd
8885#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x10
8886#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x11
8887#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000001FL
8888#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x000003E0L
8889#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00001C00L
8890#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x0000E000L
8891#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00010000L
8892#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00020000L
8893//GCEA_ADDRDEC_MISC_CFG
8894#define GCEA_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
8895#define GCEA_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
8896#define GCEA_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
8897#define GCEA_ADDRDEC_MISC_CFG__VCM_EN3__SHIFT 0x3
8898#define GCEA_ADDRDEC_MISC_CFG__VCM_EN4__SHIFT 0x4
8899#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
8900#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
8901#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
8902#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x10
8903#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x14
8904#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x16
8905#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x18
8906#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1b
8907#define GCEA_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
8908#define GCEA_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
8909#define GCEA_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
8910#define GCEA_ADDRDEC_MISC_CFG__VCM_EN3_MASK 0x00000008L
8911#define GCEA_ADDRDEC_MISC_CFG__VCM_EN4_MASK 0x00000010L
8912#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
8913#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
8914#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0000F000L
8915#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x000F0000L
8916#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00300000L
8917#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x00C00000L
8918#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x07000000L
8919#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0x38000000L
8920//GCEA_ADDRDECDRAM_ADDR_HASH_BANK0
8921#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
8922#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
8923#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
8924#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
8925#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
8926#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
8927//GCEA_ADDRDECDRAM_ADDR_HASH_BANK1
8928#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
8929#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
8930#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
8931#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
8932#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
8933#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
8934//GCEA_ADDRDECDRAM_ADDR_HASH_BANK2
8935#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
8936#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
8937#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
8938#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
8939#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
8940#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
8941//GCEA_ADDRDECDRAM_ADDR_HASH_BANK3
8942#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
8943#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
8944#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
8945#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
8946#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
8947#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
8948//GCEA_ADDRDECDRAM_ADDR_HASH_BANK4
8949#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
8950#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
8951#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
8952#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
8953#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
8954#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
8955//GCEA_ADDRDECDRAM_ADDR_HASH_PC
8956#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
8957#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
8958#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
8959#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
8960#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
8961#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
8962//GCEA_ADDRDECDRAM_ADDR_HASH_PC2
8963#define GCEA_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
8964#define GCEA_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000001FL
8965//GCEA_ADDRDECDRAM_ADDR_HASH_CS0
8966#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
8967#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
8968#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
8969#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
8970//GCEA_ADDRDECDRAM_ADDR_HASH_CS1
8971#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
8972#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
8973#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
8974#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
8975//GCEA_ADDRDECDRAM_HARVEST_ENABLE
8976#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
8977#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
8978#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
8979#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
8980#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
8981#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
8982#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
8983#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
8984//GCEA_ADDRDEC0_BASE_ADDR_CS0
8985#define GCEA_ADDRDEC0_BASE_ADDR_CS0__CS_ENABLE__SHIFT 0x0
8986#define GCEA_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
8987#define GCEA_ADDRDEC0_BASE_ADDR_CS0__CS_ENABLE_MASK 0x00000001L
8988#define GCEA_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
8989//GCEA_ADDRDEC0_BASE_ADDR_CS1
8990#define GCEA_ADDRDEC0_BASE_ADDR_CS1__CS_ENABLE__SHIFT 0x0
8991#define GCEA_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
8992#define GCEA_ADDRDEC0_BASE_ADDR_CS1__CS_ENABLE_MASK 0x00000001L
8993#define GCEA_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
8994//GCEA_ADDRDEC0_BASE_ADDR_CS2
8995#define GCEA_ADDRDEC0_BASE_ADDR_CS2__CS_ENABLE__SHIFT 0x0
8996#define GCEA_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
8997#define GCEA_ADDRDEC0_BASE_ADDR_CS2__CS_ENABLE_MASK 0x00000001L
8998#define GCEA_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
8999//GCEA_ADDRDEC0_BASE_ADDR_CS3
9000#define GCEA_ADDRDEC0_BASE_ADDR_CS3__CS_ENABLE__SHIFT 0x0
9001#define GCEA_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
9002#define GCEA_ADDRDEC0_BASE_ADDR_CS3__CS_ENABLE_MASK 0x00000001L
9003#define GCEA_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
9004//GCEA_ADDRDEC0_BASE_ADDR_SECCS0
9005#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__CS_ENABLE__SHIFT 0x0
9006#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
9007#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__CS_ENABLE_MASK 0x00000001L
9008#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
9009//GCEA_ADDRDEC0_BASE_ADDR_SECCS1
9010#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__CS_ENABLE__SHIFT 0x0
9011#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
9012#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__CS_ENABLE_MASK 0x00000001L
9013#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
9014//GCEA_ADDRDEC0_BASE_ADDR_SECCS2
9015#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__CS_ENABLE__SHIFT 0x0
9016#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
9017#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__CS_ENABLE_MASK 0x00000001L
9018#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
9019//GCEA_ADDRDEC0_BASE_ADDR_SECCS3
9020#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__CS_ENABLE__SHIFT 0x0
9021#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
9022#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__CS_ENABLE_MASK 0x00000001L
9023#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
9024//GCEA_ADDRDEC0_ADDR_MASK_CS01
9025#define GCEA_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
9026#define GCEA_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
9027//GCEA_ADDRDEC0_ADDR_MASK_CS23
9028#define GCEA_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
9029#define GCEA_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
9030//GCEA_ADDRDEC0_ADDR_MASK_SECCS01
9031#define GCEA_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
9032#define GCEA_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
9033//GCEA_ADDRDEC0_ADDR_MASK_SECCS23
9034#define GCEA_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
9035#define GCEA_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
9036//GCEA_ADDRDEC0_ADDR_CFG_CS01
9037#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
9038#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
9039#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
9040#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
9041#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
9042#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
9043#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
9044#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
9045#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
9046#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
9047#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
9048#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
9049//GCEA_ADDRDEC0_ADDR_CFG_CS23
9050#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
9051#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
9052#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
9053#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
9054#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
9055#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
9056#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
9057#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
9058#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
9059#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
9060#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
9061#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
9062//GCEA_ADDRDEC0_ADDR_SEL_CS01
9063#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
9064#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
9065#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
9066#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
9067#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
9068#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
9069#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
9070#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
9071#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
9072#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
9073#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
9074#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x000F0000L
9075#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
9076#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
9077//GCEA_ADDRDEC0_ADDR_SEL_CS23
9078#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
9079#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
9080#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
9081#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
9082#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
9083#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
9084#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
9085#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
9086#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
9087#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
9088#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
9089#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x000F0000L
9090#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
9091#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
9092//GCEA_ADDRDEC0_COL_SEL_LO_CS01
9093#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
9094#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
9095#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
9096#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
9097#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
9098#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
9099#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
9100#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
9101#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
9102#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
9103#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
9104#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
9105#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
9106#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
9107#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
9108#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
9109//GCEA_ADDRDEC0_COL_SEL_LO_CS23
9110#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
9111#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
9112#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
9113#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
//GCEA_ADDRDEC0_COL_SEL_HI_CS01
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
//GCEA_ADDRDEC0_COL_SEL_HI_CS23
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
//GCEA_ADDRDEC0_RM_SEL_CS01
#define GCEA_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
#define GCEA_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
#define GCEA_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
#define GCEA_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define GCEA_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
#define GCEA_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
#define GCEA_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
#define GCEA_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//GCEA_ADDRDEC0_RM_SEL_CS23
#define GCEA_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
#define GCEA_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
#define GCEA_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
#define GCEA_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define GCEA_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
#define GCEA_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
#define GCEA_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
#define GCEA_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//GCEA_ADDRDEC0_RM_SEL_SECCS01
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//GCEA_ADDRDEC0_RM_SEL_SECCS23
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//GCEA_ADDRDEC1_BASE_ADDR_CS0
#define GCEA_ADDRDEC1_BASE_ADDR_CS0__CS_ENABLE__SHIFT 0x0
#define GCEA_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
#define GCEA_ADDRDEC1_BASE_ADDR_CS0__CS_ENABLE_MASK 0x00000001L
#define GCEA_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_BASE_ADDR_CS1
#define GCEA_ADDRDEC1_BASE_ADDR_CS1__CS_ENABLE__SHIFT 0x0
#define GCEA_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
#define GCEA_ADDRDEC1_BASE_ADDR_CS1__CS_ENABLE_MASK 0x00000001L
#define GCEA_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_BASE_ADDR_CS2
#define GCEA_ADDRDEC1_BASE_ADDR_CS2__CS_ENABLE__SHIFT 0x0
#define GCEA_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
#define GCEA_ADDRDEC1_BASE_ADDR_CS2__CS_ENABLE_MASK 0x00000001L
#define GCEA_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_BASE_ADDR_CS3
#define GCEA_ADDRDEC1_BASE_ADDR_CS3__CS_ENABLE__SHIFT 0x0
#define GCEA_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
#define GCEA_ADDRDEC1_BASE_ADDR_CS3__CS_ENABLE_MASK 0x00000001L
#define GCEA_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_BASE_ADDR_SECCS0
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__CS_ENABLE__SHIFT 0x0
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__CS_ENABLE_MASK 0x00000001L
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_BASE_ADDR_SECCS1
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__CS_ENABLE__SHIFT 0x0
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__CS_ENABLE_MASK 0x00000001L
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_BASE_ADDR_SECCS2
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__CS_ENABLE__SHIFT 0x0
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__CS_ENABLE_MASK 0x00000001L
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_BASE_ADDR_SECCS3
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__CS_ENABLE__SHIFT 0x0
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__CS_ENABLE_MASK 0x00000001L
#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
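/*
 * Illustrative sketch only, not part of the generated register definitions:
 * each *_BASE_ADDR_* register above pairs a CS_ENABLE bit with a BASE_ADDR
 * field, and such definitions are normally consumed through their paired
 * _MASK/__SHIFT macros. The hypothetical helper below shows that pattern
 * for GCEA_ADDRDEC1_BASE_ADDR_CS0; it is not used anywhere in this file.
 */
static inline unsigned int gcea_addrdec1_cs0_base(unsigned int reg_val)
{
	/* Return 0 when the chip select is disabled. */
	if (!(reg_val & GCEA_ADDRDEC1_BASE_ADDR_CS0__CS_ENABLE_MASK))
		return 0;
	return (reg_val & GCEA_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK) >>
	       GCEA_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT;
}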
//GCEA_ADDRDEC1_ADDR_MASK_CS01
#define GCEA_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
#define GCEA_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_ADDR_MASK_CS23
#define GCEA_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
#define GCEA_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_ADDR_MASK_SECCS01
#define GCEA_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
#define GCEA_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_ADDR_MASK_SECCS23
#define GCEA_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
#define GCEA_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
//GCEA_ADDRDEC1_ADDR_CFG_CS01
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
//GCEA_ADDRDEC1_ADDR_CFG_CS23
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
//GCEA_ADDRDEC1_ADDR_SEL_CS01
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x000F0000L
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
//GCEA_ADDRDEC1_ADDR_SEL_CS23
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x000F0000L
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
//GCEA_ADDRDEC1_COL_SEL_LO_CS01
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
//GCEA_ADDRDEC1_COL_SEL_LO_CS23
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
//GCEA_ADDRDEC1_COL_SEL_HI_CS01
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
//GCEA_ADDRDEC1_COL_SEL_HI_CS23
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
//GCEA_ADDRDEC1_RM_SEL_CS01
#define GCEA_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
#define GCEA_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
#define GCEA_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
#define GCEA_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define GCEA_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
#define GCEA_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
#define GCEA_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
#define GCEA_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//GCEA_ADDRDEC1_RM_SEL_CS23
#define GCEA_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
#define GCEA_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
#define GCEA_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
#define GCEA_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define GCEA_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
#define GCEA_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
#define GCEA_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
#define GCEA_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//GCEA_ADDRDEC1_RM_SEL_SECCS01
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//GCEA_ADDRDEC1_RM_SEL_SECCS23
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//GCEA_IO_RD_CLI2GRP_MAP0
#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
//GCEA_IO_RD_CLI2GRP_MAP1
#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
//GCEA_IO_WR_CLI2GRP_MAP0
#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
//GCEA_IO_WR_CLI2GRP_MAP1
#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
//GCEA_IO_RD_COMBINE_FLUSH
#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
//GCEA_IO_WR_COMBINE_FLUSH
#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
//GCEA_IO_GROUP_BURST
#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
//GCEA_IO_RD_PRI_AGE
#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
//GCEA_IO_WR_PRI_AGE
#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
//GCEA_IO_RD_PRI_QUEUING
#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
//GCEA_IO_WR_PRI_QUEUING
#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
//GCEA_IO_RD_PRI_FIXED
#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
//GCEA_IO_WR_PRI_FIXED
#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
//GCEA_IO_RD_PRI_URGENCY
#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
//GCEA_IO_WR_PRI_URGENCY
#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
//GCEA_IO_RD_PRI_URGENCY_MASK
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID0_MASK__SHIFT 0x0
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID1_MASK__SHIFT 0x1
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID2_MASK__SHIFT 0x2
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID3_MASK__SHIFT 0x3
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID4_MASK__SHIFT 0x4
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID5_MASK__SHIFT 0x5
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID6_MASK__SHIFT 0x6
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID7_MASK__SHIFT 0x7
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID8_MASK__SHIFT 0x8
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID9_MASK__SHIFT 0x9
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID10_MASK__SHIFT 0xa
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID11_MASK__SHIFT 0xb
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID12_MASK__SHIFT 0xc
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID13_MASK__SHIFT 0xd
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID14_MASK__SHIFT 0xe
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID15_MASK__SHIFT 0xf
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID16_MASK__SHIFT 0x10
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID17_MASK__SHIFT 0x11
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID18_MASK__SHIFT 0x12
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID19_MASK__SHIFT 0x13
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID20_MASK__SHIFT 0x14
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID21_MASK__SHIFT 0x15
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID22_MASK__SHIFT 0x16
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID23_MASK__SHIFT 0x17
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID24_MASK__SHIFT 0x18
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID25_MASK__SHIFT 0x19
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID26_MASK__SHIFT 0x1a
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID27_MASK__SHIFT 0x1b
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID28_MASK__SHIFT 0x1c
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID29_MASK__SHIFT 0x1d
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID30_MASK__SHIFT 0x1e
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID31_MASK__SHIFT 0x1f
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID0_MASK_MASK 0x00000001L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID1_MASK_MASK 0x00000002L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID2_MASK_MASK 0x00000004L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID3_MASK_MASK 0x00000008L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID4_MASK_MASK 0x00000010L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID5_MASK_MASK 0x00000020L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID6_MASK_MASK 0x00000040L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID7_MASK_MASK 0x00000080L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID8_MASK_MASK 0x00000100L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID9_MASK_MASK 0x00000200L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID10_MASK_MASK 0x00000400L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID11_MASK_MASK 0x00000800L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID12_MASK_MASK 0x00001000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID13_MASK_MASK 0x00002000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID14_MASK_MASK 0x00004000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID15_MASK_MASK 0x00008000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID16_MASK_MASK 0x00010000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID17_MASK_MASK 0x00020000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID18_MASK_MASK 0x00040000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID19_MASK_MASK 0x00080000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID20_MASK_MASK 0x00100000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID21_MASK_MASK 0x00200000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID22_MASK_MASK 0x00400000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID23_MASK_MASK 0x00800000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID24_MASK_MASK 0x01000000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID25_MASK_MASK 0x02000000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID26_MASK_MASK 0x04000000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID27_MASK_MASK 0x08000000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID28_MASK_MASK 0x10000000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID29_MASK_MASK 0x20000000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID30_MASK_MASK 0x40000000L
#define GCEA_IO_RD_PRI_URGENCY_MASK__CID31_MASK_MASK 0x80000000L
//GCEA_IO_WR_PRI_URGENCY_MASK
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID0_MASK__SHIFT 0x0
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID1_MASK__SHIFT 0x1
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID2_MASK__SHIFT 0x2
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID3_MASK__SHIFT 0x3
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID4_MASK__SHIFT 0x4
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID5_MASK__SHIFT 0x5
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID6_MASK__SHIFT 0x6
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID7_MASK__SHIFT 0x7
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID8_MASK__SHIFT 0x8
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID9_MASK__SHIFT 0x9
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID10_MASK__SHIFT 0xa
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID11_MASK__SHIFT 0xb
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID12_MASK__SHIFT 0xc
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID13_MASK__SHIFT 0xd
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID14_MASK__SHIFT 0xe
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID15_MASK__SHIFT 0xf
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID16_MASK__SHIFT 0x10
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID17_MASK__SHIFT 0x11
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID18_MASK__SHIFT 0x12
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID19_MASK__SHIFT 0x13
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID20_MASK__SHIFT 0x14
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID21_MASK__SHIFT 0x15
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID22_MASK__SHIFT 0x16
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID23_MASK__SHIFT 0x17
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID24_MASK__SHIFT 0x18
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID25_MASK__SHIFT 0x19
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID26_MASK__SHIFT 0x1a
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID27_MASK__SHIFT 0x1b
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID28_MASK__SHIFT 0x1c
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID29_MASK__SHIFT 0x1d
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID30_MASK__SHIFT 0x1e
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID31_MASK__SHIFT 0x1f
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID0_MASK_MASK 0x00000001L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID1_MASK_MASK 0x00000002L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID2_MASK_MASK 0x00000004L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID3_MASK_MASK 0x00000008L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID4_MASK_MASK 0x00000010L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID5_MASK_MASK 0x00000020L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID6_MASK_MASK 0x00000040L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID7_MASK_MASK 0x00000080L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID8_MASK_MASK 0x00000100L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID9_MASK_MASK 0x00000200L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID10_MASK_MASK 0x00000400L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID11_MASK_MASK 0x00000800L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID12_MASK_MASK 0x00001000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID13_MASK_MASK 0x00002000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID14_MASK_MASK 0x00004000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID15_MASK_MASK 0x00008000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID16_MASK_MASK 0x00010000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID17_MASK_MASK 0x00020000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID18_MASK_MASK 0x00040000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID19_MASK_MASK 0x00080000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID20_MASK_MASK 0x00100000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID21_MASK_MASK 0x00200000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID22_MASK_MASK 0x00400000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID23_MASK_MASK 0x00800000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID24_MASK_MASK 0x01000000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID25_MASK_MASK 0x02000000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID26_MASK_MASK 0x04000000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID27_MASK_MASK 0x08000000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID28_MASK_MASK 0x10000000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID29_MASK_MASK 0x20000000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID30_MASK_MASK 0x40000000L
#define GCEA_IO_WR_PRI_URGENCY_MASK__CID31_MASK_MASK 0x80000000L
//GCEA_IO_RD_PRI_QUANT_PRI1
#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
//GCEA_IO_RD_PRI_QUANT_PRI2
#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
//GCEA_IO_RD_PRI_QUANT_PRI3
#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
//GCEA_IO_WR_PRI_QUANT_PRI1
#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
//GCEA_IO_WR_PRI_QUANT_PRI2
#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
//GCEA_IO_WR_PRI_QUANT_PRI3
#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
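/*
 * Illustrative sketch only: the GCEA_IO_*_PRI_QUANT_PRI* registers above are
 * four 8-bit per-group threshold fields, so programming one group is a plain
 * read-modify-write using that field's _MASK/__SHIFT pair. The hypothetical
 * helper below shows the pattern for GROUP0 of GCEA_IO_RD_PRI_QUANT_PRI1; it
 * is an example of macro usage, not a driver API.
 */
static inline unsigned int
gcea_io_rd_pri_quant_pri1_set_group0(unsigned int reg_val, unsigned int thresh)
{
	reg_val &= ~GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK;
	reg_val |= (thresh << GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT) &
		   GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK;
	return reg_val;
}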
//GCEA_SDP_ARB_DRAM
#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
//GCEA_SDP_ARB_FINAL
#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
#define GCEA_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
#define GCEA_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
#define GCEA_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
#define GCEA_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
#define GCEA_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
#define GCEA_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
#define GCEA_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
#define GCEA_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
#define GCEA_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
#define GCEA_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
#define GCEA_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
#define GCEA_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
#define GCEA_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
#define GCEA_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
#define GCEA_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
#define GCEA_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
//GCEA_SDP_DRAM_PRIORITY
#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
//GCEA_SDP_IO_PRIORITY
#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
//GCEA_SDP_CREDITS
#define GCEA_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
#define GCEA_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
#define GCEA_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS__SHIFT 0x18
#define GCEA_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
#define GCEA_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
#define GCEA_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS_MASK 0x3F000000L
//GCEA_SDP_TAG_RESERVE0
#define GCEA_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
#define GCEA_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
#define GCEA_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
#define GCEA_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
#define GCEA_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
#define GCEA_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
#define GCEA_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
#define GCEA_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
//GCEA_SDP_TAG_RESERVE1
#define GCEA_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
#define GCEA_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
#define GCEA_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
#define GCEA_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
#define GCEA_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
#define GCEA_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
#define GCEA_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
#define GCEA_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
//GCEA_SDP_VCC_RESERVE0
#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
//GCEA_SDP_VCC_RESERVE1
#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
//GCEA_SDP_VCD_RESERVE0
#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
//GCEA_SDP_VCD_RESERVE1
#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
//GCEA_SDP_REQ_CNTL
#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x4
#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000010L
//GCEA_MISC
#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
#define GCEA_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
#define GCEA_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
#define GCEA_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
#define GCEA_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
#define GCEA_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
#define GCEA_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
#define GCEA_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
#define GCEA_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
#define GCEA_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
#define GCEA_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
#define GCEA_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
#define GCEA_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
#define GCEA_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
#define GCEA_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
#define GCEA_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
#define GCEA_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
#define GCEA_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
#define GCEA_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
#define GCEA_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
#define GCEA_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
#define GCEA_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
#define GCEA_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
#define GCEA_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
#define GCEA_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
//GCEA_LATENCY_SAMPLING
#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
//GCEA_PERFCOUNTER_LO
#define GCEA_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
#define GCEA_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
//GCEA_PERFCOUNTER_HI
#define GCEA_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
#define GCEA_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
10135//GCEA_PERFCOUNTER0_CFG
10136#define GCEA_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
10137#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
10138#define GCEA_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
10139#define GCEA_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
10140#define GCEA_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
10141#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
10142#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
10143#define GCEA_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
10144#define GCEA_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
10145#define GCEA_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
10146//GCEA_PERFCOUNTER1_CFG
10147#define GCEA_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
10148#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
10149#define GCEA_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
10150#define GCEA_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
10151#define GCEA_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
10152#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
10153#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
10154#define GCEA_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
10155#define GCEA_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
10156#define GCEA_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
10157//GCEA_PERFCOUNTER_RSLT_CNTL
10158#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
10159#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
10160#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
10161#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
10162#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
10163#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
10164#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
10165#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
10166#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
10167#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
10168#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
10169#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
10170
10171
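/*
 * Illustrative sketch, not part of the register map: the *__SHIFT /
 * *_MASK pairs above are meant to be combined with plain bit operations
 * when a field is programmed.  The helper below is hypothetical and
 * assumes only the GCEA_PERFCOUNTER0_CFG macros defined in this block;
 * real driver code would go through its own register access wrappers.
 */
static inline unsigned int gcea_perfcounter0_cfg_value(unsigned int perf_sel,
						       unsigned int enable)
{
	unsigned int v = 0;

	/* PERF_SEL occupies bits 7:0, ENABLE is the single bit 28 */
	v |= (perf_sel << GCEA_PERFCOUNTER0_CFG__PERF_SEL__SHIFT) &
	     GCEA_PERFCOUNTER0_CFG__PERF_SEL_MASK;
	v |= (enable << GCEA_PERFCOUNTER0_CFG__ENABLE__SHIFT) &
	     GCEA_PERFCOUNTER0_CFG__ENABLE_MASK;

	return v;
}
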
10172// addressBlock: gc_tcdec
10173//TCP_INVALIDATE
10174#define TCP_INVALIDATE__START__SHIFT 0x0
10175#define TCP_INVALIDATE__START_MASK 0x00000001L
10176//TCP_STATUS
10177#define TCP_STATUS__TCP_BUSY__SHIFT 0x0
10178#define TCP_STATUS__INPUT_BUSY__SHIFT 0x1
10179#define TCP_STATUS__ADRS_BUSY__SHIFT 0x2
10180#define TCP_STATUS__TAGRAMS_BUSY__SHIFT 0x3
10181#define TCP_STATUS__CNTRL_BUSY__SHIFT 0x4
10182#define TCP_STATUS__LFIFO_BUSY__SHIFT 0x5
10183#define TCP_STATUS__READ_BUSY__SHIFT 0x6
10184#define TCP_STATUS__FORMAT_BUSY__SHIFT 0x7
10185#define TCP_STATUS__VM_BUSY__SHIFT 0x8
10186#define TCP_STATUS__TCP_BUSY_MASK 0x00000001L
10187#define TCP_STATUS__INPUT_BUSY_MASK 0x00000002L
10188#define TCP_STATUS__ADRS_BUSY_MASK 0x00000004L
10189#define TCP_STATUS__TAGRAMS_BUSY_MASK 0x00000008L
10190#define TCP_STATUS__CNTRL_BUSY_MASK 0x00000010L
10191#define TCP_STATUS__LFIFO_BUSY_MASK 0x00000020L
10192#define TCP_STATUS__READ_BUSY_MASK 0x00000040L
10193#define TCP_STATUS__FORMAT_BUSY_MASK 0x00000080L
10194#define TCP_STATUS__VM_BUSY_MASK 0x00000100L
10195//TCP_CNTL
10196#define TCP_CNTL__FORCE_HIT__SHIFT 0x0
10197#define TCP_CNTL__FORCE_MISS__SHIFT 0x1
10198#define TCP_CNTL__L1_SIZE__SHIFT 0x2
10199#define TCP_CNTL__FLAT_BUF_HASH_ENABLE__SHIFT 0x4
10200#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE__SHIFT 0x5
10201#define TCP_CNTL__FORCE_EOW_TOTAL_CNT__SHIFT 0xf
10202#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT__SHIFT 0x16
10203#define TCP_CNTL__DISABLE_Z_MAP__SHIFT 0x1c
10204#define TCP_CNTL__INV_ALL_VMIDS__SHIFT 0x1d
10205#define TCP_CNTL__ASTC_VE_MSB_TOLERANT__SHIFT 0x1e
10206#define TCP_CNTL__FORCE_HIT_MASK 0x00000001L
10207#define TCP_CNTL__FORCE_MISS_MASK 0x00000002L
10208#define TCP_CNTL__L1_SIZE_MASK 0x0000000CL
10209#define TCP_CNTL__FLAT_BUF_HASH_ENABLE_MASK 0x00000010L
10210#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE_MASK 0x00000020L
10211#define TCP_CNTL__FORCE_EOW_TOTAL_CNT_MASK 0x001F8000L
10212#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT_MASK 0x0FC00000L
10213#define TCP_CNTL__DISABLE_Z_MAP_MASK 0x10000000L
10214#define TCP_CNTL__INV_ALL_VMIDS_MASK 0x20000000L
10215#define TCP_CNTL__ASTC_VE_MSB_TOLERANT_MASK 0x40000000L
10216//TCP_CHAN_STEER_LO
10217#define TCP_CHAN_STEER_LO__CHAN0__SHIFT 0x0
10218#define TCP_CHAN_STEER_LO__CHAN1__SHIFT 0x4
10219#define TCP_CHAN_STEER_LO__CHAN2__SHIFT 0x8
10220#define TCP_CHAN_STEER_LO__CHAN3__SHIFT 0xc
10221#define TCP_CHAN_STEER_LO__CHAN4__SHIFT 0x10
10222#define TCP_CHAN_STEER_LO__CHAN5__SHIFT 0x14
10223#define TCP_CHAN_STEER_LO__CHAN6__SHIFT 0x18
10224#define TCP_CHAN_STEER_LO__CHAN7__SHIFT 0x1c
10225#define TCP_CHAN_STEER_LO__CHAN0_MASK 0x0000000FL
10226#define TCP_CHAN_STEER_LO__CHAN1_MASK 0x000000F0L
10227#define TCP_CHAN_STEER_LO__CHAN2_MASK 0x00000F00L
10228#define TCP_CHAN_STEER_LO__CHAN3_MASK 0x0000F000L
10229#define TCP_CHAN_STEER_LO__CHAN4_MASK 0x000F0000L
10230#define TCP_CHAN_STEER_LO__CHAN5_MASK 0x00F00000L
10231#define TCP_CHAN_STEER_LO__CHAN6_MASK 0x0F000000L
10232#define TCP_CHAN_STEER_LO__CHAN7_MASK 0xF0000000L
10233//TCP_CHAN_STEER_HI
10234#define TCP_CHAN_STEER_HI__CHAN8__SHIFT 0x0
10235#define TCP_CHAN_STEER_HI__CHAN9__SHIFT 0x4
10236#define TCP_CHAN_STEER_HI__CHANA__SHIFT 0x8
10237#define TCP_CHAN_STEER_HI__CHANB__SHIFT 0xc
10238#define TCP_CHAN_STEER_HI__CHANC__SHIFT 0x10
10239#define TCP_CHAN_STEER_HI__CHAND__SHIFT 0x14
10240#define TCP_CHAN_STEER_HI__CHANE__SHIFT 0x18
10241#define TCP_CHAN_STEER_HI__CHANF__SHIFT 0x1c
10242#define TCP_CHAN_STEER_HI__CHAN8_MASK 0x0000000FL
10243#define TCP_CHAN_STEER_HI__CHAN9_MASK 0x000000F0L
10244#define TCP_CHAN_STEER_HI__CHANA_MASK 0x00000F00L
10245#define TCP_CHAN_STEER_HI__CHANB_MASK 0x0000F000L
10246#define TCP_CHAN_STEER_HI__CHANC_MASK 0x000F0000L
10247#define TCP_CHAN_STEER_HI__CHAND_MASK 0x00F00000L
10248#define TCP_CHAN_STEER_HI__CHANE_MASK 0x0F000000L
10249#define TCP_CHAN_STEER_HI__CHANF_MASK 0xF0000000L
10250//TCP_ADDR_CONFIG
10251#define TCP_ADDR_CONFIG__NUM_TCC_BANKS__SHIFT 0x0
10252#define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT 0x4
10253#define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT 0x6
10254#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT 0x9
10255#define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0x0000000FL
10256#define TCP_ADDR_CONFIG__NUM_BANKS_MASK 0x00000030L
10257#define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK 0x000001C0L
10258#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK 0x00000200L
10259//TCP_CREDIT
10260#define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x0
10261#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
10262#define TCP_CREDIT__TD_CREDIT__SHIFT 0x1d
10263#define TCP_CREDIT__LFIFO_CREDIT_MASK 0x000003FFL
10264#define TCP_CREDIT__REQ_FIFO_CREDIT_MASK 0x007F0000L
10265#define TCP_CREDIT__TD_CREDIT_MASK 0xE0000000L
10266//TCP_BUFFER_ADDR_HASH_CNTL
10267#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS__SHIFT 0x0
10268#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS__SHIFT 0x8
10269#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT__SHIFT 0x10
10270#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT__SHIFT 0x18
10271#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS_MASK 0x00000007L
10272#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS_MASK 0x00000700L
10273#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT_MASK 0x00070000L
10274#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT_MASK 0x07000000L
10275//TCP_EDC_CNT
10276#define TCP_EDC_CNT__SEC_COUNT__SHIFT 0x0
10277#define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT 0x8
10278#define TCP_EDC_CNT__DED_COUNT__SHIFT 0x10
10279#define TCP_EDC_CNT__SEC_COUNT_MASK 0x000000FFL
10280#define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK 0x0000FF00L
10281#define TCP_EDC_CNT__DED_COUNT_MASK 0x00FF0000L
10282//TC_CFG_L1_LOAD_POLICY0
10283#define TC_CFG_L1_LOAD_POLICY0__POLICY_0__SHIFT 0x0
10284#define TC_CFG_L1_LOAD_POLICY0__POLICY_1__SHIFT 0x2
10285#define TC_CFG_L1_LOAD_POLICY0__POLICY_2__SHIFT 0x4
10286#define TC_CFG_L1_LOAD_POLICY0__POLICY_3__SHIFT 0x6
10287#define TC_CFG_L1_LOAD_POLICY0__POLICY_4__SHIFT 0x8
10288#define TC_CFG_L1_LOAD_POLICY0__POLICY_5__SHIFT 0xa
10289#define TC_CFG_L1_LOAD_POLICY0__POLICY_6__SHIFT 0xc
10290#define TC_CFG_L1_LOAD_POLICY0__POLICY_7__SHIFT 0xe
10291#define TC_CFG_L1_LOAD_POLICY0__POLICY_8__SHIFT 0x10
10292#define TC_CFG_L1_LOAD_POLICY0__POLICY_9__SHIFT 0x12
10293#define TC_CFG_L1_LOAD_POLICY0__POLICY_10__SHIFT 0x14
10294#define TC_CFG_L1_LOAD_POLICY0__POLICY_11__SHIFT 0x16
10295#define TC_CFG_L1_LOAD_POLICY0__POLICY_12__SHIFT 0x18
10296#define TC_CFG_L1_LOAD_POLICY0__POLICY_13__SHIFT 0x1a
10297#define TC_CFG_L1_LOAD_POLICY0__POLICY_14__SHIFT 0x1c
10298#define TC_CFG_L1_LOAD_POLICY0__POLICY_15__SHIFT 0x1e
10299#define TC_CFG_L1_LOAD_POLICY0__POLICY_0_MASK 0x00000003L
10300#define TC_CFG_L1_LOAD_POLICY0__POLICY_1_MASK 0x0000000CL
10301#define TC_CFG_L1_LOAD_POLICY0__POLICY_2_MASK 0x00000030L
10302#define TC_CFG_L1_LOAD_POLICY0__POLICY_3_MASK 0x000000C0L
10303#define TC_CFG_L1_LOAD_POLICY0__POLICY_4_MASK 0x00000300L
10304#define TC_CFG_L1_LOAD_POLICY0__POLICY_5_MASK 0x00000C00L
10305#define TC_CFG_L1_LOAD_POLICY0__POLICY_6_MASK 0x00003000L
10306#define TC_CFG_L1_LOAD_POLICY0__POLICY_7_MASK 0x0000C000L
10307#define TC_CFG_L1_LOAD_POLICY0__POLICY_8_MASK 0x00030000L
10308#define TC_CFG_L1_LOAD_POLICY0__POLICY_9_MASK 0x000C0000L
10309#define TC_CFG_L1_LOAD_POLICY0__POLICY_10_MASK 0x00300000L
10310#define TC_CFG_L1_LOAD_POLICY0__POLICY_11_MASK 0x00C00000L
10311#define TC_CFG_L1_LOAD_POLICY0__POLICY_12_MASK 0x03000000L
10312#define TC_CFG_L1_LOAD_POLICY0__POLICY_13_MASK 0x0C000000L
10313#define TC_CFG_L1_LOAD_POLICY0__POLICY_14_MASK 0x30000000L
10314#define TC_CFG_L1_LOAD_POLICY0__POLICY_15_MASK 0xC0000000L
10315//TC_CFG_L1_LOAD_POLICY1
10316#define TC_CFG_L1_LOAD_POLICY1__POLICY_16__SHIFT 0x0
10317#define TC_CFG_L1_LOAD_POLICY1__POLICY_17__SHIFT 0x2
10318#define TC_CFG_L1_LOAD_POLICY1__POLICY_18__SHIFT 0x4
10319#define TC_CFG_L1_LOAD_POLICY1__POLICY_19__SHIFT 0x6
10320#define TC_CFG_L1_LOAD_POLICY1__POLICY_20__SHIFT 0x8
10321#define TC_CFG_L1_LOAD_POLICY1__POLICY_21__SHIFT 0xa
10322#define TC_CFG_L1_LOAD_POLICY1__POLICY_22__SHIFT 0xc
10323#define TC_CFG_L1_LOAD_POLICY1__POLICY_23__SHIFT 0xe
10324#define TC_CFG_L1_LOAD_POLICY1__POLICY_24__SHIFT 0x10
10325#define TC_CFG_L1_LOAD_POLICY1__POLICY_25__SHIFT 0x12
10326#define TC_CFG_L1_LOAD_POLICY1__POLICY_26__SHIFT 0x14
10327#define TC_CFG_L1_LOAD_POLICY1__POLICY_27__SHIFT 0x16
10328#define TC_CFG_L1_LOAD_POLICY1__POLICY_28__SHIFT 0x18
10329#define TC_CFG_L1_LOAD_POLICY1__POLICY_29__SHIFT 0x1a
10330#define TC_CFG_L1_LOAD_POLICY1__POLICY_30__SHIFT 0x1c
10331#define TC_CFG_L1_LOAD_POLICY1__POLICY_31__SHIFT 0x1e
10332#define TC_CFG_L1_LOAD_POLICY1__POLICY_16_MASK 0x00000003L
10333#define TC_CFG_L1_LOAD_POLICY1__POLICY_17_MASK 0x0000000CL
10334#define TC_CFG_L1_LOAD_POLICY1__POLICY_18_MASK 0x00000030L
10335#define TC_CFG_L1_LOAD_POLICY1__POLICY_19_MASK 0x000000C0L
10336#define TC_CFG_L1_LOAD_POLICY1__POLICY_20_MASK 0x00000300L
10337#define TC_CFG_L1_LOAD_POLICY1__POLICY_21_MASK 0x00000C00L
10338#define TC_CFG_L1_LOAD_POLICY1__POLICY_22_MASK 0x00003000L
10339#define TC_CFG_L1_LOAD_POLICY1__POLICY_23_MASK 0x0000C000L
10340#define TC_CFG_L1_LOAD_POLICY1__POLICY_24_MASK 0x00030000L
10341#define TC_CFG_L1_LOAD_POLICY1__POLICY_25_MASK 0x000C0000L
10342#define TC_CFG_L1_LOAD_POLICY1__POLICY_26_MASK 0x00300000L
10343#define TC_CFG_L1_LOAD_POLICY1__POLICY_27_MASK 0x00C00000L
10344#define TC_CFG_L1_LOAD_POLICY1__POLICY_28_MASK 0x03000000L
10345#define TC_CFG_L1_LOAD_POLICY1__POLICY_29_MASK 0x0C000000L
10346#define TC_CFG_L1_LOAD_POLICY1__POLICY_30_MASK 0x30000000L
10347#define TC_CFG_L1_LOAD_POLICY1__POLICY_31_MASK 0xC0000000L
10348//TC_CFG_L1_STORE_POLICY
10349#define TC_CFG_L1_STORE_POLICY__POLICY_0__SHIFT 0x0
10350#define TC_CFG_L1_STORE_POLICY__POLICY_1__SHIFT 0x1
10351#define TC_CFG_L1_STORE_POLICY__POLICY_2__SHIFT 0x2
10352#define TC_CFG_L1_STORE_POLICY__POLICY_3__SHIFT 0x3
10353#define TC_CFG_L1_STORE_POLICY__POLICY_4__SHIFT 0x4
10354#define TC_CFG_L1_STORE_POLICY__POLICY_5__SHIFT 0x5
10355#define TC_CFG_L1_STORE_POLICY__POLICY_6__SHIFT 0x6
10356#define TC_CFG_L1_STORE_POLICY__POLICY_7__SHIFT 0x7
10357#define TC_CFG_L1_STORE_POLICY__POLICY_8__SHIFT 0x8
10358#define TC_CFG_L1_STORE_POLICY__POLICY_9__SHIFT 0x9
10359#define TC_CFG_L1_STORE_POLICY__POLICY_10__SHIFT 0xa
10360#define TC_CFG_L1_STORE_POLICY__POLICY_11__SHIFT 0xb
10361#define TC_CFG_L1_STORE_POLICY__POLICY_12__SHIFT 0xc
10362#define TC_CFG_L1_STORE_POLICY__POLICY_13__SHIFT 0xd
10363#define TC_CFG_L1_STORE_POLICY__POLICY_14__SHIFT 0xe
10364#define TC_CFG_L1_STORE_POLICY__POLICY_15__SHIFT 0xf
10365#define TC_CFG_L1_STORE_POLICY__POLICY_16__SHIFT 0x10
10366#define TC_CFG_L1_STORE_POLICY__POLICY_17__SHIFT 0x11
10367#define TC_CFG_L1_STORE_POLICY__POLICY_18__SHIFT 0x12
10368#define TC_CFG_L1_STORE_POLICY__POLICY_19__SHIFT 0x13
10369#define TC_CFG_L1_STORE_POLICY__POLICY_20__SHIFT 0x14
10370#define TC_CFG_L1_STORE_POLICY__POLICY_21__SHIFT 0x15
10371#define TC_CFG_L1_STORE_POLICY__POLICY_22__SHIFT 0x16
10372#define TC_CFG_L1_STORE_POLICY__POLICY_23__SHIFT 0x17
10373#define TC_CFG_L1_STORE_POLICY__POLICY_24__SHIFT 0x18
10374#define TC_CFG_L1_STORE_POLICY__POLICY_25__SHIFT 0x19
10375#define TC_CFG_L1_STORE_POLICY__POLICY_26__SHIFT 0x1a
10376#define TC_CFG_L1_STORE_POLICY__POLICY_27__SHIFT 0x1b
10377#define TC_CFG_L1_STORE_POLICY__POLICY_28__SHIFT 0x1c
10378#define TC_CFG_L1_STORE_POLICY__POLICY_29__SHIFT 0x1d
10379#define TC_CFG_L1_STORE_POLICY__POLICY_30__SHIFT 0x1e
10380#define TC_CFG_L1_STORE_POLICY__POLICY_31__SHIFT 0x1f
10381#define TC_CFG_L1_STORE_POLICY__POLICY_0_MASK 0x00000001L
10382#define TC_CFG_L1_STORE_POLICY__POLICY_1_MASK 0x00000002L
10383#define TC_CFG_L1_STORE_POLICY__POLICY_2_MASK 0x00000004L
10384#define TC_CFG_L1_STORE_POLICY__POLICY_3_MASK 0x00000008L
10385#define TC_CFG_L1_STORE_POLICY__POLICY_4_MASK 0x00000010L
10386#define TC_CFG_L1_STORE_POLICY__POLICY_5_MASK 0x00000020L
10387#define TC_CFG_L1_STORE_POLICY__POLICY_6_MASK 0x00000040L
10388#define TC_CFG_L1_STORE_POLICY__POLICY_7_MASK 0x00000080L
10389#define TC_CFG_L1_STORE_POLICY__POLICY_8_MASK 0x00000100L
10390#define TC_CFG_L1_STORE_POLICY__POLICY_9_MASK 0x00000200L
10391#define TC_CFG_L1_STORE_POLICY__POLICY_10_MASK 0x00000400L
10392#define TC_CFG_L1_STORE_POLICY__POLICY_11_MASK 0x00000800L
10393#define TC_CFG_L1_STORE_POLICY__POLICY_12_MASK 0x00001000L
10394#define TC_CFG_L1_STORE_POLICY__POLICY_13_MASK 0x00002000L
10395#define TC_CFG_L1_STORE_POLICY__POLICY_14_MASK 0x00004000L
10396#define TC_CFG_L1_STORE_POLICY__POLICY_15_MASK 0x00008000L
10397#define TC_CFG_L1_STORE_POLICY__POLICY_16_MASK 0x00010000L
10398#define TC_CFG_L1_STORE_POLICY__POLICY_17_MASK 0x00020000L
10399#define TC_CFG_L1_STORE_POLICY__POLICY_18_MASK 0x00040000L
10400#define TC_CFG_L1_STORE_POLICY__POLICY_19_MASK 0x00080000L
10401#define TC_CFG_L1_STORE_POLICY__POLICY_20_MASK 0x00100000L
10402#define TC_CFG_L1_STORE_POLICY__POLICY_21_MASK 0x00200000L
10403#define TC_CFG_L1_STORE_POLICY__POLICY_22_MASK 0x00400000L
10404#define TC_CFG_L1_STORE_POLICY__POLICY_23_MASK 0x00800000L
10405#define TC_CFG_L1_STORE_POLICY__POLICY_24_MASK 0x01000000L
10406#define TC_CFG_L1_STORE_POLICY__POLICY_25_MASK 0x02000000L
10407#define TC_CFG_L1_STORE_POLICY__POLICY_26_MASK 0x04000000L
10408#define TC_CFG_L1_STORE_POLICY__POLICY_27_MASK 0x08000000L
10409#define TC_CFG_L1_STORE_POLICY__POLICY_28_MASK 0x10000000L
10410#define TC_CFG_L1_STORE_POLICY__POLICY_29_MASK 0x20000000L
10411#define TC_CFG_L1_STORE_POLICY__POLICY_30_MASK 0x40000000L
10412#define TC_CFG_L1_STORE_POLICY__POLICY_31_MASK 0x80000000L
10413//TC_CFG_L2_LOAD_POLICY0
10414#define TC_CFG_L2_LOAD_POLICY0__POLICY_0__SHIFT 0x0
10415#define TC_CFG_L2_LOAD_POLICY0__POLICY_1__SHIFT 0x2
10416#define TC_CFG_L2_LOAD_POLICY0__POLICY_2__SHIFT 0x4
10417#define TC_CFG_L2_LOAD_POLICY0__POLICY_3__SHIFT 0x6
10418#define TC_CFG_L2_LOAD_POLICY0__POLICY_4__SHIFT 0x8
10419#define TC_CFG_L2_LOAD_POLICY0__POLICY_5__SHIFT 0xa
10420#define TC_CFG_L2_LOAD_POLICY0__POLICY_6__SHIFT 0xc
10421#define TC_CFG_L2_LOAD_POLICY0__POLICY_7__SHIFT 0xe
10422#define TC_CFG_L2_LOAD_POLICY0__POLICY_8__SHIFT 0x10
10423#define TC_CFG_L2_LOAD_POLICY0__POLICY_9__SHIFT 0x12
10424#define TC_CFG_L2_LOAD_POLICY0__POLICY_10__SHIFT 0x14
10425#define TC_CFG_L2_LOAD_POLICY0__POLICY_11__SHIFT 0x16
10426#define TC_CFG_L2_LOAD_POLICY0__POLICY_12__SHIFT 0x18
10427#define TC_CFG_L2_LOAD_POLICY0__POLICY_13__SHIFT 0x1a
10428#define TC_CFG_L2_LOAD_POLICY0__POLICY_14__SHIFT 0x1c
10429#define TC_CFG_L2_LOAD_POLICY0__POLICY_15__SHIFT 0x1e
10430#define TC_CFG_L2_LOAD_POLICY0__POLICY_0_MASK 0x00000003L
10431#define TC_CFG_L2_LOAD_POLICY0__POLICY_1_MASK 0x0000000CL
10432#define TC_CFG_L2_LOAD_POLICY0__POLICY_2_MASK 0x00000030L
10433#define TC_CFG_L2_LOAD_POLICY0__POLICY_3_MASK 0x000000C0L
10434#define TC_CFG_L2_LOAD_POLICY0__POLICY_4_MASK 0x00000300L
10435#define TC_CFG_L2_LOAD_POLICY0__POLICY_5_MASK 0x00000C00L
10436#define TC_CFG_L2_LOAD_POLICY0__POLICY_6_MASK 0x00003000L
10437#define TC_CFG_L2_LOAD_POLICY0__POLICY_7_MASK 0x0000C000L
10438#define TC_CFG_L2_LOAD_POLICY0__POLICY_8_MASK 0x00030000L
10439#define TC_CFG_L2_LOAD_POLICY0__POLICY_9_MASK 0x000C0000L
10440#define TC_CFG_L2_LOAD_POLICY0__POLICY_10_MASK 0x00300000L
10441#define TC_CFG_L2_LOAD_POLICY0__POLICY_11_MASK 0x00C00000L
10442#define TC_CFG_L2_LOAD_POLICY0__POLICY_12_MASK 0x03000000L
10443#define TC_CFG_L2_LOAD_POLICY0__POLICY_13_MASK 0x0C000000L
10444#define TC_CFG_L2_LOAD_POLICY0__POLICY_14_MASK 0x30000000L
10445#define TC_CFG_L2_LOAD_POLICY0__POLICY_15_MASK 0xC0000000L
10446//TC_CFG_L2_LOAD_POLICY1
10447#define TC_CFG_L2_LOAD_POLICY1__POLICY_16__SHIFT 0x0
10448#define TC_CFG_L2_LOAD_POLICY1__POLICY_17__SHIFT 0x2
10449#define TC_CFG_L2_LOAD_POLICY1__POLICY_18__SHIFT 0x4
10450#define TC_CFG_L2_LOAD_POLICY1__POLICY_19__SHIFT 0x6
10451#define TC_CFG_L2_LOAD_POLICY1__POLICY_20__SHIFT 0x8
10452#define TC_CFG_L2_LOAD_POLICY1__POLICY_21__SHIFT 0xa
10453#define TC_CFG_L2_LOAD_POLICY1__POLICY_22__SHIFT 0xc
10454#define TC_CFG_L2_LOAD_POLICY1__POLICY_23__SHIFT 0xe
10455#define TC_CFG_L2_LOAD_POLICY1__POLICY_24__SHIFT 0x10
10456#define TC_CFG_L2_LOAD_POLICY1__POLICY_25__SHIFT 0x12
10457#define TC_CFG_L2_LOAD_POLICY1__POLICY_26__SHIFT 0x14
10458#define TC_CFG_L2_LOAD_POLICY1__POLICY_27__SHIFT 0x16
10459#define TC_CFG_L2_LOAD_POLICY1__POLICY_28__SHIFT 0x18
10460#define TC_CFG_L2_LOAD_POLICY1__POLICY_29__SHIFT 0x1a
10461#define TC_CFG_L2_LOAD_POLICY1__POLICY_30__SHIFT 0x1c
10462#define TC_CFG_L2_LOAD_POLICY1__POLICY_31__SHIFT 0x1e
10463#define TC_CFG_L2_LOAD_POLICY1__POLICY_16_MASK 0x00000003L
10464#define TC_CFG_L2_LOAD_POLICY1__POLICY_17_MASK 0x0000000CL
10465#define TC_CFG_L2_LOAD_POLICY1__POLICY_18_MASK 0x00000030L
10466#define TC_CFG_L2_LOAD_POLICY1__POLICY_19_MASK 0x000000C0L
10467#define TC_CFG_L2_LOAD_POLICY1__POLICY_20_MASK 0x00000300L
10468#define TC_CFG_L2_LOAD_POLICY1__POLICY_21_MASK 0x00000C00L
10469#define TC_CFG_L2_LOAD_POLICY1__POLICY_22_MASK 0x00003000L
10470#define TC_CFG_L2_LOAD_POLICY1__POLICY_23_MASK 0x0000C000L
10471#define TC_CFG_L2_LOAD_POLICY1__POLICY_24_MASK 0x00030000L
10472#define TC_CFG_L2_LOAD_POLICY1__POLICY_25_MASK 0x000C0000L
10473#define TC_CFG_L2_LOAD_POLICY1__POLICY_26_MASK 0x00300000L
10474#define TC_CFG_L2_LOAD_POLICY1__POLICY_27_MASK 0x00C00000L
10475#define TC_CFG_L2_LOAD_POLICY1__POLICY_28_MASK 0x03000000L
10476#define TC_CFG_L2_LOAD_POLICY1__POLICY_29_MASK 0x0C000000L
10477#define TC_CFG_L2_LOAD_POLICY1__POLICY_30_MASK 0x30000000L
10478#define TC_CFG_L2_LOAD_POLICY1__POLICY_31_MASK 0xC0000000L
10479//TC_CFG_L2_STORE_POLICY0
10480#define TC_CFG_L2_STORE_POLICY0__POLICY_0__SHIFT 0x0
10481#define TC_CFG_L2_STORE_POLICY0__POLICY_1__SHIFT 0x2
10482#define TC_CFG_L2_STORE_POLICY0__POLICY_2__SHIFT 0x4
10483#define TC_CFG_L2_STORE_POLICY0__POLICY_3__SHIFT 0x6
10484#define TC_CFG_L2_STORE_POLICY0__POLICY_4__SHIFT 0x8
10485#define TC_CFG_L2_STORE_POLICY0__POLICY_5__SHIFT 0xa
10486#define TC_CFG_L2_STORE_POLICY0__POLICY_6__SHIFT 0xc
10487#define TC_CFG_L2_STORE_POLICY0__POLICY_7__SHIFT 0xe
10488#define TC_CFG_L2_STORE_POLICY0__POLICY_8__SHIFT 0x10
10489#define TC_CFG_L2_STORE_POLICY0__POLICY_9__SHIFT 0x12
10490#define TC_CFG_L2_STORE_POLICY0__POLICY_10__SHIFT 0x14
10491#define TC_CFG_L2_STORE_POLICY0__POLICY_11__SHIFT 0x16
10492#define TC_CFG_L2_STORE_POLICY0__POLICY_12__SHIFT 0x18
10493#define TC_CFG_L2_STORE_POLICY0__POLICY_13__SHIFT 0x1a
10494#define TC_CFG_L2_STORE_POLICY0__POLICY_14__SHIFT 0x1c
10495#define TC_CFG_L2_STORE_POLICY0__POLICY_15__SHIFT 0x1e
10496#define TC_CFG_L2_STORE_POLICY0__POLICY_0_MASK 0x00000003L
10497#define TC_CFG_L2_STORE_POLICY0__POLICY_1_MASK 0x0000000CL
10498#define TC_CFG_L2_STORE_POLICY0__POLICY_2_MASK 0x00000030L
10499#define TC_CFG_L2_STORE_POLICY0__POLICY_3_MASK 0x000000C0L
10500#define TC_CFG_L2_STORE_POLICY0__POLICY_4_MASK 0x00000300L
10501#define TC_CFG_L2_STORE_POLICY0__POLICY_5_MASK 0x00000C00L
10502#define TC_CFG_L2_STORE_POLICY0__POLICY_6_MASK 0x00003000L
10503#define TC_CFG_L2_STORE_POLICY0__POLICY_7_MASK 0x0000C000L
10504#define TC_CFG_L2_STORE_POLICY0__POLICY_8_MASK 0x00030000L
10505#define TC_CFG_L2_STORE_POLICY0__POLICY_9_MASK 0x000C0000L
10506#define TC_CFG_L2_STORE_POLICY0__POLICY_10_MASK 0x00300000L
10507#define TC_CFG_L2_STORE_POLICY0__POLICY_11_MASK 0x00C00000L
10508#define TC_CFG_L2_STORE_POLICY0__POLICY_12_MASK 0x03000000L
10509#define TC_CFG_L2_STORE_POLICY0__POLICY_13_MASK 0x0C000000L
10510#define TC_CFG_L2_STORE_POLICY0__POLICY_14_MASK 0x30000000L
10511#define TC_CFG_L2_STORE_POLICY0__POLICY_15_MASK 0xC0000000L
10512//TC_CFG_L2_STORE_POLICY1
10513#define TC_CFG_L2_STORE_POLICY1__POLICY_16__SHIFT 0x0
10514#define TC_CFG_L2_STORE_POLICY1__POLICY_17__SHIFT 0x2
10515#define TC_CFG_L2_STORE_POLICY1__POLICY_18__SHIFT 0x4
10516#define TC_CFG_L2_STORE_POLICY1__POLICY_19__SHIFT 0x6
10517#define TC_CFG_L2_STORE_POLICY1__POLICY_20__SHIFT 0x8
10518#define TC_CFG_L2_STORE_POLICY1__POLICY_21__SHIFT 0xa
10519#define TC_CFG_L2_STORE_POLICY1__POLICY_22__SHIFT 0xc
10520#define TC_CFG_L2_STORE_POLICY1__POLICY_23__SHIFT 0xe
10521#define TC_CFG_L2_STORE_POLICY1__POLICY_24__SHIFT 0x10
10522#define TC_CFG_L2_STORE_POLICY1__POLICY_25__SHIFT 0x12
10523#define TC_CFG_L2_STORE_POLICY1__POLICY_26__SHIFT 0x14
10524#define TC_CFG_L2_STORE_POLICY1__POLICY_27__SHIFT 0x16
10525#define TC_CFG_L2_STORE_POLICY1__POLICY_28__SHIFT 0x18
10526#define TC_CFG_L2_STORE_POLICY1__POLICY_29__SHIFT 0x1a
10527#define TC_CFG_L2_STORE_POLICY1__POLICY_30__SHIFT 0x1c
10528#define TC_CFG_L2_STORE_POLICY1__POLICY_31__SHIFT 0x1e
10529#define TC_CFG_L2_STORE_POLICY1__POLICY_16_MASK 0x00000003L
10530#define TC_CFG_L2_STORE_POLICY1__POLICY_17_MASK 0x0000000CL
10531#define TC_CFG_L2_STORE_POLICY1__POLICY_18_MASK 0x00000030L
10532#define TC_CFG_L2_STORE_POLICY1__POLICY_19_MASK 0x000000C0L
10533#define TC_CFG_L2_STORE_POLICY1__POLICY_20_MASK 0x00000300L
10534#define TC_CFG_L2_STORE_POLICY1__POLICY_21_MASK 0x00000C00L
10535#define TC_CFG_L2_STORE_POLICY1__POLICY_22_MASK 0x00003000L
10536#define TC_CFG_L2_STORE_POLICY1__POLICY_23_MASK 0x0000C000L
10537#define TC_CFG_L2_STORE_POLICY1__POLICY_24_MASK 0x00030000L
10538#define TC_CFG_L2_STORE_POLICY1__POLICY_25_MASK 0x000C0000L
10539#define TC_CFG_L2_STORE_POLICY1__POLICY_26_MASK 0x00300000L
10540#define TC_CFG_L2_STORE_POLICY1__POLICY_27_MASK 0x00C00000L
10541#define TC_CFG_L2_STORE_POLICY1__POLICY_28_MASK 0x03000000L
10542#define TC_CFG_L2_STORE_POLICY1__POLICY_29_MASK 0x0C000000L
10543#define TC_CFG_L2_STORE_POLICY1__POLICY_30_MASK 0x30000000L
10544#define TC_CFG_L2_STORE_POLICY1__POLICY_31_MASK 0xC0000000L
10545//TC_CFG_L2_ATOMIC_POLICY
10546#define TC_CFG_L2_ATOMIC_POLICY__POLICY_0__SHIFT 0x0
10547#define TC_CFG_L2_ATOMIC_POLICY__POLICY_1__SHIFT 0x2
10548#define TC_CFG_L2_ATOMIC_POLICY__POLICY_2__SHIFT 0x4
10549#define TC_CFG_L2_ATOMIC_POLICY__POLICY_3__SHIFT 0x6
10550#define TC_CFG_L2_ATOMIC_POLICY__POLICY_4__SHIFT 0x8
10551#define TC_CFG_L2_ATOMIC_POLICY__POLICY_5__SHIFT 0xa
10552#define TC_CFG_L2_ATOMIC_POLICY__POLICY_6__SHIFT 0xc
10553#define TC_CFG_L2_ATOMIC_POLICY__POLICY_7__SHIFT 0xe
10554#define TC_CFG_L2_ATOMIC_POLICY__POLICY_8__SHIFT 0x10
10555#define TC_CFG_L2_ATOMIC_POLICY__POLICY_9__SHIFT 0x12
10556#define TC_CFG_L2_ATOMIC_POLICY__POLICY_10__SHIFT 0x14
10557#define TC_CFG_L2_ATOMIC_POLICY__POLICY_11__SHIFT 0x16
10558#define TC_CFG_L2_ATOMIC_POLICY__POLICY_12__SHIFT 0x18
10559#define TC_CFG_L2_ATOMIC_POLICY__POLICY_13__SHIFT 0x1a
10560#define TC_CFG_L2_ATOMIC_POLICY__POLICY_14__SHIFT 0x1c
10561#define TC_CFG_L2_ATOMIC_POLICY__POLICY_15__SHIFT 0x1e
10562#define TC_CFG_L2_ATOMIC_POLICY__POLICY_0_MASK 0x00000003L
10563#define TC_CFG_L2_ATOMIC_POLICY__POLICY_1_MASK 0x0000000CL
10564#define TC_CFG_L2_ATOMIC_POLICY__POLICY_2_MASK 0x00000030L
10565#define TC_CFG_L2_ATOMIC_POLICY__POLICY_3_MASK 0x000000C0L
10566#define TC_CFG_L2_ATOMIC_POLICY__POLICY_4_MASK 0x00000300L
10567#define TC_CFG_L2_ATOMIC_POLICY__POLICY_5_MASK 0x00000C00L
10568#define TC_CFG_L2_ATOMIC_POLICY__POLICY_6_MASK 0x00003000L
10569#define TC_CFG_L2_ATOMIC_POLICY__POLICY_7_MASK 0x0000C000L
10570#define TC_CFG_L2_ATOMIC_POLICY__POLICY_8_MASK 0x00030000L
10571#define TC_CFG_L2_ATOMIC_POLICY__POLICY_9_MASK 0x000C0000L
10572#define TC_CFG_L2_ATOMIC_POLICY__POLICY_10_MASK 0x00300000L
10573#define TC_CFG_L2_ATOMIC_POLICY__POLICY_11_MASK 0x00C00000L
10574#define TC_CFG_L2_ATOMIC_POLICY__POLICY_12_MASK 0x03000000L
10575#define TC_CFG_L2_ATOMIC_POLICY__POLICY_13_MASK 0x0C000000L
10576#define TC_CFG_L2_ATOMIC_POLICY__POLICY_14_MASK 0x30000000L
10577#define TC_CFG_L2_ATOMIC_POLICY__POLICY_15_MASK 0xC0000000L
10578//TC_CFG_L1_VOLATILE
10579#define TC_CFG_L1_VOLATILE__VOL__SHIFT 0x0
10580#define TC_CFG_L1_VOLATILE__VOL_MASK 0x0000000FL
10581//TC_CFG_L2_VOLATILE
10582#define TC_CFG_L2_VOLATILE__VOL__SHIFT 0x0
10583#define TC_CFG_L2_VOLATILE__VOL_MASK 0x0000000FL
10584//TCI_STATUS
10585#define TCI_STATUS__TCI_BUSY__SHIFT 0x0
10586#define TCI_STATUS__TCI_BUSY_MASK 0x00000001L
10587//TCI_CNTL_1
10588#define TCI_CNTL_1__WBINVL1_NUM_CYCLES__SHIFT 0x0
10589#define TCI_CNTL_1__REQ_FIFO_DEPTH__SHIFT 0x10
10590#define TCI_CNTL_1__WDATA_RAM_DEPTH__SHIFT 0x18
10591#define TCI_CNTL_1__WBINVL1_NUM_CYCLES_MASK 0x0000FFFFL
10592#define TCI_CNTL_1__REQ_FIFO_DEPTH_MASK 0x00FF0000L
10593#define TCI_CNTL_1__WDATA_RAM_DEPTH_MASK 0xFF000000L
10594//TCI_CNTL_2
10595#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2__SHIFT 0x0
10596#define TCI_CNTL_2__TCA_MAX_CREDIT__SHIFT 0x1
10597#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2_MASK 0x00000001L
10598#define TCI_CNTL_2__TCA_MAX_CREDIT_MASK 0x000001FEL
10599//TCC_CTRL
10600#define TCC_CTRL__CACHE_SIZE__SHIFT 0x0
10601#define TCC_CTRL__RATE__SHIFT 0x2
10602#define TCC_CTRL__WRITEBACK_MARGIN__SHIFT 0x4
10603#define TCC_CTRL__METADATA_LATENCY_FIFO_SIZE__SHIFT 0x8
10604#define TCC_CTRL__SRC_FIFO_SIZE__SHIFT 0xc
10605#define TCC_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x10
10606#define TCC_CTRL__LINEAR_SET_HASH__SHIFT 0x15
10607#define TCC_CTRL__MDC_SIZE__SHIFT 0x18
10608#define TCC_CTRL__MDC_SECTOR_SIZE__SHIFT 0x1a
10609#define TCC_CTRL__MDC_SIDEBAND_FIFO_SIZE__SHIFT 0x1c
10610#define TCC_CTRL__CACHE_SIZE_MASK 0x00000003L
10611#define TCC_CTRL__RATE_MASK 0x0000000CL
10612#define TCC_CTRL__WRITEBACK_MARGIN_MASK 0x000000F0L
10613#define TCC_CTRL__METADATA_LATENCY_FIFO_SIZE_MASK 0x00000F00L
10614#define TCC_CTRL__SRC_FIFO_SIZE_MASK 0x0000F000L
10615#define TCC_CTRL__LATENCY_FIFO_SIZE_MASK 0x000F0000L
10616#define TCC_CTRL__LINEAR_SET_HASH_MASK 0x00200000L
10617#define TCC_CTRL__MDC_SIZE_MASK 0x03000000L
10618#define TCC_CTRL__MDC_SECTOR_SIZE_MASK 0x0C000000L
10619#define TCC_CTRL__MDC_SIDEBAND_FIFO_SIZE_MASK 0xF0000000L
10620//TCC_CTRL2
10621#define TCC_CTRL2__PROBE_FIFO_SIZE__SHIFT 0x0
10622#define TCC_CTRL2__PROBE_FIFO_SIZE_MASK 0x0000000FL
10623//TCC_EDC_CNT
10624#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT 0x0
10625#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT__SHIFT 0x2
10626#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT__SHIFT 0x4
10627#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT__SHIFT 0x6
10628#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT__SHIFT 0x8
10629#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT__SHIFT 0xa
10630#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT__SHIFT 0xc
10631#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT__SHIFT 0xe
10632#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT__SHIFT 0x10
10633#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT__SHIFT 0x12
10634#define TCC_EDC_CNT__IN_USE_DEC_SED_COUNT__SHIFT 0x14
10635#define TCC_EDC_CNT__IN_USE_TRANSFER_SED_COUNT__SHIFT 0x16
10636#define TCC_EDC_CNT__LATENCY_FIFO_SED_COUNT__SHIFT 0x18
10637#define TCC_EDC_CNT__RETURN_DATA_SED_COUNT__SHIFT 0x1a
10638#define TCC_EDC_CNT__RETURN_CONTROL_SED_COUNT__SHIFT 0x1c
10639#define TCC_EDC_CNT__UC_ATOMIC_FIFO_SED_COUNT__SHIFT 0x1e
10640#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK 0x00000003L
10641#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT_MASK 0x0000000CL
10642#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT_MASK 0x00000030L
10643#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT_MASK 0x000000C0L
10644#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT_MASK 0x00000300L
10645#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT_MASK 0x00000C00L
10646#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT_MASK 0x00003000L
10647#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT_MASK 0x0000C000L
10648#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT_MASK 0x00030000L
10649#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT_MASK 0x000C0000L
10650#define TCC_EDC_CNT__IN_USE_DEC_SED_COUNT_MASK 0x00300000L
10651#define TCC_EDC_CNT__IN_USE_TRANSFER_SED_COUNT_MASK 0x00C00000L
10652#define TCC_EDC_CNT__LATENCY_FIFO_SED_COUNT_MASK 0x03000000L
10653#define TCC_EDC_CNT__RETURN_DATA_SED_COUNT_MASK 0x0C000000L
10654#define TCC_EDC_CNT__RETURN_CONTROL_SED_COUNT_MASK 0x30000000L
10655#define TCC_EDC_CNT__UC_ATOMIC_FIFO_SED_COUNT_MASK 0xC0000000L
10656//TCC_EDC_CNT2
10657#define TCC_EDC_CNT2__WRITE_RETURN_SED_COUNT__SHIFT 0x0
10658#define TCC_EDC_CNT2__WRITE_CACHE_READ_SED_COUNT__SHIFT 0x2
10659#define TCC_EDC_CNT2__SRC_FIFO_NEXT_RAM_SED_COUNT__SHIFT 0x4
10660#define TCC_EDC_CNT2__LATENCY_FIFO_NEXT_RAM_SED_COUNT__SHIFT 0x6
10661#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SED_COUNT__SHIFT 0x8
10662#define TCC_EDC_CNT2__WRITE_RETURN_SED_COUNT_MASK 0x00000003L
10663#define TCC_EDC_CNT2__WRITE_CACHE_READ_SED_COUNT_MASK 0x0000000CL
10664#define TCC_EDC_CNT2__SRC_FIFO_NEXT_RAM_SED_COUNT_MASK 0x00000030L
10665#define TCC_EDC_CNT2__LATENCY_FIFO_NEXT_RAM_SED_COUNT_MASK 0x000000C0L
10666#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SED_COUNT_MASK 0x00000300L
10667//TCC_REDUNDANCY
10668#define TCC_REDUNDANCY__MC_SEL0__SHIFT 0x0
10669#define TCC_REDUNDANCY__MC_SEL1__SHIFT 0x1
10670#define TCC_REDUNDANCY__MC_SEL0_MASK 0x00000001L
10671#define TCC_REDUNDANCY__MC_SEL1_MASK 0x00000002L
10672//TCC_EXE_DISABLE
10673#define TCC_EXE_DISABLE__EXE_DISABLE__SHIFT 0x1
10674#define TCC_EXE_DISABLE__EXE_DISABLE_MASK 0x00000002L
10675//TCC_DSM_CNTL
10676#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_DATA_SEL__SHIFT 0x0
10677#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
10678#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_DATA_SEL__SHIFT 0x3
10679#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
10680#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_DATA_SEL__SHIFT 0x6
10681#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
10682#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_DATA_SEL__SHIFT 0x9
10683#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
10684#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_DATA_SEL__SHIFT 0xc
10685#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_SINGLE_WRITE__SHIFT 0xe
10686#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_DATA_SEL__SHIFT 0xf
10687#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
10688#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_DATA_SEL__SHIFT 0x12
10689#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_SINGLE_WRITE__SHIFT 0x14
10690#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_DATA_SEL__SHIFT 0x15
10691#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_SINGLE_WRITE__SHIFT 0x17
10692#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_DATA_SEL__SHIFT 0x18
10693#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_SINGLE_WRITE__SHIFT 0x1a
10694#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_DATA_SEL__SHIFT 0x1b
10695#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_SINGLE_WRITE__SHIFT 0x1d
10696#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_DATA_SEL_MASK 0x00000003L
10697#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
10698#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_DATA_SEL_MASK 0x00000018L
10699#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
10700#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_DATA_SEL_MASK 0x000000C0L
10701#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
10702#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_DATA_SEL_MASK 0x00000600L
10703#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
10704#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_DATA_SEL_MASK 0x00003000L
10705#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_SINGLE_WRITE_MASK 0x00004000L
10706#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_DATA_SEL_MASK 0x00018000L
10707#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
10708#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_DATA_SEL_MASK 0x000C0000L
10709#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_SINGLE_WRITE_MASK 0x00100000L
10710#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_DATA_SEL_MASK 0x00600000L
10711#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_SINGLE_WRITE_MASK 0x00800000L
10712#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_DATA_SEL_MASK 0x03000000L
10713#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_SINGLE_WRITE_MASK 0x04000000L
10714#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_DATA_SEL_MASK 0x18000000L
10715#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_SINGLE_WRITE_MASK 0x20000000L
10716//TCC_DSM_CNTLA
10717#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x0
10718#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
10719#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x3
10720#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
10721#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_DATA_SEL__SHIFT 0x6
10722#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
10723#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_DATA_SEL__SHIFT 0x9
10724#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
10725#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL__SHIFT 0xc
10726#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0xe
10727#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL__SHIFT 0xf
10728#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
10729#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x12
10730#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x14
10731#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x15
10732#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x17
10733#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_DATA_SEL__SHIFT 0x18
10734#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_SINGLE_WRITE__SHIFT 0x1a
10735#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_DATA_SEL__SHIFT 0x1b
10736#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_SINGLE_WRITE__SHIFT 0x1d
10737#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_DATA_SEL_MASK 0x00000003L
10738#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
10739#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_DATA_SEL_MASK 0x00000018L
10740#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
10741#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_DATA_SEL_MASK 0x000000C0L
10742#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
10743#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_DATA_SEL_MASK 0x00000600L
10744#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
10745#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL_MASK 0x00003000L
10746#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00004000L
10747#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL_MASK 0x00018000L
10748#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
10749#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_DATA_SEL_MASK 0x000C0000L
10750#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00100000L
10751#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_DATA_SEL_MASK 0x00600000L
10752#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00800000L
10753#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_DATA_SEL_MASK 0x03000000L
10754#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_SINGLE_WRITE_MASK 0x04000000L
10755#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_DATA_SEL_MASK 0x18000000L
10756#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_SINGLE_WRITE_MASK 0x20000000L
10757//TCC_DSM_CNTL2
10758#define TCC_DSM_CNTL2__CACHE_DATA_ENABLE_ERROR_INJECT__SHIFT 0x0
10759#define TCC_DSM_CNTL2__CACHE_DATA_SELECT_INJECT_DELAY__SHIFT 0x2
10760#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_ENABLE_ERROR_INJECT__SHIFT 0x3
10761#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_SELECT_INJECT_DELAY__SHIFT 0x5
10762#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_ENABLE_ERROR_INJECT__SHIFT 0x6
10763#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_SELECT_INJECT_DELAY__SHIFT 0x8
10764#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_ENABLE_ERROR_INJECT__SHIFT 0x9
10765#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_SELECT_INJECT_DELAY__SHIFT 0xb
10766#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_ENABLE_ERROR_INJECT__SHIFT 0xc
10767#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_SELECT_INJECT_DELAY__SHIFT 0xe
10768#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_ENABLE_ERROR_INJECT__SHIFT 0xf
10769#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_SELECT_INJECT_DELAY__SHIFT 0x11
10770#define TCC_DSM_CNTL2__HIGH_RATE_TAG_ENABLE_ERROR_INJECT__SHIFT 0x12
10771#define TCC_DSM_CNTL2__HIGH_RATE_TAG_SELECT_INJECT_DELAY__SHIFT 0x14
10772#define TCC_DSM_CNTL2__LOW_RATE_TAG_ENABLE_ERROR_INJECT__SHIFT 0x15
10773#define TCC_DSM_CNTL2__LOW_RATE_TAG_SELECT_INJECT_DELAY__SHIFT 0x17
10774#define TCC_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
10775#define TCC_DSM_CNTL2__CACHE_DATA_ENABLE_ERROR_INJECT_MASK 0x00000003L
10776#define TCC_DSM_CNTL2__CACHE_DATA_SELECT_INJECT_DELAY_MASK 0x00000004L
10777#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_ENABLE_ERROR_INJECT_MASK 0x00000018L
10778#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_SELECT_INJECT_DELAY_MASK 0x00000020L
10779#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_ENABLE_ERROR_INJECT_MASK 0x000000C0L
10780#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_SELECT_INJECT_DELAY_MASK 0x00000100L
10781#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_ENABLE_ERROR_INJECT_MASK 0x00000600L
10782#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_SELECT_INJECT_DELAY_MASK 0x00000800L
10783#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_ENABLE_ERROR_INJECT_MASK 0x00003000L
10784#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_SELECT_INJECT_DELAY_MASK 0x00004000L
10785#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_ENABLE_ERROR_INJECT_MASK 0x00018000L
10786#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_SELECT_INJECT_DELAY_MASK 0x00020000L
10787#define TCC_DSM_CNTL2__HIGH_RATE_TAG_ENABLE_ERROR_INJECT_MASK 0x000C0000L
10788#define TCC_DSM_CNTL2__HIGH_RATE_TAG_SELECT_INJECT_DELAY_MASK 0x00100000L
10789#define TCC_DSM_CNTL2__LOW_RATE_TAG_ENABLE_ERROR_INJECT_MASK 0x00600000L
10790#define TCC_DSM_CNTL2__LOW_RATE_TAG_SELECT_INJECT_DELAY_MASK 0x00800000L
10791#define TCC_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
10792//TCC_DSM_CNTL2A
10793#define TCC_DSM_CNTL2A__IN_USE_DEC_ENABLE_ERROR_INJECT__SHIFT 0x0
10794#define TCC_DSM_CNTL2A__IN_USE_DEC_SELECT_INJECT_DELAY__SHIFT 0x2
10795#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_ENABLE_ERROR_INJECT__SHIFT 0x3
10796#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_SELECT_INJECT_DELAY__SHIFT 0x5
10797#define TCC_DSM_CNTL2A__RETURN_DATA_ENABLE_ERROR_INJECT__SHIFT 0x6
10798#define TCC_DSM_CNTL2A__RETURN_DATA_SELECT_INJECT_DELAY__SHIFT 0x8
10799#define TCC_DSM_CNTL2A__RETURN_CONTROL_ENABLE_ERROR_INJECT__SHIFT 0x9
10800#define TCC_DSM_CNTL2A__RETURN_CONTROL_SELECT_INJECT_DELAY__SHIFT 0xb
10801#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xc
10802#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_SELECT_INJECT_DELAY__SHIFT 0xe
10803#define TCC_DSM_CNTL2A__WRITE_RETURN_ENABLE_ERROR_INJECT__SHIFT 0xf
10804#define TCC_DSM_CNTL2A__WRITE_RETURN_SELECT_INJECT_DELAY__SHIFT 0x11
10805#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_ENABLE_ERROR_INJECT__SHIFT 0x12
10806#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_SELECT_INJECT_DELAY__SHIFT 0x14
10807#define TCC_DSM_CNTL2A__SRC_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x15
10808#define TCC_DSM_CNTL2A__SRC_FIFO_SELECT_INJECT_DELAY__SHIFT 0x17
10809#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
10810#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
10811#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x1b
10812#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_SELECT_INJECT_DELAY__SHIFT 0x1d
10813#define TCC_DSM_CNTL2A__IN_USE_DEC_ENABLE_ERROR_INJECT_MASK 0x00000003L
10814#define TCC_DSM_CNTL2A__IN_USE_DEC_SELECT_INJECT_DELAY_MASK 0x00000004L
10815#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_ENABLE_ERROR_INJECT_MASK 0x00000018L
10816#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_SELECT_INJECT_DELAY_MASK 0x00000020L
10817#define TCC_DSM_CNTL2A__RETURN_DATA_ENABLE_ERROR_INJECT_MASK 0x000000C0L
10818#define TCC_DSM_CNTL2A__RETURN_DATA_SELECT_INJECT_DELAY_MASK 0x00000100L
10819#define TCC_DSM_CNTL2A__RETURN_CONTROL_ENABLE_ERROR_INJECT_MASK 0x00000600L
10820#define TCC_DSM_CNTL2A__RETURN_CONTROL_SELECT_INJECT_DELAY_MASK 0x00000800L
10821#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_ENABLE_ERROR_INJECT_MASK 0x00003000L
10822#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_SELECT_INJECT_DELAY_MASK 0x00004000L
10823#define TCC_DSM_CNTL2A__WRITE_RETURN_ENABLE_ERROR_INJECT_MASK 0x00018000L
10824#define TCC_DSM_CNTL2A__WRITE_RETURN_SELECT_INJECT_DELAY_MASK 0x00020000L
10825#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_ENABLE_ERROR_INJECT_MASK 0x000C0000L
10826#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_SELECT_INJECT_DELAY_MASK 0x00100000L
10827#define TCC_DSM_CNTL2A__SRC_FIFO_ENABLE_ERROR_INJECT_MASK 0x00600000L
10828#define TCC_DSM_CNTL2A__SRC_FIFO_SELECT_INJECT_DELAY_MASK 0x00800000L
10829#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
10830#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
10831#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_ENABLE_ERROR_INJECT_MASK 0x18000000L
10832#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_SELECT_INJECT_DELAY_MASK 0x20000000L
10833//TCC_DSM_CNTL2B
10834#define TCC_DSM_CNTL2B__LATENCY_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x0
10835#define TCC_DSM_CNTL2B__LATENCY_FIFO_SELECT_INJECT_DELAY__SHIFT 0x2
10836#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x3
10837#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_SELECT_INJECT_DELAY__SHIFT 0x5
10838#define TCC_DSM_CNTL2B__LATENCY_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000003L
10839#define TCC_DSM_CNTL2B__LATENCY_FIFO_SELECT_INJECT_DELAY_MASK 0x00000004L
10840#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT_MASK 0x00000018L
10841#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_SELECT_INJECT_DELAY_MASK 0x00000020L
10842//TCC_WBINVL2
10843#define TCC_WBINVL2__DONE__SHIFT 0x4
10844#define TCC_WBINVL2__DONE_MASK 0x00000010L
10845//TCC_SOFT_RESET
10846#define TCC_SOFT_RESET__HALT_FOR_RESET__SHIFT 0x0
10847#define TCC_SOFT_RESET__HALT_FOR_RESET_MASK 0x00000001L
10848//TCA_CTRL
10849#define TCA_CTRL__HOLE_TIMEOUT__SHIFT 0x0
10850#define TCA_CTRL__RB_STILL_4_PHASE__SHIFT 0x4
10851#define TCA_CTRL__RB_AS_TCI__SHIFT 0x5
10852#define TCA_CTRL__DISABLE_UTCL2_PRIORITY__SHIFT 0x6
10853#define TCA_CTRL__DISABLE_RB_ONLY_TCA_ARBITER__SHIFT 0x7
10854#define TCA_CTRL__HOLE_TIMEOUT_MASK 0x0000000FL
10855#define TCA_CTRL__RB_STILL_4_PHASE_MASK 0x00000010L
10856#define TCA_CTRL__RB_AS_TCI_MASK 0x00000020L
10857#define TCA_CTRL__DISABLE_UTCL2_PRIORITY_MASK 0x00000040L
10858#define TCA_CTRL__DISABLE_RB_ONLY_TCA_ARBITER_MASK 0x00000080L
10859//TCA_BURST_MASK
10860#define TCA_BURST_MASK__ADDR_MASK__SHIFT 0x0
10861#define TCA_BURST_MASK__ADDR_MASK_MASK 0xFFFFFFFFL
10862//TCA_BURST_CTRL
10863#define TCA_BURST_CTRL__MAX_BURST__SHIFT 0x0
10864#define TCA_BURST_CTRL__RB_DISABLE__SHIFT 0x3
10865#define TCA_BURST_CTRL__TCP_DISABLE__SHIFT 0x4
10866#define TCA_BURST_CTRL__SQC_DISABLE__SHIFT 0x5
10867#define TCA_BURST_CTRL__CPF_DISABLE__SHIFT 0x6
10868#define TCA_BURST_CTRL__CPG_DISABLE__SHIFT 0x7
10869#define TCA_BURST_CTRL__IA_DISABLE__SHIFT 0x8
10870#define TCA_BURST_CTRL__WD_DISABLE__SHIFT 0x9
10871#define TCA_BURST_CTRL__SQG_DISABLE__SHIFT 0xa
10872#define TCA_BURST_CTRL__UTCL2_DISABLE__SHIFT 0xb
10873#define TCA_BURST_CTRL__TPI_DISABLE__SHIFT 0xc
10874#define TCA_BURST_CTRL__RLC_DISABLE__SHIFT 0xd
10875#define TCA_BURST_CTRL__PA_DISABLE__SHIFT 0xe
10876#define TCA_BURST_CTRL__MAX_BURST_MASK 0x00000007L
10877#define TCA_BURST_CTRL__RB_DISABLE_MASK 0x00000008L
10878#define TCA_BURST_CTRL__TCP_DISABLE_MASK 0x00000010L
10879#define TCA_BURST_CTRL__SQC_DISABLE_MASK 0x00000020L
10880#define TCA_BURST_CTRL__CPF_DISABLE_MASK 0x00000040L
10881#define TCA_BURST_CTRL__CPG_DISABLE_MASK 0x00000080L
10882#define TCA_BURST_CTRL__IA_DISABLE_MASK 0x00000100L
10883#define TCA_BURST_CTRL__WD_DISABLE_MASK 0x00000200L
10884#define TCA_BURST_CTRL__SQG_DISABLE_MASK 0x00000400L
10885#define TCA_BURST_CTRL__UTCL2_DISABLE_MASK 0x00000800L
10886#define TCA_BURST_CTRL__TPI_DISABLE_MASK 0x00001000L
10887#define TCA_BURST_CTRL__RLC_DISABLE_MASK 0x00002000L
10888#define TCA_BURST_CTRL__PA_DISABLE_MASK 0x00004000L
10889//TCA_DSM_CNTL
10890#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_DATA_SEL__SHIFT 0x0
10891#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
10892#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_DATA_SEL__SHIFT 0x3
10893#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
10894#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_DATA_SEL_MASK 0x00000003L
10895#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
10896#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_DATA_SEL_MASK 0x00000018L
10897#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
10898//TCA_DSM_CNTL2
10899#define TCA_DSM_CNTL2__HOLE_FIFO_SED_ENABLE_ERROR_INJECT__SHIFT 0x0
10900#define TCA_DSM_CNTL2__HOLE_FIFO_SED_SELECT_INJECT_DELAY__SHIFT 0x2
10901#define TCA_DSM_CNTL2__REQ_FIFO_SED_ENABLE_ERROR_INJECT__SHIFT 0x3
10902#define TCA_DSM_CNTL2__REQ_FIFO_SED_SELECT_INJECT_DELAY__SHIFT 0x5
10903#define TCA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
10904#define TCA_DSM_CNTL2__HOLE_FIFO_SED_ENABLE_ERROR_INJECT_MASK 0x00000003L
10905#define TCA_DSM_CNTL2__HOLE_FIFO_SED_SELECT_INJECT_DELAY_MASK 0x00000004L
10906#define TCA_DSM_CNTL2__REQ_FIFO_SED_ENABLE_ERROR_INJECT_MASK 0x00000018L
10907#define TCA_DSM_CNTL2__REQ_FIFO_SED_SELECT_INJECT_DELAY_MASK 0x00000020L
10908#define TCA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
10909//TCA_EDC_CNT
10910#define TCA_EDC_CNT__HOLE_FIFO_SED_COUNT__SHIFT 0x0
10911#define TCA_EDC_CNT__REQ_FIFO_SED_COUNT__SHIFT 0x2
10912#define TCA_EDC_CNT__HOLE_FIFO_SED_COUNT_MASK 0x00000003L
10913#define TCA_EDC_CNT__REQ_FIFO_SED_COUNT_MASK 0x0000000CL
10914
10915
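/*
 * Illustrative sketch, not part of the register map: reading a field back
 * out of a register value is the mirror operation, mask first and then
 * shift down.  Hypothetical helper using only the TCC_EDC_CNT macros
 * defined above; obtaining the register value itself is outside the scope
 * of this header.
 */
static inline unsigned int tcc_edc_cache_data_sec_count(unsigned int reg_val)
{
	/* isolate bits 1:0 (CACHE_DATA_SEC_COUNT) and shift down to bit 0 */
	return (reg_val & TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK) >>
	       TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT;
}
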
10916// addressBlock: gc_shdec
10917//SPI_SHADER_PGM_RSRC3_PS
10918#define SPI_SHADER_PGM_RSRC3_PS__CU_EN__SHIFT 0x0
10919#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT__SHIFT 0x10
10920#define SPI_SHADER_PGM_RSRC3_PS__LOCK_LOW_THRESHOLD__SHIFT 0x16
10921#define SPI_SHADER_PGM_RSRC3_PS__SIMD_DISABLE__SHIFT 0x1a
10922#define SPI_SHADER_PGM_RSRC3_PS__CU_EN_MASK 0x0000FFFFL
10923#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT_MASK 0x003F0000L
10924#define SPI_SHADER_PGM_RSRC3_PS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
10925#define SPI_SHADER_PGM_RSRC3_PS__SIMD_DISABLE_MASK 0x3C000000L
10926//SPI_SHADER_PGM_LO_PS
10927#define SPI_SHADER_PGM_LO_PS__MEM_BASE__SHIFT 0x0
10928#define SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK 0xFFFFFFFFL
10929//SPI_SHADER_PGM_HI_PS
10930#define SPI_SHADER_PGM_HI_PS__MEM_BASE__SHIFT 0x0
10931#define SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK 0xFFL
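/*
 * Illustrative sketch, not part of the register map: per the two masks
 * above, SPI_SHADER_PGM_LO_PS.MEM_BASE and SPI_SHADER_PGM_HI_PS.MEM_BASE
 * together hold a 40-bit value, 32 bits in LO and 8 bits in HI.  The
 * hypothetical helper below only shows that split; how the value is
 * derived from a GPU address is driver policy, not described here.
 */
static inline void spi_shader_pgm_ps_split(unsigned long long mem_base,
					   unsigned int *lo, unsigned int *hi)
{
	/* low 32 bits -> SPI_SHADER_PGM_LO_PS.MEM_BASE */
	*lo = (unsigned int)(mem_base & SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK);
	/* next 8 bits -> SPI_SHADER_PGM_HI_PS.MEM_BASE */
	*hi = (unsigned int)((mem_base >> 32) & SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK);
}
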
10932//SPI_SHADER_PGM_RSRC1_PS
10933#define SPI_SHADER_PGM_RSRC1_PS__VGPRS__SHIFT 0x0
10934#define SPI_SHADER_PGM_RSRC1_PS__SGPRS__SHIFT 0x6
10935#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY__SHIFT 0xa
10936#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE__SHIFT 0xc
10937#define SPI_SHADER_PGM_RSRC1_PS__PRIV__SHIFT 0x14
10938#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP__SHIFT 0x15
10939#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE__SHIFT 0x17
10940#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE__SHIFT 0x18
10941#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL__SHIFT 0x1d
10942#define SPI_SHADER_PGM_RSRC1_PS__VGPRS_MASK 0x0000003FL
10943#define SPI_SHADER_PGM_RSRC1_PS__SGPRS_MASK 0x000003C0L
10944#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY_MASK 0x00000C00L
10945#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE_MASK 0x000FF000L
10946#define SPI_SHADER_PGM_RSRC1_PS__PRIV_MASK 0x00100000L
10947#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP_MASK 0x00200000L
10948#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE_MASK 0x00800000L
10949#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE_MASK 0x01000000L
10950#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL_MASK 0x20000000L
10951//SPI_SHADER_PGM_RSRC2_PS
10952#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN__SHIFT 0x0
10953#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR__SHIFT 0x1
10954#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT__SHIFT 0x6
10955#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN__SHIFT 0x7
10956#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE__SHIFT 0x8
10957#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN__SHIFT 0x10
10958#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID__SHIFT 0x19
10959#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION__SHIFT 0x1a
10960#define SPI_SHADER_PGM_RSRC2_PS__SKIP_USGPR0__SHIFT 0x1b
10961#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB__SHIFT 0x1c
10962#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN_MASK 0x00000001L
10963#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MASK 0x0000003EL
10964#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT_MASK 0x00000040L
10965#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN_MASK 0x00000080L
10966#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE_MASK 0x0000FF00L
10967#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN_MASK 0x01FF0000L
10968#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID_MASK 0x02000000L
10969#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION_MASK 0x04000000L
10970#define SPI_SHADER_PGM_RSRC2_PS__SKIP_USGPR0_MASK 0x08000000L
10971#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB_MASK 0x10000000L
10972//SPI_SHADER_USER_DATA_PS_0
10973#define SPI_SHADER_USER_DATA_PS_0__DATA__SHIFT 0x0
10974#define SPI_SHADER_USER_DATA_PS_0__DATA_MASK 0xFFFFFFFFL
10975//SPI_SHADER_USER_DATA_PS_1
10976#define SPI_SHADER_USER_DATA_PS_1__DATA__SHIFT 0x0
10977#define SPI_SHADER_USER_DATA_PS_1__DATA_MASK 0xFFFFFFFFL
10978//SPI_SHADER_USER_DATA_PS_2
10979#define SPI_SHADER_USER_DATA_PS_2__DATA__SHIFT 0x0
10980#define SPI_SHADER_USER_DATA_PS_2__DATA_MASK 0xFFFFFFFFL
10981//SPI_SHADER_USER_DATA_PS_3
10982#define SPI_SHADER_USER_DATA_PS_3__DATA__SHIFT 0x0
10983#define SPI_SHADER_USER_DATA_PS_3__DATA_MASK 0xFFFFFFFFL
10984//SPI_SHADER_USER_DATA_PS_4
10985#define SPI_SHADER_USER_DATA_PS_4__DATA__SHIFT 0x0
10986#define SPI_SHADER_USER_DATA_PS_4__DATA_MASK 0xFFFFFFFFL
10987//SPI_SHADER_USER_DATA_PS_5
10988#define SPI_SHADER_USER_DATA_PS_5__DATA__SHIFT 0x0
10989#define SPI_SHADER_USER_DATA_PS_5__DATA_MASK 0xFFFFFFFFL
10990//SPI_SHADER_USER_DATA_PS_6
10991#define SPI_SHADER_USER_DATA_PS_6__DATA__SHIFT 0x0
10992#define SPI_SHADER_USER_DATA_PS_6__DATA_MASK 0xFFFFFFFFL
10993//SPI_SHADER_USER_DATA_PS_7
10994#define SPI_SHADER_USER_DATA_PS_7__DATA__SHIFT 0x0
10995#define SPI_SHADER_USER_DATA_PS_7__DATA_MASK 0xFFFFFFFFL
10996//SPI_SHADER_USER_DATA_PS_8
10997#define SPI_SHADER_USER_DATA_PS_8__DATA__SHIFT 0x0
10998#define SPI_SHADER_USER_DATA_PS_8__DATA_MASK 0xFFFFFFFFL
10999//SPI_SHADER_USER_DATA_PS_9
11000#define SPI_SHADER_USER_DATA_PS_9__DATA__SHIFT 0x0
11001#define SPI_SHADER_USER_DATA_PS_9__DATA_MASK 0xFFFFFFFFL
11002//SPI_SHADER_USER_DATA_PS_10
11003#define SPI_SHADER_USER_DATA_PS_10__DATA__SHIFT 0x0
11004#define SPI_SHADER_USER_DATA_PS_10__DATA_MASK 0xFFFFFFFFL
11005//SPI_SHADER_USER_DATA_PS_11
11006#define SPI_SHADER_USER_DATA_PS_11__DATA__SHIFT 0x0
11007#define SPI_SHADER_USER_DATA_PS_11__DATA_MASK 0xFFFFFFFFL
11008//SPI_SHADER_USER_DATA_PS_12
11009#define SPI_SHADER_USER_DATA_PS_12__DATA__SHIFT 0x0
11010#define SPI_SHADER_USER_DATA_PS_12__DATA_MASK 0xFFFFFFFFL
11011//SPI_SHADER_USER_DATA_PS_13
11012#define SPI_SHADER_USER_DATA_PS_13__DATA__SHIFT 0x0
11013#define SPI_SHADER_USER_DATA_PS_13__DATA_MASK 0xFFFFFFFFL
11014//SPI_SHADER_USER_DATA_PS_14
11015#define SPI_SHADER_USER_DATA_PS_14__DATA__SHIFT 0x0
11016#define SPI_SHADER_USER_DATA_PS_14__DATA_MASK 0xFFFFFFFFL
11017//SPI_SHADER_USER_DATA_PS_15
11018#define SPI_SHADER_USER_DATA_PS_15__DATA__SHIFT 0x0
11019#define SPI_SHADER_USER_DATA_PS_15__DATA_MASK 0xFFFFFFFFL
11020//SPI_SHADER_USER_DATA_PS_16
11021#define SPI_SHADER_USER_DATA_PS_16__DATA__SHIFT 0x0
11022#define SPI_SHADER_USER_DATA_PS_16__DATA_MASK 0xFFFFFFFFL
11023//SPI_SHADER_USER_DATA_PS_17
11024#define SPI_SHADER_USER_DATA_PS_17__DATA__SHIFT 0x0
11025#define SPI_SHADER_USER_DATA_PS_17__DATA_MASK 0xFFFFFFFFL
11026//SPI_SHADER_USER_DATA_PS_18
11027#define SPI_SHADER_USER_DATA_PS_18__DATA__SHIFT 0x0
11028#define SPI_SHADER_USER_DATA_PS_18__DATA_MASK 0xFFFFFFFFL
11029//SPI_SHADER_USER_DATA_PS_19
11030#define SPI_SHADER_USER_DATA_PS_19__DATA__SHIFT 0x0
11031#define SPI_SHADER_USER_DATA_PS_19__DATA_MASK 0xFFFFFFFFL
11032//SPI_SHADER_USER_DATA_PS_20
11033#define SPI_SHADER_USER_DATA_PS_20__DATA__SHIFT 0x0
11034#define SPI_SHADER_USER_DATA_PS_20__DATA_MASK 0xFFFFFFFFL
11035//SPI_SHADER_USER_DATA_PS_21
11036#define SPI_SHADER_USER_DATA_PS_21__DATA__SHIFT 0x0
11037#define SPI_SHADER_USER_DATA_PS_21__DATA_MASK 0xFFFFFFFFL
11038//SPI_SHADER_USER_DATA_PS_22
11039#define SPI_SHADER_USER_DATA_PS_22__DATA__SHIFT 0x0
11040#define SPI_SHADER_USER_DATA_PS_22__DATA_MASK 0xFFFFFFFFL
11041//SPI_SHADER_USER_DATA_PS_23
11042#define SPI_SHADER_USER_DATA_PS_23__DATA__SHIFT 0x0
11043#define SPI_SHADER_USER_DATA_PS_23__DATA_MASK 0xFFFFFFFFL
11044//SPI_SHADER_USER_DATA_PS_24
11045#define SPI_SHADER_USER_DATA_PS_24__DATA__SHIFT 0x0
11046#define SPI_SHADER_USER_DATA_PS_24__DATA_MASK 0xFFFFFFFFL
11047//SPI_SHADER_USER_DATA_PS_25
11048#define SPI_SHADER_USER_DATA_PS_25__DATA__SHIFT 0x0
11049#define SPI_SHADER_USER_DATA_PS_25__DATA_MASK 0xFFFFFFFFL
11050//SPI_SHADER_USER_DATA_PS_26
11051#define SPI_SHADER_USER_DATA_PS_26__DATA__SHIFT 0x0
11052#define SPI_SHADER_USER_DATA_PS_26__DATA_MASK 0xFFFFFFFFL
11053//SPI_SHADER_USER_DATA_PS_27
11054#define SPI_SHADER_USER_DATA_PS_27__DATA__SHIFT 0x0
11055#define SPI_SHADER_USER_DATA_PS_27__DATA_MASK 0xFFFFFFFFL
11056//SPI_SHADER_USER_DATA_PS_28
11057#define SPI_SHADER_USER_DATA_PS_28__DATA__SHIFT 0x0
11058#define SPI_SHADER_USER_DATA_PS_28__DATA_MASK 0xFFFFFFFFL
11059//SPI_SHADER_USER_DATA_PS_29
11060#define SPI_SHADER_USER_DATA_PS_29__DATA__SHIFT 0x0
11061#define SPI_SHADER_USER_DATA_PS_29__DATA_MASK 0xFFFFFFFFL
11062//SPI_SHADER_USER_DATA_PS_30
11063#define SPI_SHADER_USER_DATA_PS_30__DATA__SHIFT 0x0
11064#define SPI_SHADER_USER_DATA_PS_30__DATA_MASK 0xFFFFFFFFL
11065//SPI_SHADER_USER_DATA_PS_31
11066#define SPI_SHADER_USER_DATA_PS_31__DATA__SHIFT 0x0
11067#define SPI_SHADER_USER_DATA_PS_31__DATA_MASK 0xFFFFFFFFL
11068//SPI_SHADER_PGM_RSRC3_VS
11069#define SPI_SHADER_PGM_RSRC3_VS__CU_EN__SHIFT 0x0
11070#define SPI_SHADER_PGM_RSRC3_VS__WAVE_LIMIT__SHIFT 0x10
11071#define SPI_SHADER_PGM_RSRC3_VS__LOCK_LOW_THRESHOLD__SHIFT 0x16
11072#define SPI_SHADER_PGM_RSRC3_VS__SIMD_DISABLE__SHIFT 0x1a
11073#define SPI_SHADER_PGM_RSRC3_VS__CU_EN_MASK 0x0000FFFFL
11074#define SPI_SHADER_PGM_RSRC3_VS__WAVE_LIMIT_MASK 0x003F0000L
11075#define SPI_SHADER_PGM_RSRC3_VS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
11076#define SPI_SHADER_PGM_RSRC3_VS__SIMD_DISABLE_MASK 0x3C000000L
11077//SPI_SHADER_LATE_ALLOC_VS
11078#define SPI_SHADER_LATE_ALLOC_VS__LIMIT__SHIFT 0x0
11079#define SPI_SHADER_LATE_ALLOC_VS__LIMIT_MASK 0x0000003FL
11080//SPI_SHADER_PGM_LO_VS
11081#define SPI_SHADER_PGM_LO_VS__MEM_BASE__SHIFT 0x0
11082#define SPI_SHADER_PGM_LO_VS__MEM_BASE_MASK 0xFFFFFFFFL
11083//SPI_SHADER_PGM_HI_VS
11084#define SPI_SHADER_PGM_HI_VS__MEM_BASE__SHIFT 0x0
11085#define SPI_SHADER_PGM_HI_VS__MEM_BASE_MASK 0xFFL
11086//SPI_SHADER_PGM_RSRC1_VS
11087#define SPI_SHADER_PGM_RSRC1_VS__VGPRS__SHIFT 0x0
11088#define SPI_SHADER_PGM_RSRC1_VS__SGPRS__SHIFT 0x6
11089#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY__SHIFT 0xa
11090#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE__SHIFT 0xc
11091#define SPI_SHADER_PGM_RSRC1_VS__PRIV__SHIFT 0x14
11092#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP__SHIFT 0x15
11093#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE__SHIFT 0x17
11094#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT__SHIFT 0x18
11095#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE__SHIFT 0x1a
11096#define SPI_SHADER_PGM_RSRC1_VS__FP16_OVFL__SHIFT 0x1f
11097#define SPI_SHADER_PGM_RSRC1_VS__VGPRS_MASK 0x0000003FL
11098#define SPI_SHADER_PGM_RSRC1_VS__SGPRS_MASK 0x000003C0L
11099#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY_MASK 0x00000C00L
11100#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE_MASK 0x000FF000L
11101#define SPI_SHADER_PGM_RSRC1_VS__PRIV_MASK 0x00100000L
11102#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP_MASK 0x00200000L
11103#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE_MASK 0x00800000L
11104#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT_MASK 0x03000000L
11105#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE_MASK 0x04000000L
11106#define SPI_SHADER_PGM_RSRC1_VS__FP16_OVFL_MASK 0x80000000L
11107//SPI_SHADER_PGM_RSRC2_VS
11108#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN__SHIFT 0x0
11109#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR__SHIFT 0x1
11110#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT__SHIFT 0x6
11111#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN__SHIFT 0x7
11112#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN__SHIFT 0x8
11113#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN__SHIFT 0x9
11114#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN__SHIFT 0xa
11115#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN__SHIFT 0xb
11116#define SPI_SHADER_PGM_RSRC2_VS__SO_EN__SHIFT 0xc
11117#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN__SHIFT 0xd
11118#define SPI_SHADER_PGM_RSRC2_VS__PC_BASE_EN__SHIFT 0x16
11119#define SPI_SHADER_PGM_RSRC2_VS__DISPATCH_DRAW_EN__SHIFT 0x18
11120#define SPI_SHADER_PGM_RSRC2_VS__SKIP_USGPR0__SHIFT 0x1b
11121#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MSB__SHIFT 0x1c
11122#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN_MASK 0x00000001L
11123#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MASK 0x0000003EL
11124#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT_MASK 0x00000040L
11125#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN_MASK 0x00000080L
11126#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN_MASK 0x00000100L
11127#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN_MASK 0x00000200L
11128#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN_MASK 0x00000400L
11129#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN_MASK 0x00000800L
11130#define SPI_SHADER_PGM_RSRC2_VS__SO_EN_MASK 0x00001000L
11131#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN_MASK 0x003FE000L
11132#define SPI_SHADER_PGM_RSRC2_VS__PC_BASE_EN_MASK 0x00400000L
11133#define SPI_SHADER_PGM_RSRC2_VS__DISPATCH_DRAW_EN_MASK 0x01000000L
11134#define SPI_SHADER_PGM_RSRC2_VS__SKIP_USGPR0_MASK 0x08000000L
11135#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MSB_MASK 0x10000000L
11136//SPI_SHADER_USER_DATA_VS_0
11137#define SPI_SHADER_USER_DATA_VS_0__DATA__SHIFT 0x0
11138#define SPI_SHADER_USER_DATA_VS_0__DATA_MASK 0xFFFFFFFFL
11139//SPI_SHADER_USER_DATA_VS_1
11140#define SPI_SHADER_USER_DATA_VS_1__DATA__SHIFT 0x0
11141#define SPI_SHADER_USER_DATA_VS_1__DATA_MASK 0xFFFFFFFFL
11142//SPI_SHADER_USER_DATA_VS_2
11143#define SPI_SHADER_USER_DATA_VS_2__DATA__SHIFT 0x0
11144#define SPI_SHADER_USER_DATA_VS_2__DATA_MASK 0xFFFFFFFFL
11145//SPI_SHADER_USER_DATA_VS_3
11146#define SPI_SHADER_USER_DATA_VS_3__DATA__SHIFT 0x0
11147#define SPI_SHADER_USER_DATA_VS_3__DATA_MASK 0xFFFFFFFFL
11148//SPI_SHADER_USER_DATA_VS_4
11149#define SPI_SHADER_USER_DATA_VS_4__DATA__SHIFT 0x0
11150#define SPI_SHADER_USER_DATA_VS_4__DATA_MASK 0xFFFFFFFFL
11151//SPI_SHADER_USER_DATA_VS_5
11152#define SPI_SHADER_USER_DATA_VS_5__DATA__SHIFT 0x0
11153#define SPI_SHADER_USER_DATA_VS_5__DATA_MASK 0xFFFFFFFFL
11154//SPI_SHADER_USER_DATA_VS_6
11155#define SPI_SHADER_USER_DATA_VS_6__DATA__SHIFT 0x0
11156#define SPI_SHADER_USER_DATA_VS_6__DATA_MASK 0xFFFFFFFFL
11157//SPI_SHADER_USER_DATA_VS_7
11158#define SPI_SHADER_USER_DATA_VS_7__DATA__SHIFT 0x0
11159#define SPI_SHADER_USER_DATA_VS_7__DATA_MASK 0xFFFFFFFFL
11160//SPI_SHADER_USER_DATA_VS_8
11161#define SPI_SHADER_USER_DATA_VS_8__DATA__SHIFT 0x0
11162#define SPI_SHADER_USER_DATA_VS_8__DATA_MASK 0xFFFFFFFFL
11163//SPI_SHADER_USER_DATA_VS_9
11164#define SPI_SHADER_USER_DATA_VS_9__DATA__SHIFT 0x0
11165#define SPI_SHADER_USER_DATA_VS_9__DATA_MASK 0xFFFFFFFFL
11166//SPI_SHADER_USER_DATA_VS_10
11167#define SPI_SHADER_USER_DATA_VS_10__DATA__SHIFT 0x0
11168#define SPI_SHADER_USER_DATA_VS_10__DATA_MASK 0xFFFFFFFFL
11169//SPI_SHADER_USER_DATA_VS_11
11170#define SPI_SHADER_USER_DATA_VS_11__DATA__SHIFT 0x0
11171#define SPI_SHADER_USER_DATA_VS_11__DATA_MASK 0xFFFFFFFFL
11172//SPI_SHADER_USER_DATA_VS_12
11173#define SPI_SHADER_USER_DATA_VS_12__DATA__SHIFT 0x0
11174#define SPI_SHADER_USER_DATA_VS_12__DATA_MASK 0xFFFFFFFFL
11175//SPI_SHADER_USER_DATA_VS_13
11176#define SPI_SHADER_USER_DATA_VS_13__DATA__SHIFT 0x0
11177#define SPI_SHADER_USER_DATA_VS_13__DATA_MASK 0xFFFFFFFFL
11178//SPI_SHADER_USER_DATA_VS_14
11179#define SPI_SHADER_USER_DATA_VS_14__DATA__SHIFT 0x0
11180#define SPI_SHADER_USER_DATA_VS_14__DATA_MASK 0xFFFFFFFFL
11181//SPI_SHADER_USER_DATA_VS_15
11182#define SPI_SHADER_USER_DATA_VS_15__DATA__SHIFT 0x0
11183#define SPI_SHADER_USER_DATA_VS_15__DATA_MASK 0xFFFFFFFFL
11184//SPI_SHADER_USER_DATA_VS_16
11185#define SPI_SHADER_USER_DATA_VS_16__DATA__SHIFT 0x0
11186#define SPI_SHADER_USER_DATA_VS_16__DATA_MASK 0xFFFFFFFFL
11187//SPI_SHADER_USER_DATA_VS_17
11188#define SPI_SHADER_USER_DATA_VS_17__DATA__SHIFT 0x0
11189#define SPI_SHADER_USER_DATA_VS_17__DATA_MASK 0xFFFFFFFFL
11190//SPI_SHADER_USER_DATA_VS_18
11191#define SPI_SHADER_USER_DATA_VS_18__DATA__SHIFT 0x0
11192#define SPI_SHADER_USER_DATA_VS_18__DATA_MASK 0xFFFFFFFFL
11193//SPI_SHADER_USER_DATA_VS_19
11194#define SPI_SHADER_USER_DATA_VS_19__DATA__SHIFT 0x0
11195#define SPI_SHADER_USER_DATA_VS_19__DATA_MASK 0xFFFFFFFFL
11196//SPI_SHADER_USER_DATA_VS_20
11197#define SPI_SHADER_USER_DATA_VS_20__DATA__SHIFT 0x0
11198#define SPI_SHADER_USER_DATA_VS_20__DATA_MASK 0xFFFFFFFFL
11199//SPI_SHADER_USER_DATA_VS_21
11200#define SPI_SHADER_USER_DATA_VS_21__DATA__SHIFT 0x0
11201#define SPI_SHADER_USER_DATA_VS_21__DATA_MASK 0xFFFFFFFFL
11202//SPI_SHADER_USER_DATA_VS_22
11203#define SPI_SHADER_USER_DATA_VS_22__DATA__SHIFT 0x0
11204#define SPI_SHADER_USER_DATA_VS_22__DATA_MASK 0xFFFFFFFFL
11205//SPI_SHADER_USER_DATA_VS_23
11206#define SPI_SHADER_USER_DATA_VS_23__DATA__SHIFT 0x0
11207#define SPI_SHADER_USER_DATA_VS_23__DATA_MASK 0xFFFFFFFFL
11208//SPI_SHADER_USER_DATA_VS_24
11209#define SPI_SHADER_USER_DATA_VS_24__DATA__SHIFT 0x0
11210#define SPI_SHADER_USER_DATA_VS_24__DATA_MASK 0xFFFFFFFFL
11211//SPI_SHADER_USER_DATA_VS_25
11212#define SPI_SHADER_USER_DATA_VS_25__DATA__SHIFT 0x0
11213#define SPI_SHADER_USER_DATA_VS_25__DATA_MASK 0xFFFFFFFFL
11214//SPI_SHADER_USER_DATA_VS_26
11215#define SPI_SHADER_USER_DATA_VS_26__DATA__SHIFT 0x0
11216#define SPI_SHADER_USER_DATA_VS_26__DATA_MASK 0xFFFFFFFFL
11217//SPI_SHADER_USER_DATA_VS_27
11218#define SPI_SHADER_USER_DATA_VS_27__DATA__SHIFT 0x0
11219#define SPI_SHADER_USER_DATA_VS_27__DATA_MASK 0xFFFFFFFFL
11220//SPI_SHADER_USER_DATA_VS_28
11221#define SPI_SHADER_USER_DATA_VS_28__DATA__SHIFT 0x0
11222#define SPI_SHADER_USER_DATA_VS_28__DATA_MASK 0xFFFFFFFFL
11223//SPI_SHADER_USER_DATA_VS_29
11224#define SPI_SHADER_USER_DATA_VS_29__DATA__SHIFT 0x0
11225#define SPI_SHADER_USER_DATA_VS_29__DATA_MASK 0xFFFFFFFFL
11226//SPI_SHADER_USER_DATA_VS_30
11227#define SPI_SHADER_USER_DATA_VS_30__DATA__SHIFT 0x0
11228#define SPI_SHADER_USER_DATA_VS_30__DATA_MASK 0xFFFFFFFFL
11229//SPI_SHADER_USER_DATA_VS_31
11230#define SPI_SHADER_USER_DATA_VS_31__DATA__SHIFT 0x0
11231#define SPI_SHADER_USER_DATA_VS_31__DATA_MASK 0xFFFFFFFFL
11232//SPI_SHADER_PGM_RSRC2_GS_VS
11233#define SPI_SHADER_PGM_RSRC2_GS_VS__SCRATCH_EN__SHIFT 0x0
11234#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR__SHIFT 0x1
11235#define SPI_SHADER_PGM_RSRC2_GS_VS__TRAP_PRESENT__SHIFT 0x6
11236#define SPI_SHADER_PGM_RSRC2_GS_VS__EXCP_EN__SHIFT 0x7
11237#define SPI_SHADER_PGM_RSRC2_GS_VS__VGPR_COMP_CNT__SHIFT 0x10
11238#define SPI_SHADER_PGM_RSRC2_GS_VS__OC_LDS_EN__SHIFT 0x12
11239#define SPI_SHADER_PGM_RSRC2_GS_VS__LDS_SIZE__SHIFT 0x13
11240#define SPI_SHADER_PGM_RSRC2_GS_VS__SKIP_USGPR0__SHIFT 0x1b
11241#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MSB__SHIFT 0x1c
11242#define SPI_SHADER_PGM_RSRC2_GS_VS__SCRATCH_EN_MASK 0x00000001L
11243#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MASK 0x0000003EL
11244#define SPI_SHADER_PGM_RSRC2_GS_VS__TRAP_PRESENT_MASK 0x00000040L
11245#define SPI_SHADER_PGM_RSRC2_GS_VS__EXCP_EN_MASK 0x0000FF80L
11246#define SPI_SHADER_PGM_RSRC2_GS_VS__VGPR_COMP_CNT_MASK 0x00030000L
11247#define SPI_SHADER_PGM_RSRC2_GS_VS__OC_LDS_EN_MASK 0x00040000L
11248#define SPI_SHADER_PGM_RSRC2_GS_VS__LDS_SIZE_MASK 0x07F80000L
11249#define SPI_SHADER_PGM_RSRC2_GS_VS__SKIP_USGPR0_MASK 0x08000000L
11250#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MSB_MASK 0x10000000L
11251//SPI_SHADER_PGM_RSRC4_GS
11252#define SPI_SHADER_PGM_RSRC4_GS__GROUP_FIFO_DEPTH__SHIFT 0x0
11253#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS__SHIFT 0x7
11254#define SPI_SHADER_PGM_RSRC4_GS__GROUP_FIFO_DEPTH_MASK 0x0000007FL
11255#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS_MASK 0x00003F80L
11256//SPI_SHADER_USER_DATA_ADDR_LO_GS
11257#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE__SHIFT 0x0
11258#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
11259//SPI_SHADER_USER_DATA_ADDR_HI_GS
11260#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE__SHIFT 0x0
11261#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE_MASK 0xFFFFFFFFL
11262//SPI_SHADER_PGM_LO_ES
11263#define SPI_SHADER_PGM_LO_ES__MEM_BASE__SHIFT 0x0
11264#define SPI_SHADER_PGM_LO_ES__MEM_BASE_MASK 0xFFFFFFFFL
11265//SPI_SHADER_PGM_HI_ES
11266#define SPI_SHADER_PGM_HI_ES__MEM_BASE__SHIFT 0x0
11267#define SPI_SHADER_PGM_HI_ES__MEM_BASE_MASK 0xFFL
11268//SPI_SHADER_PGM_RSRC3_GS
11269#define SPI_SHADER_PGM_RSRC3_GS__CU_EN__SHIFT 0x0
11270#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT__SHIFT 0x10
11271#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD__SHIFT 0x16
11272#define SPI_SHADER_PGM_RSRC3_GS__SIMD_DISABLE__SHIFT 0x1a
11273#define SPI_SHADER_PGM_RSRC3_GS__CU_EN_MASK 0x0000FFFFL
11274#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT_MASK 0x003F0000L
11275#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
11276#define SPI_SHADER_PGM_RSRC3_GS__SIMD_DISABLE_MASK 0x3C000000L
11277//SPI_SHADER_PGM_LO_GS
11278#define SPI_SHADER_PGM_LO_GS__MEM_BASE__SHIFT 0x0
11279#define SPI_SHADER_PGM_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
11280//SPI_SHADER_PGM_HI_GS
11281#define SPI_SHADER_PGM_HI_GS__MEM_BASE__SHIFT 0x0
11282#define SPI_SHADER_PGM_HI_GS__MEM_BASE_MASK 0xFFL
11283//SPI_SHADER_PGM_RSRC1_GS
11284#define SPI_SHADER_PGM_RSRC1_GS__VGPRS__SHIFT 0x0
11285#define SPI_SHADER_PGM_RSRC1_GS__SGPRS__SHIFT 0x6
11286#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY__SHIFT 0xa
11287#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE__SHIFT 0xc
11288#define SPI_SHADER_PGM_RSRC1_GS__PRIV__SHIFT 0x14
11289#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP__SHIFT 0x15
11290#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE__SHIFT 0x17
11291#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE__SHIFT 0x18
11292#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT__SHIFT 0x1d
11293#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL__SHIFT 0x1f
11294#define SPI_SHADER_PGM_RSRC1_GS__VGPRS_MASK 0x0000003FL
11295#define SPI_SHADER_PGM_RSRC1_GS__SGPRS_MASK 0x000003C0L
11296#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY_MASK 0x00000C00L
11297#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE_MASK 0x000FF000L
11298#define SPI_SHADER_PGM_RSRC1_GS__PRIV_MASK 0x00100000L
11299#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP_MASK 0x00200000L
11300#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE_MASK 0x00800000L
11301#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE_MASK 0x01000000L
11302#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT_MASK 0x60000000L
11303#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL_MASK 0x80000000L
11304//SPI_SHADER_PGM_RSRC2_GS
11305#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN__SHIFT 0x0
11306#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR__SHIFT 0x1
11307#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT__SHIFT 0x6
11308#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN__SHIFT 0x7
11309#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT__SHIFT 0x10
11310#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN__SHIFT 0x12
11311#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE__SHIFT 0x13
11312#define SPI_SHADER_PGM_RSRC2_GS__SKIP_USGPR0__SHIFT 0x1b
11313#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB__SHIFT 0x1c
11314#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN_MASK 0x00000001L
11315#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MASK 0x0000003EL
11316#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT_MASK 0x00000040L
11317#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN_MASK 0x0000FF80L
11318#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT_MASK 0x00030000L
11319#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN_MASK 0x00040000L
11320#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE_MASK 0x07F80000L
11321#define SPI_SHADER_PGM_RSRC2_GS__SKIP_USGPR0_MASK 0x08000000L
11322#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB_MASK 0x10000000L
11323//SPI_SHADER_USER_DATA_ES_0
11324#define SPI_SHADER_USER_DATA_ES_0__DATA__SHIFT 0x0
11325#define SPI_SHADER_USER_DATA_ES_0__DATA_MASK 0xFFFFFFFFL
11326//SPI_SHADER_USER_DATA_ES_1
11327#define SPI_SHADER_USER_DATA_ES_1__DATA__SHIFT 0x0
11328#define SPI_SHADER_USER_DATA_ES_1__DATA_MASK 0xFFFFFFFFL
11329//SPI_SHADER_USER_DATA_ES_2
11330#define SPI_SHADER_USER_DATA_ES_2__DATA__SHIFT 0x0
11331#define SPI_SHADER_USER_DATA_ES_2__DATA_MASK 0xFFFFFFFFL
11332//SPI_SHADER_USER_DATA_ES_3
11333#define SPI_SHADER_USER_DATA_ES_3__DATA__SHIFT 0x0
11334#define SPI_SHADER_USER_DATA_ES_3__DATA_MASK 0xFFFFFFFFL
11335//SPI_SHADER_USER_DATA_ES_4
11336#define SPI_SHADER_USER_DATA_ES_4__DATA__SHIFT 0x0
11337#define SPI_SHADER_USER_DATA_ES_4__DATA_MASK 0xFFFFFFFFL
11338//SPI_SHADER_USER_DATA_ES_5
11339#define SPI_SHADER_USER_DATA_ES_5__DATA__SHIFT 0x0
11340#define SPI_SHADER_USER_DATA_ES_5__DATA_MASK 0xFFFFFFFFL
11341//SPI_SHADER_USER_DATA_ES_6
11342#define SPI_SHADER_USER_DATA_ES_6__DATA__SHIFT 0x0
11343#define SPI_SHADER_USER_DATA_ES_6__DATA_MASK 0xFFFFFFFFL
11344//SPI_SHADER_USER_DATA_ES_7
11345#define SPI_SHADER_USER_DATA_ES_7__DATA__SHIFT 0x0
11346#define SPI_SHADER_USER_DATA_ES_7__DATA_MASK 0xFFFFFFFFL
11347//SPI_SHADER_USER_DATA_ES_8
11348#define SPI_SHADER_USER_DATA_ES_8__DATA__SHIFT 0x0
11349#define SPI_SHADER_USER_DATA_ES_8__DATA_MASK 0xFFFFFFFFL
11350//SPI_SHADER_USER_DATA_ES_9
11351#define SPI_SHADER_USER_DATA_ES_9__DATA__SHIFT 0x0
11352#define SPI_SHADER_USER_DATA_ES_9__DATA_MASK 0xFFFFFFFFL
11353//SPI_SHADER_USER_DATA_ES_10
11354#define SPI_SHADER_USER_DATA_ES_10__DATA__SHIFT 0x0
11355#define SPI_SHADER_USER_DATA_ES_10__DATA_MASK 0xFFFFFFFFL
11356//SPI_SHADER_USER_DATA_ES_11
11357#define SPI_SHADER_USER_DATA_ES_11__DATA__SHIFT 0x0
11358#define SPI_SHADER_USER_DATA_ES_11__DATA_MASK 0xFFFFFFFFL
11359//SPI_SHADER_USER_DATA_ES_12
11360#define SPI_SHADER_USER_DATA_ES_12__DATA__SHIFT 0x0
11361#define SPI_SHADER_USER_DATA_ES_12__DATA_MASK 0xFFFFFFFFL
11362//SPI_SHADER_USER_DATA_ES_13
11363#define SPI_SHADER_USER_DATA_ES_13__DATA__SHIFT 0x0
11364#define SPI_SHADER_USER_DATA_ES_13__DATA_MASK 0xFFFFFFFFL
11365//SPI_SHADER_USER_DATA_ES_14
11366#define SPI_SHADER_USER_DATA_ES_14__DATA__SHIFT 0x0
11367#define SPI_SHADER_USER_DATA_ES_14__DATA_MASK 0xFFFFFFFFL
11368//SPI_SHADER_USER_DATA_ES_15
11369#define SPI_SHADER_USER_DATA_ES_15__DATA__SHIFT 0x0
11370#define SPI_SHADER_USER_DATA_ES_15__DATA_MASK 0xFFFFFFFFL
11371//SPI_SHADER_USER_DATA_ES_16
11372#define SPI_SHADER_USER_DATA_ES_16__DATA__SHIFT 0x0
11373#define SPI_SHADER_USER_DATA_ES_16__DATA_MASK 0xFFFFFFFFL
11374//SPI_SHADER_USER_DATA_ES_17
11375#define SPI_SHADER_USER_DATA_ES_17__DATA__SHIFT 0x0
11376#define SPI_SHADER_USER_DATA_ES_17__DATA_MASK 0xFFFFFFFFL
11377//SPI_SHADER_USER_DATA_ES_18
11378#define SPI_SHADER_USER_DATA_ES_18__DATA__SHIFT 0x0
11379#define SPI_SHADER_USER_DATA_ES_18__DATA_MASK 0xFFFFFFFFL
11380//SPI_SHADER_USER_DATA_ES_19
11381#define SPI_SHADER_USER_DATA_ES_19__DATA__SHIFT 0x0
11382#define SPI_SHADER_USER_DATA_ES_19__DATA_MASK 0xFFFFFFFFL
11383//SPI_SHADER_USER_DATA_ES_20
11384#define SPI_SHADER_USER_DATA_ES_20__DATA__SHIFT 0x0
11385#define SPI_SHADER_USER_DATA_ES_20__DATA_MASK 0xFFFFFFFFL
11386//SPI_SHADER_USER_DATA_ES_21
11387#define SPI_SHADER_USER_DATA_ES_21__DATA__SHIFT 0x0
11388#define SPI_SHADER_USER_DATA_ES_21__DATA_MASK 0xFFFFFFFFL
11389//SPI_SHADER_USER_DATA_ES_22
11390#define SPI_SHADER_USER_DATA_ES_22__DATA__SHIFT 0x0
11391#define SPI_SHADER_USER_DATA_ES_22__DATA_MASK 0xFFFFFFFFL
11392//SPI_SHADER_USER_DATA_ES_23
11393#define SPI_SHADER_USER_DATA_ES_23__DATA__SHIFT 0x0
11394#define SPI_SHADER_USER_DATA_ES_23__DATA_MASK 0xFFFFFFFFL
11395//SPI_SHADER_USER_DATA_ES_24
11396#define SPI_SHADER_USER_DATA_ES_24__DATA__SHIFT 0x0
11397#define SPI_SHADER_USER_DATA_ES_24__DATA_MASK 0xFFFFFFFFL
11398//SPI_SHADER_USER_DATA_ES_25
11399#define SPI_SHADER_USER_DATA_ES_25__DATA__SHIFT 0x0
11400#define SPI_SHADER_USER_DATA_ES_25__DATA_MASK 0xFFFFFFFFL
11401//SPI_SHADER_USER_DATA_ES_26
11402#define SPI_SHADER_USER_DATA_ES_26__DATA__SHIFT 0x0
11403#define SPI_SHADER_USER_DATA_ES_26__DATA_MASK 0xFFFFFFFFL
11404//SPI_SHADER_USER_DATA_ES_27
11405#define SPI_SHADER_USER_DATA_ES_27__DATA__SHIFT 0x0
11406#define SPI_SHADER_USER_DATA_ES_27__DATA_MASK 0xFFFFFFFFL
11407//SPI_SHADER_USER_DATA_ES_28
11408#define SPI_SHADER_USER_DATA_ES_28__DATA__SHIFT 0x0
11409#define SPI_SHADER_USER_DATA_ES_28__DATA_MASK 0xFFFFFFFFL
11410//SPI_SHADER_USER_DATA_ES_29
11411#define SPI_SHADER_USER_DATA_ES_29__DATA__SHIFT 0x0
11412#define SPI_SHADER_USER_DATA_ES_29__DATA_MASK 0xFFFFFFFFL
11413//SPI_SHADER_USER_DATA_ES_30
11414#define SPI_SHADER_USER_DATA_ES_30__DATA__SHIFT 0x0
11415#define SPI_SHADER_USER_DATA_ES_30__DATA_MASK 0xFFFFFFFFL
11416//SPI_SHADER_USER_DATA_ES_31
11417#define SPI_SHADER_USER_DATA_ES_31__DATA__SHIFT 0x0
11418#define SPI_SHADER_USER_DATA_ES_31__DATA_MASK 0xFFFFFFFFL
11419//SPI_SHADER_PGM_RSRC4_HS
11420#define SPI_SHADER_PGM_RSRC4_HS__GROUP_FIFO_DEPTH__SHIFT 0x0
11421#define SPI_SHADER_PGM_RSRC4_HS__GROUP_FIFO_DEPTH_MASK 0x0000007FL
11422//SPI_SHADER_USER_DATA_ADDR_LO_HS
11423#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE__SHIFT 0x0
11424#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
11425//SPI_SHADER_USER_DATA_ADDR_HI_HS
11426#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE__SHIFT 0x0
11427#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE_MASK 0xFFFFFFFFL
11428//SPI_SHADER_PGM_LO_LS
11429#define SPI_SHADER_PGM_LO_LS__MEM_BASE__SHIFT 0x0
11430#define SPI_SHADER_PGM_LO_LS__MEM_BASE_MASK 0xFFFFFFFFL
11431//SPI_SHADER_PGM_HI_LS
11432#define SPI_SHADER_PGM_HI_LS__MEM_BASE__SHIFT 0x0
11433#define SPI_SHADER_PGM_HI_LS__MEM_BASE_MASK 0xFFL
11434//SPI_SHADER_PGM_RSRC3_HS
11435#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT__SHIFT 0x0
11436#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD__SHIFT 0x6
11437#define SPI_SHADER_PGM_RSRC3_HS__SIMD_DISABLE__SHIFT 0xa
11438#define SPI_SHADER_PGM_RSRC3_HS__CU_EN__SHIFT 0x10
11439#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT_MASK 0x0000003FL
11440#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD_MASK 0x000003C0L
11441#define SPI_SHADER_PGM_RSRC3_HS__SIMD_DISABLE_MASK 0x00003C00L
11442#define SPI_SHADER_PGM_RSRC3_HS__CU_EN_MASK 0xFFFF0000L
11443//SPI_SHADER_PGM_LO_HS
11444#define SPI_SHADER_PGM_LO_HS__MEM_BASE__SHIFT 0x0
11445#define SPI_SHADER_PGM_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
11446//SPI_SHADER_PGM_HI_HS
11447#define SPI_SHADER_PGM_HI_HS__MEM_BASE__SHIFT 0x0
11448#define SPI_SHADER_PGM_HI_HS__MEM_BASE_MASK 0xFFL
11449//SPI_SHADER_PGM_RSRC1_HS
11450#define SPI_SHADER_PGM_RSRC1_HS__VGPRS__SHIFT 0x0
11451#define SPI_SHADER_PGM_RSRC1_HS__SGPRS__SHIFT 0x6
11452#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY__SHIFT 0xa
11453#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE__SHIFT 0xc
11454#define SPI_SHADER_PGM_RSRC1_HS__PRIV__SHIFT 0x14
11455#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP__SHIFT 0x15
11456#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE__SHIFT 0x17
11457#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT__SHIFT 0x1c
11458#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL__SHIFT 0x1e
11459#define SPI_SHADER_PGM_RSRC1_HS__VGPRS_MASK 0x0000003FL
11460#define SPI_SHADER_PGM_RSRC1_HS__SGPRS_MASK 0x000003C0L
11461#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY_MASK 0x00000C00L
11462#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE_MASK 0x000FF000L
11463#define SPI_SHADER_PGM_RSRC1_HS__PRIV_MASK 0x00100000L
11464#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP_MASK 0x00200000L
11465#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE_MASK 0x00800000L
11466#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT_MASK 0x30000000L
11467#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL_MASK 0x40000000L
11468//SPI_SHADER_PGM_RSRC2_HS
11469#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN__SHIFT 0x0
11470#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR__SHIFT 0x1
11471#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT__SHIFT 0x6
11472#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN__SHIFT 0x7
11473#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE__SHIFT 0x10
11474#define SPI_SHADER_PGM_RSRC2_HS__SKIP_USGPR0__SHIFT 0x1b
11475#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB__SHIFT 0x1c
11476#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN_MASK 0x00000001L
11477#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MASK 0x0000003EL
11478#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT_MASK 0x00000040L
11479#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN_MASK 0x0000FF80L
11480#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE_MASK 0x01FF0000L
11481#define SPI_SHADER_PGM_RSRC2_HS__SKIP_USGPR0_MASK 0x08000000L
11482#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB_MASK 0x10000000L
11483//SPI_SHADER_USER_DATA_LS_0
11484#define SPI_SHADER_USER_DATA_LS_0__DATA__SHIFT 0x0
11485#define SPI_SHADER_USER_DATA_LS_0__DATA_MASK 0xFFFFFFFFL
11486//SPI_SHADER_USER_DATA_LS_1
11487#define SPI_SHADER_USER_DATA_LS_1__DATA__SHIFT 0x0
11488#define SPI_SHADER_USER_DATA_LS_1__DATA_MASK 0xFFFFFFFFL
11489//SPI_SHADER_USER_DATA_LS_2
11490#define SPI_SHADER_USER_DATA_LS_2__DATA__SHIFT 0x0
11491#define SPI_SHADER_USER_DATA_LS_2__DATA_MASK 0xFFFFFFFFL
11492//SPI_SHADER_USER_DATA_LS_3
11493#define SPI_SHADER_USER_DATA_LS_3__DATA__SHIFT 0x0
11494#define SPI_SHADER_USER_DATA_LS_3__DATA_MASK 0xFFFFFFFFL
11495//SPI_SHADER_USER_DATA_LS_4
11496#define SPI_SHADER_USER_DATA_LS_4__DATA__SHIFT 0x0
11497#define SPI_SHADER_USER_DATA_LS_4__DATA_MASK 0xFFFFFFFFL
11498//SPI_SHADER_USER_DATA_LS_5
11499#define SPI_SHADER_USER_DATA_LS_5__DATA__SHIFT 0x0
11500#define SPI_SHADER_USER_DATA_LS_5__DATA_MASK 0xFFFFFFFFL
11501//SPI_SHADER_USER_DATA_LS_6
11502#define SPI_SHADER_USER_DATA_LS_6__DATA__SHIFT 0x0
11503#define SPI_SHADER_USER_DATA_LS_6__DATA_MASK 0xFFFFFFFFL
11504//SPI_SHADER_USER_DATA_LS_7
11505#define SPI_SHADER_USER_DATA_LS_7__DATA__SHIFT 0x0
11506#define SPI_SHADER_USER_DATA_LS_7__DATA_MASK 0xFFFFFFFFL
11507//SPI_SHADER_USER_DATA_LS_8
11508#define SPI_SHADER_USER_DATA_LS_8__DATA__SHIFT 0x0
11509#define SPI_SHADER_USER_DATA_LS_8__DATA_MASK 0xFFFFFFFFL
11510//SPI_SHADER_USER_DATA_LS_9
11511#define SPI_SHADER_USER_DATA_LS_9__DATA__SHIFT 0x0
11512#define SPI_SHADER_USER_DATA_LS_9__DATA_MASK 0xFFFFFFFFL
11513//SPI_SHADER_USER_DATA_LS_10
11514#define SPI_SHADER_USER_DATA_LS_10__DATA__SHIFT 0x0
11515#define SPI_SHADER_USER_DATA_LS_10__DATA_MASK 0xFFFFFFFFL
11516//SPI_SHADER_USER_DATA_LS_11
11517#define SPI_SHADER_USER_DATA_LS_11__DATA__SHIFT 0x0
11518#define SPI_SHADER_USER_DATA_LS_11__DATA_MASK 0xFFFFFFFFL
11519//SPI_SHADER_USER_DATA_LS_12
11520#define SPI_SHADER_USER_DATA_LS_12__DATA__SHIFT 0x0
11521#define SPI_SHADER_USER_DATA_LS_12__DATA_MASK 0xFFFFFFFFL
11522//SPI_SHADER_USER_DATA_LS_13
11523#define SPI_SHADER_USER_DATA_LS_13__DATA__SHIFT 0x0
11524#define SPI_SHADER_USER_DATA_LS_13__DATA_MASK 0xFFFFFFFFL
11525//SPI_SHADER_USER_DATA_LS_14
11526#define SPI_SHADER_USER_DATA_LS_14__DATA__SHIFT 0x0
11527#define SPI_SHADER_USER_DATA_LS_14__DATA_MASK 0xFFFFFFFFL
11528//SPI_SHADER_USER_DATA_LS_15
11529#define SPI_SHADER_USER_DATA_LS_15__DATA__SHIFT 0x0
11530#define SPI_SHADER_USER_DATA_LS_15__DATA_MASK 0xFFFFFFFFL
11531//SPI_SHADER_USER_DATA_LS_16
11532#define SPI_SHADER_USER_DATA_LS_16__DATA__SHIFT 0x0
11533#define SPI_SHADER_USER_DATA_LS_16__DATA_MASK 0xFFFFFFFFL
11534//SPI_SHADER_USER_DATA_LS_17
11535#define SPI_SHADER_USER_DATA_LS_17__DATA__SHIFT 0x0
11536#define SPI_SHADER_USER_DATA_LS_17__DATA_MASK 0xFFFFFFFFL
11537//SPI_SHADER_USER_DATA_LS_18
11538#define SPI_SHADER_USER_DATA_LS_18__DATA__SHIFT 0x0
11539#define SPI_SHADER_USER_DATA_LS_18__DATA_MASK 0xFFFFFFFFL
11540//SPI_SHADER_USER_DATA_LS_19
11541#define SPI_SHADER_USER_DATA_LS_19__DATA__SHIFT 0x0
11542#define SPI_SHADER_USER_DATA_LS_19__DATA_MASK 0xFFFFFFFFL
11543//SPI_SHADER_USER_DATA_LS_20
11544#define SPI_SHADER_USER_DATA_LS_20__DATA__SHIFT 0x0
11545#define SPI_SHADER_USER_DATA_LS_20__DATA_MASK 0xFFFFFFFFL
11546//SPI_SHADER_USER_DATA_LS_21
11547#define SPI_SHADER_USER_DATA_LS_21__DATA__SHIFT 0x0
11548#define SPI_SHADER_USER_DATA_LS_21__DATA_MASK 0xFFFFFFFFL
11549//SPI_SHADER_USER_DATA_LS_22
11550#define SPI_SHADER_USER_DATA_LS_22__DATA__SHIFT 0x0
11551#define SPI_SHADER_USER_DATA_LS_22__DATA_MASK 0xFFFFFFFFL
11552//SPI_SHADER_USER_DATA_LS_23
11553#define SPI_SHADER_USER_DATA_LS_23__DATA__SHIFT 0x0
11554#define SPI_SHADER_USER_DATA_LS_23__DATA_MASK 0xFFFFFFFFL
11555//SPI_SHADER_USER_DATA_LS_24
11556#define SPI_SHADER_USER_DATA_LS_24__DATA__SHIFT 0x0
11557#define SPI_SHADER_USER_DATA_LS_24__DATA_MASK 0xFFFFFFFFL
11558//SPI_SHADER_USER_DATA_LS_25
11559#define SPI_SHADER_USER_DATA_LS_25__DATA__SHIFT 0x0
11560#define SPI_SHADER_USER_DATA_LS_25__DATA_MASK 0xFFFFFFFFL
11561//SPI_SHADER_USER_DATA_LS_26
11562#define SPI_SHADER_USER_DATA_LS_26__DATA__SHIFT 0x0
11563#define SPI_SHADER_USER_DATA_LS_26__DATA_MASK 0xFFFFFFFFL
11564//SPI_SHADER_USER_DATA_LS_27
11565#define SPI_SHADER_USER_DATA_LS_27__DATA__SHIFT 0x0
11566#define SPI_SHADER_USER_DATA_LS_27__DATA_MASK 0xFFFFFFFFL
11567//SPI_SHADER_USER_DATA_LS_28
11568#define SPI_SHADER_USER_DATA_LS_28__DATA__SHIFT 0x0
11569#define SPI_SHADER_USER_DATA_LS_28__DATA_MASK 0xFFFFFFFFL
11570//SPI_SHADER_USER_DATA_LS_29
11571#define SPI_SHADER_USER_DATA_LS_29__DATA__SHIFT 0x0
11572#define SPI_SHADER_USER_DATA_LS_29__DATA_MASK 0xFFFFFFFFL
11573//SPI_SHADER_USER_DATA_LS_30
11574#define SPI_SHADER_USER_DATA_LS_30__DATA__SHIFT 0x0
11575#define SPI_SHADER_USER_DATA_LS_30__DATA_MASK 0xFFFFFFFFL
11576//SPI_SHADER_USER_DATA_LS_31
11577#define SPI_SHADER_USER_DATA_LS_31__DATA__SHIFT 0x0
11578#define SPI_SHADER_USER_DATA_LS_31__DATA_MASK 0xFFFFFFFFL
11579//SPI_SHADER_USER_DATA_COMMON_0
11580#define SPI_SHADER_USER_DATA_COMMON_0__DATA__SHIFT 0x0
11581#define SPI_SHADER_USER_DATA_COMMON_0__DATA_MASK 0xFFFFFFFFL
11582//SPI_SHADER_USER_DATA_COMMON_1
11583#define SPI_SHADER_USER_DATA_COMMON_1__DATA__SHIFT 0x0
11584#define SPI_SHADER_USER_DATA_COMMON_1__DATA_MASK 0xFFFFFFFFL
11585//SPI_SHADER_USER_DATA_COMMON_2
11586#define SPI_SHADER_USER_DATA_COMMON_2__DATA__SHIFT 0x0
11587#define SPI_SHADER_USER_DATA_COMMON_2__DATA_MASK 0xFFFFFFFFL
11588//SPI_SHADER_USER_DATA_COMMON_3
11589#define SPI_SHADER_USER_DATA_COMMON_3__DATA__SHIFT 0x0
11590#define SPI_SHADER_USER_DATA_COMMON_3__DATA_MASK 0xFFFFFFFFL
11591//SPI_SHADER_USER_DATA_COMMON_4
11592#define SPI_SHADER_USER_DATA_COMMON_4__DATA__SHIFT 0x0
11593#define SPI_SHADER_USER_DATA_COMMON_4__DATA_MASK 0xFFFFFFFFL
11594//SPI_SHADER_USER_DATA_COMMON_5
11595#define SPI_SHADER_USER_DATA_COMMON_5__DATA__SHIFT 0x0
11596#define SPI_SHADER_USER_DATA_COMMON_5__DATA_MASK 0xFFFFFFFFL
11597//SPI_SHADER_USER_DATA_COMMON_6
11598#define SPI_SHADER_USER_DATA_COMMON_6__DATA__SHIFT 0x0
11599#define SPI_SHADER_USER_DATA_COMMON_6__DATA_MASK 0xFFFFFFFFL
11600//SPI_SHADER_USER_DATA_COMMON_7
11601#define SPI_SHADER_USER_DATA_COMMON_7__DATA__SHIFT 0x0
11602#define SPI_SHADER_USER_DATA_COMMON_7__DATA_MASK 0xFFFFFFFFL
11603//SPI_SHADER_USER_DATA_COMMON_8
11604#define SPI_SHADER_USER_DATA_COMMON_8__DATA__SHIFT 0x0
11605#define SPI_SHADER_USER_DATA_COMMON_8__DATA_MASK 0xFFFFFFFFL
11606//SPI_SHADER_USER_DATA_COMMON_9
11607#define SPI_SHADER_USER_DATA_COMMON_9__DATA__SHIFT 0x0
11608#define SPI_SHADER_USER_DATA_COMMON_9__DATA_MASK 0xFFFFFFFFL
11609//SPI_SHADER_USER_DATA_COMMON_10
11610#define SPI_SHADER_USER_DATA_COMMON_10__DATA__SHIFT 0x0
11611#define SPI_SHADER_USER_DATA_COMMON_10__DATA_MASK 0xFFFFFFFFL
11612//SPI_SHADER_USER_DATA_COMMON_11
11613#define SPI_SHADER_USER_DATA_COMMON_11__DATA__SHIFT 0x0
11614#define SPI_SHADER_USER_DATA_COMMON_11__DATA_MASK 0xFFFFFFFFL
11615//SPI_SHADER_USER_DATA_COMMON_12
11616#define SPI_SHADER_USER_DATA_COMMON_12__DATA__SHIFT 0x0
11617#define SPI_SHADER_USER_DATA_COMMON_12__DATA_MASK 0xFFFFFFFFL
11618//SPI_SHADER_USER_DATA_COMMON_13
11619#define SPI_SHADER_USER_DATA_COMMON_13__DATA__SHIFT 0x0
11620#define SPI_SHADER_USER_DATA_COMMON_13__DATA_MASK 0xFFFFFFFFL
11621//SPI_SHADER_USER_DATA_COMMON_14
11622#define SPI_SHADER_USER_DATA_COMMON_14__DATA__SHIFT 0x0
11623#define SPI_SHADER_USER_DATA_COMMON_14__DATA_MASK 0xFFFFFFFFL
11624//SPI_SHADER_USER_DATA_COMMON_15
11625#define SPI_SHADER_USER_DATA_COMMON_15__DATA__SHIFT 0x0
11626#define SPI_SHADER_USER_DATA_COMMON_15__DATA_MASK 0xFFFFFFFFL
11627//SPI_SHADER_USER_DATA_COMMON_16
11628#define SPI_SHADER_USER_DATA_COMMON_16__DATA__SHIFT 0x0
11629#define SPI_SHADER_USER_DATA_COMMON_16__DATA_MASK 0xFFFFFFFFL
11630//SPI_SHADER_USER_DATA_COMMON_17
11631#define SPI_SHADER_USER_DATA_COMMON_17__DATA__SHIFT 0x0
11632#define SPI_SHADER_USER_DATA_COMMON_17__DATA_MASK 0xFFFFFFFFL
11633//SPI_SHADER_USER_DATA_COMMON_18
11634#define SPI_SHADER_USER_DATA_COMMON_18__DATA__SHIFT 0x0
11635#define SPI_SHADER_USER_DATA_COMMON_18__DATA_MASK 0xFFFFFFFFL
11636//SPI_SHADER_USER_DATA_COMMON_19
11637#define SPI_SHADER_USER_DATA_COMMON_19__DATA__SHIFT 0x0
11638#define SPI_SHADER_USER_DATA_COMMON_19__DATA_MASK 0xFFFFFFFFL
11639//SPI_SHADER_USER_DATA_COMMON_20
11640#define SPI_SHADER_USER_DATA_COMMON_20__DATA__SHIFT 0x0
11641#define SPI_SHADER_USER_DATA_COMMON_20__DATA_MASK 0xFFFFFFFFL
11642//SPI_SHADER_USER_DATA_COMMON_21
11643#define SPI_SHADER_USER_DATA_COMMON_21__DATA__SHIFT 0x0
11644#define SPI_SHADER_USER_DATA_COMMON_21__DATA_MASK 0xFFFFFFFFL
11645//SPI_SHADER_USER_DATA_COMMON_22
11646#define SPI_SHADER_USER_DATA_COMMON_22__DATA__SHIFT 0x0
11647#define SPI_SHADER_USER_DATA_COMMON_22__DATA_MASK 0xFFFFFFFFL
11648//SPI_SHADER_USER_DATA_COMMON_23
11649#define SPI_SHADER_USER_DATA_COMMON_23__DATA__SHIFT 0x0
11650#define SPI_SHADER_USER_DATA_COMMON_23__DATA_MASK 0xFFFFFFFFL
11651//SPI_SHADER_USER_DATA_COMMON_24
11652#define SPI_SHADER_USER_DATA_COMMON_24__DATA__SHIFT 0x0
11653#define SPI_SHADER_USER_DATA_COMMON_24__DATA_MASK 0xFFFFFFFFL
11654//SPI_SHADER_USER_DATA_COMMON_25
11655#define SPI_SHADER_USER_DATA_COMMON_25__DATA__SHIFT 0x0
11656#define SPI_SHADER_USER_DATA_COMMON_25__DATA_MASK 0xFFFFFFFFL
11657//SPI_SHADER_USER_DATA_COMMON_26
11658#define SPI_SHADER_USER_DATA_COMMON_26__DATA__SHIFT 0x0
11659#define SPI_SHADER_USER_DATA_COMMON_26__DATA_MASK 0xFFFFFFFFL
11660//SPI_SHADER_USER_DATA_COMMON_27
11661#define SPI_SHADER_USER_DATA_COMMON_27__DATA__SHIFT 0x0
11662#define SPI_SHADER_USER_DATA_COMMON_27__DATA_MASK 0xFFFFFFFFL
11663//SPI_SHADER_USER_DATA_COMMON_28
11664#define SPI_SHADER_USER_DATA_COMMON_28__DATA__SHIFT 0x0
11665#define SPI_SHADER_USER_DATA_COMMON_28__DATA_MASK 0xFFFFFFFFL
11666//SPI_SHADER_USER_DATA_COMMON_29
11667#define SPI_SHADER_USER_DATA_COMMON_29__DATA__SHIFT 0x0
11668#define SPI_SHADER_USER_DATA_COMMON_29__DATA_MASK 0xFFFFFFFFL
11669//SPI_SHADER_USER_DATA_COMMON_30
11670#define SPI_SHADER_USER_DATA_COMMON_30__DATA__SHIFT 0x0
11671#define SPI_SHADER_USER_DATA_COMMON_30__DATA_MASK 0xFFFFFFFFL
11672//SPI_SHADER_USER_DATA_COMMON_31
11673#define SPI_SHADER_USER_DATA_COMMON_31__DATA__SHIFT 0x0
11674#define SPI_SHADER_USER_DATA_COMMON_31__DATA_MASK 0xFFFFFFFFL
11675//COMPUTE_DISPATCH_INITIATOR
11676#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN__SHIFT 0x0
11677#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN__SHIFT 0x1
11678#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000__SHIFT 0x2
11679#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL__SHIFT 0x3
11680#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE__SHIFT 0x4
11681#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS__SHIFT 0x5
11682#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT 0x6
11683#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL__SHIFT 0xa
11684#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL__SHIFT 0xb
11685#define COMPUTE_DISPATCH_INITIATOR__RESERVED__SHIFT 0xc
11686#define COMPUTE_DISPATCH_INITIATOR__RESTORE__SHIFT 0xe
11687#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN_MASK 0x00000001L
11688#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN_MASK 0x00000002L
11689#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000_MASK 0x00000004L
11690#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL_MASK 0x00000008L
11691#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE_MASK 0x00000010L
11692#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS_MASK 0x00000020L
11693#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK 0x00000040L
11694#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL_MASK 0x00000400L
11695#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL_MASK 0x00000800L
11696#define COMPUTE_DISPATCH_INITIATOR__RESERVED_MASK 0x00001000L
11697#define COMPUTE_DISPATCH_INITIATOR__RESTORE_MASK 0x00004000L
11698//COMPUTE_DIM_X
11699#define COMPUTE_DIM_X__SIZE__SHIFT 0x0
11700#define COMPUTE_DIM_X__SIZE_MASK 0xFFFFFFFFL
11701//COMPUTE_DIM_Y
11702#define COMPUTE_DIM_Y__SIZE__SHIFT 0x0
11703#define COMPUTE_DIM_Y__SIZE_MASK 0xFFFFFFFFL
11704//COMPUTE_DIM_Z
11705#define COMPUTE_DIM_Z__SIZE__SHIFT 0x0
11706#define COMPUTE_DIM_Z__SIZE_MASK 0xFFFFFFFFL
11707//COMPUTE_START_X
11708#define COMPUTE_START_X__START__SHIFT 0x0
11709#define COMPUTE_START_X__START_MASK 0xFFFFFFFFL
11710//COMPUTE_START_Y
11711#define COMPUTE_START_Y__START__SHIFT 0x0
11712#define COMPUTE_START_Y__START_MASK 0xFFFFFFFFL
11713//COMPUTE_START_Z
11714#define COMPUTE_START_Z__START__SHIFT 0x0
11715#define COMPUTE_START_Z__START_MASK 0xFFFFFFFFL
11716//COMPUTE_NUM_THREAD_X
11717#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT 0x0
11718#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT 0x10
11719#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK 0x0000FFFFL
11720#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
11721//COMPUTE_NUM_THREAD_Y
11722#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL__SHIFT 0x0
11723#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL__SHIFT 0x10
11724#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL_MASK 0x0000FFFFL
11725#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
11726//COMPUTE_NUM_THREAD_Z
11727#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL__SHIFT 0x0
11728#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL__SHIFT 0x10
11729#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL_MASK 0x0000FFFFL
11730#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
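//
// Illustrative addition (hypothetical helper, not generated from the hardware
// spec): per the masks above, COMPUTE_NUM_THREAD_X/Y/Z each carry
// NUM_THREAD_FULL in the low 16 bits and NUM_THREAD_PARTIAL in the high
// 16 bits.  A minimal sketch of composing such a word from the defines:
static inline unsigned int example_pack_compute_num_thread_x(unsigned int full,
							      unsigned int partial)
{
	// Place each count in its field and mask off any out-of-range bits.
	return ((full << COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT) &
		COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK) |
	       ((partial << COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT) &
		COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK);
}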
11731//COMPUTE_PIPELINESTAT_ENABLE
11732#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE__SHIFT 0x0
11733#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE_MASK 0x00000001L
11734//COMPUTE_PERFCOUNT_ENABLE
11735#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE__SHIFT 0x0
11736#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE_MASK 0x00000001L
11737//COMPUTE_PGM_LO
11738#define COMPUTE_PGM_LO__DATA__SHIFT 0x0
11739#define COMPUTE_PGM_LO__DATA_MASK 0xFFFFFFFFL
11740//COMPUTE_PGM_HI
11741#define COMPUTE_PGM_HI__DATA__SHIFT 0x0
11742#define COMPUTE_PGM_HI__DATA_MASK 0x000000FFL
11743//COMPUTE_DISPATCH_PKT_ADDR_LO
11744#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA__SHIFT 0x0
11745#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA_MASK 0xFFFFFFFFL
11746//COMPUTE_DISPATCH_PKT_ADDR_HI
11747#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA__SHIFT 0x0
11748#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA_MASK 0x000000FFL
11749//COMPUTE_DISPATCH_SCRATCH_BASE_LO
11750#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA__SHIFT 0x0
11751#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA_MASK 0xFFFFFFFFL
11752//COMPUTE_DISPATCH_SCRATCH_BASE_HI
11753#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA__SHIFT 0x0
11754#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA_MASK 0x000000FFL
11755//COMPUTE_PGM_RSRC1
11756#define COMPUTE_PGM_RSRC1__VGPRS__SHIFT 0x0
11757#define COMPUTE_PGM_RSRC1__SGPRS__SHIFT 0x6
11758#define COMPUTE_PGM_RSRC1__PRIORITY__SHIFT 0xa
11759#define COMPUTE_PGM_RSRC1__FLOAT_MODE__SHIFT 0xc
11760#define COMPUTE_PGM_RSRC1__PRIV__SHIFT 0x14
11761#define COMPUTE_PGM_RSRC1__DX10_CLAMP__SHIFT 0x15
11762#define COMPUTE_PGM_RSRC1__IEEE_MODE__SHIFT 0x17
11763#define COMPUTE_PGM_RSRC1__BULKY__SHIFT 0x18
11764#define COMPUTE_PGM_RSRC1__FP16_OVFL__SHIFT 0x1a
11765#define COMPUTE_PGM_RSRC1__VGPRS_MASK 0x0000003FL
11766#define COMPUTE_PGM_RSRC1__SGPRS_MASK 0x000003C0L
11767#define COMPUTE_PGM_RSRC1__PRIORITY_MASK 0x00000C00L
11768#define COMPUTE_PGM_RSRC1__FLOAT_MODE_MASK 0x000FF000L
11769#define COMPUTE_PGM_RSRC1__PRIV_MASK 0x00100000L
11770#define COMPUTE_PGM_RSRC1__DX10_CLAMP_MASK 0x00200000L
11771#define COMPUTE_PGM_RSRC1__IEEE_MODE_MASK 0x00800000L
11772#define COMPUTE_PGM_RSRC1__BULKY_MASK 0x01000000L
11773#define COMPUTE_PGM_RSRC1__FP16_OVFL_MASK 0x04000000L
11774//COMPUTE_PGM_RSRC2
11775#define COMPUTE_PGM_RSRC2__SCRATCH_EN__SHIFT 0x0
11776#define COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT 0x1
11777#define COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT 0x6
11778#define COMPUTE_PGM_RSRC2__TGID_X_EN__SHIFT 0x7
11779#define COMPUTE_PGM_RSRC2__TGID_Y_EN__SHIFT 0x8
11780#define COMPUTE_PGM_RSRC2__TGID_Z_EN__SHIFT 0x9
11781#define COMPUTE_PGM_RSRC2__TG_SIZE_EN__SHIFT 0xa
11782#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT__SHIFT 0xb
11783#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB__SHIFT 0xd
11784#define COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT 0xf
11785#define COMPUTE_PGM_RSRC2__EXCP_EN__SHIFT 0x18
11786#define COMPUTE_PGM_RSRC2__SKIP_USGPR0__SHIFT 0x1f
11787#define COMPUTE_PGM_RSRC2__SCRATCH_EN_MASK 0x00000001L
11788#define COMPUTE_PGM_RSRC2__USER_SGPR_MASK 0x0000003EL
11789#define COMPUTE_PGM_RSRC2__TRAP_PRESENT_MASK 0x00000040L
11790#define COMPUTE_PGM_RSRC2__TGID_X_EN_MASK 0x00000080L
11791#define COMPUTE_PGM_RSRC2__TGID_Y_EN_MASK 0x00000100L
11792#define COMPUTE_PGM_RSRC2__TGID_Z_EN_MASK 0x00000200L
11793#define COMPUTE_PGM_RSRC2__TG_SIZE_EN_MASK 0x00000400L
11794#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT_MASK 0x00001800L
11795#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB_MASK 0x00006000L
11796#define COMPUTE_PGM_RSRC2__LDS_SIZE_MASK 0x00FF8000L
11797#define COMPUTE_PGM_RSRC2__EXCP_EN_MASK 0x7F000000L
11798#define COMPUTE_PGM_RSRC2__SKIP_USGPR0_MASK 0x80000000L
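//
// Illustrative addition (hypothetical helpers, not generated from the hardware
// spec): a sketch of decoding two COMPUTE_PGM_RSRC2 fields from a 32-bit
// register word.  Per the defines above, USER_SGPR is the 5-bit field at
// bit 1 and LDS_SIZE the 9-bit field at bit 15.
static inline unsigned int example_rsrc2_user_sgpr(unsigned int rsrc2)
{
	return (rsrc2 & COMPUTE_PGM_RSRC2__USER_SGPR_MASK) >>
	       COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT;
}
static inline unsigned int example_rsrc2_lds_size(unsigned int rsrc2)
{
	return (rsrc2 & COMPUTE_PGM_RSRC2__LDS_SIZE_MASK) >>
	       COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT;
}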
11799//COMPUTE_VMID
11800#define COMPUTE_VMID__DATA__SHIFT 0x0
11801#define COMPUTE_VMID__DATA_MASK 0x0000000FL
11802//COMPUTE_RESOURCE_LIMITS
11803#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH__SHIFT 0x0
11804#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU__SHIFT 0xc
11805#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD__SHIFT 0x10
11806#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL__SHIFT 0x16
11807#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST__SHIFT 0x17
11808#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT__SHIFT 0x18
11809#define COMPUTE_RESOURCE_LIMITS__SIMD_DISABLE__SHIFT 0x1b
11810#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH_MASK 0x000003FFL
11811#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU_MASK 0x0000F000L
11812#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD_MASK 0x003F0000L
11813#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL_MASK 0x00400000L
11814#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK 0x00800000L
11815#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT_MASK 0x07000000L
11816#define COMPUTE_RESOURCE_LIMITS__SIMD_DISABLE_MASK 0x78000000L
11817//COMPUTE_STATIC_THREAD_MGMT_SE0
11818#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN__SHIFT 0x0
11819#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN__SHIFT 0x10
11820#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN_MASK 0x0000FFFFL
11821#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN_MASK 0xFFFF0000L
11822//COMPUTE_STATIC_THREAD_MGMT_SE1
11823#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN__SHIFT 0x0
11824#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN__SHIFT 0x10
11825#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN_MASK 0x0000FFFFL
11826#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN_MASK 0xFFFF0000L
11827//COMPUTE_TMPRING_SIZE
11828#define COMPUTE_TMPRING_SIZE__WAVES__SHIFT 0x0
11829#define COMPUTE_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
11830#define COMPUTE_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
11831#define COMPUTE_TMPRING_SIZE__WAVESIZE_MASK 0x01FFF000L
11832//COMPUTE_STATIC_THREAD_MGMT_SE2
11833#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH0_CU_EN__SHIFT 0x0
11834#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH1_CU_EN__SHIFT 0x10
11835#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH0_CU_EN_MASK 0x0000FFFFL
11836#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH1_CU_EN_MASK 0xFFFF0000L
11837//COMPUTE_STATIC_THREAD_MGMT_SE3
11838#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH0_CU_EN__SHIFT 0x0
11839#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH1_CU_EN__SHIFT 0x10
11840#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH0_CU_EN_MASK 0x0000FFFFL
11841#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH1_CU_EN_MASK 0xFFFF0000L
11842//COMPUTE_RESTART_X
11843#define COMPUTE_RESTART_X__RESTART__SHIFT 0x0
11844#define COMPUTE_RESTART_X__RESTART_MASK 0xFFFFFFFFL
11845//COMPUTE_RESTART_Y
11846#define COMPUTE_RESTART_Y__RESTART__SHIFT 0x0
11847#define COMPUTE_RESTART_Y__RESTART_MASK 0xFFFFFFFFL
11848//COMPUTE_RESTART_Z
11849#define COMPUTE_RESTART_Z__RESTART__SHIFT 0x0
11850#define COMPUTE_RESTART_Z__RESTART_MASK 0xFFFFFFFFL
11851//COMPUTE_THREAD_TRACE_ENABLE
11852#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE__SHIFT 0x0
11853#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE_MASK 0x00000001L
11854//COMPUTE_MISC_RESERVED
11855#define COMPUTE_MISC_RESERVED__SEND_SEID__SHIFT 0x0
11856#define COMPUTE_MISC_RESERVED__RESERVED2__SHIFT 0x2
11857#define COMPUTE_MISC_RESERVED__RESERVED3__SHIFT 0x3
11858#define COMPUTE_MISC_RESERVED__RESERVED4__SHIFT 0x4
11859#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE__SHIFT 0x5
11860#define COMPUTE_MISC_RESERVED__SEND_SEID_MASK 0x00000003L
11861#define COMPUTE_MISC_RESERVED__RESERVED2_MASK 0x00000004L
11862#define COMPUTE_MISC_RESERVED__RESERVED3_MASK 0x00000008L
11863#define COMPUTE_MISC_RESERVED__RESERVED4_MASK 0x00000010L
11864#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE_MASK 0x0001FFE0L
11865//COMPUTE_DISPATCH_ID
11866#define COMPUTE_DISPATCH_ID__DISPATCH_ID__SHIFT 0x0
11867#define COMPUTE_DISPATCH_ID__DISPATCH_ID_MASK 0xFFFFFFFFL
11868//COMPUTE_THREADGROUP_ID
11869#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID__SHIFT 0x0
11870#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID_MASK 0xFFFFFFFFL
11871//COMPUTE_RELAUNCH
11872#define COMPUTE_RELAUNCH__PAYLOAD__SHIFT 0x0
11873#define COMPUTE_RELAUNCH__IS_EVENT__SHIFT 0x1e
11874#define COMPUTE_RELAUNCH__IS_STATE__SHIFT 0x1f
11875#define COMPUTE_RELAUNCH__PAYLOAD_MASK 0x3FFFFFFFL
11876#define COMPUTE_RELAUNCH__IS_EVENT_MASK 0x40000000L
11877#define COMPUTE_RELAUNCH__IS_STATE_MASK 0x80000000L
11878//COMPUTE_WAVE_RESTORE_ADDR_LO
11879#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR__SHIFT 0x0
11880#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR_MASK 0xFFFFFFFFL
11881//COMPUTE_WAVE_RESTORE_ADDR_HI
11882#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR__SHIFT 0x0
11883#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR_MASK 0xFFFFL
11884//COMPUTE_USER_DATA_0
11885#define COMPUTE_USER_DATA_0__DATA__SHIFT 0x0
11886#define COMPUTE_USER_DATA_0__DATA_MASK 0xFFFFFFFFL
11887//COMPUTE_USER_DATA_1
11888#define COMPUTE_USER_DATA_1__DATA__SHIFT 0x0
11889#define COMPUTE_USER_DATA_1__DATA_MASK 0xFFFFFFFFL
11890//COMPUTE_USER_DATA_2
11891#define COMPUTE_USER_DATA_2__DATA__SHIFT 0x0
11892#define COMPUTE_USER_DATA_2__DATA_MASK 0xFFFFFFFFL
11893//COMPUTE_USER_DATA_3
11894#define COMPUTE_USER_DATA_3__DATA__SHIFT 0x0
11895#define COMPUTE_USER_DATA_3__DATA_MASK 0xFFFFFFFFL
11896//COMPUTE_USER_DATA_4
11897#define COMPUTE_USER_DATA_4__DATA__SHIFT 0x0
11898#define COMPUTE_USER_DATA_4__DATA_MASK 0xFFFFFFFFL
11899//COMPUTE_USER_DATA_5
11900#define COMPUTE_USER_DATA_5__DATA__SHIFT 0x0
11901#define COMPUTE_USER_DATA_5__DATA_MASK 0xFFFFFFFFL
11902//COMPUTE_USER_DATA_6
11903#define COMPUTE_USER_DATA_6__DATA__SHIFT 0x0
11904#define COMPUTE_USER_DATA_6__DATA_MASK 0xFFFFFFFFL
11905//COMPUTE_USER_DATA_7
11906#define COMPUTE_USER_DATA_7__DATA__SHIFT 0x0
11907#define COMPUTE_USER_DATA_7__DATA_MASK 0xFFFFFFFFL
11908//COMPUTE_USER_DATA_8
11909#define COMPUTE_USER_DATA_8__DATA__SHIFT 0x0
11910#define COMPUTE_USER_DATA_8__DATA_MASK 0xFFFFFFFFL
11911//COMPUTE_USER_DATA_9
11912#define COMPUTE_USER_DATA_9__DATA__SHIFT 0x0
11913#define COMPUTE_USER_DATA_9__DATA_MASK 0xFFFFFFFFL
11914//COMPUTE_USER_DATA_10
11915#define COMPUTE_USER_DATA_10__DATA__SHIFT 0x0
11916#define COMPUTE_USER_DATA_10__DATA_MASK 0xFFFFFFFFL
11917//COMPUTE_USER_DATA_11
11918#define COMPUTE_USER_DATA_11__DATA__SHIFT 0x0
11919#define COMPUTE_USER_DATA_11__DATA_MASK 0xFFFFFFFFL
11920//COMPUTE_USER_DATA_12
11921#define COMPUTE_USER_DATA_12__DATA__SHIFT 0x0
11922#define COMPUTE_USER_DATA_12__DATA_MASK 0xFFFFFFFFL
11923//COMPUTE_USER_DATA_13
11924#define COMPUTE_USER_DATA_13__DATA__SHIFT 0x0
11925#define COMPUTE_USER_DATA_13__DATA_MASK 0xFFFFFFFFL
11926//COMPUTE_USER_DATA_14
11927#define COMPUTE_USER_DATA_14__DATA__SHIFT 0x0
11928#define COMPUTE_USER_DATA_14__DATA_MASK 0xFFFFFFFFL
11929//COMPUTE_USER_DATA_15
11930#define COMPUTE_USER_DATA_15__DATA__SHIFT 0x0
11931#define COMPUTE_USER_DATA_15__DATA_MASK 0xFFFFFFFFL
11932//COMPUTE_NOWHERE
11933#define COMPUTE_NOWHERE__DATA__SHIFT 0x0
11934#define COMPUTE_NOWHERE__DATA_MASK 0xFFFFFFFFL
11935
11936
11937// addressBlock: gc_cppdec
11938//CP_DFY_CNTL
11939#define CP_DFY_CNTL__POLICY__SHIFT 0x0
11940#define CP_DFY_CNTL__MTYPE__SHIFT 0x2
11941#define CP_DFY_CNTL__TPI_SDP_SEL__SHIFT 0x1a
11942#define CP_DFY_CNTL__LFSR_RESET__SHIFT 0x1c
11943#define CP_DFY_CNTL__MODE__SHIFT 0x1d
11944#define CP_DFY_CNTL__ENABLE__SHIFT 0x1f
11945#define CP_DFY_CNTL__POLICY_MASK 0x00000001L
11946#define CP_DFY_CNTL__MTYPE_MASK 0x0000000CL
11947#define CP_DFY_CNTL__TPI_SDP_SEL_MASK 0x04000000L
11948#define CP_DFY_CNTL__LFSR_RESET_MASK 0x10000000L
11949#define CP_DFY_CNTL__MODE_MASK 0x60000000L
11950#define CP_DFY_CNTL__ENABLE_MASK 0x80000000L
11951//CP_DFY_STAT
11952#define CP_DFY_STAT__BURST_COUNT__SHIFT 0x0
11953#define CP_DFY_STAT__TAGS_PENDING__SHIFT 0x10
11954#define CP_DFY_STAT__BUSY__SHIFT 0x1f
11955#define CP_DFY_STAT__BURST_COUNT_MASK 0x0000FFFFL
11956#define CP_DFY_STAT__TAGS_PENDING_MASK 0x07FF0000L
11957#define CP_DFY_STAT__BUSY_MASK 0x80000000L
11958//CP_DFY_ADDR_HI
11959#define CP_DFY_ADDR_HI__ADDR_HI__SHIFT 0x0
11960#define CP_DFY_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
11961//CP_DFY_ADDR_LO
11962#define CP_DFY_ADDR_LO__ADDR_LO__SHIFT 0x5
11963#define CP_DFY_ADDR_LO__ADDR_LO_MASK 0xFFFFFFE0L
11964//CP_DFY_DATA_0
11965#define CP_DFY_DATA_0__DATA__SHIFT 0x0
11966#define CP_DFY_DATA_0__DATA_MASK 0xFFFFFFFFL
11967//CP_DFY_DATA_1
11968#define CP_DFY_DATA_1__DATA__SHIFT 0x0
11969#define CP_DFY_DATA_1__DATA_MASK 0xFFFFFFFFL
11970//CP_DFY_DATA_2
11971#define CP_DFY_DATA_2__DATA__SHIFT 0x0
11972#define CP_DFY_DATA_2__DATA_MASK 0xFFFFFFFFL
11973//CP_DFY_DATA_3
11974#define CP_DFY_DATA_3__DATA__SHIFT 0x0
11975#define CP_DFY_DATA_3__DATA_MASK 0xFFFFFFFFL
11976//CP_DFY_DATA_4
11977#define CP_DFY_DATA_4__DATA__SHIFT 0x0
11978#define CP_DFY_DATA_4__DATA_MASK 0xFFFFFFFFL
11979//CP_DFY_DATA_5
11980#define CP_DFY_DATA_5__DATA__SHIFT 0x0
11981#define CP_DFY_DATA_5__DATA_MASK 0xFFFFFFFFL
11982//CP_DFY_DATA_6
11983#define CP_DFY_DATA_6__DATA__SHIFT 0x0
11984#define CP_DFY_DATA_6__DATA_MASK 0xFFFFFFFFL
11985//CP_DFY_DATA_7
11986#define CP_DFY_DATA_7__DATA__SHIFT 0x0
11987#define CP_DFY_DATA_7__DATA_MASK 0xFFFFFFFFL
11988//CP_DFY_DATA_8
11989#define CP_DFY_DATA_8__DATA__SHIFT 0x0
11990#define CP_DFY_DATA_8__DATA_MASK 0xFFFFFFFFL
11991//CP_DFY_DATA_9
11992#define CP_DFY_DATA_9__DATA__SHIFT 0x0
11993#define CP_DFY_DATA_9__DATA_MASK 0xFFFFFFFFL
11994//CP_DFY_DATA_10
11995#define CP_DFY_DATA_10__DATA__SHIFT 0x0
11996#define CP_DFY_DATA_10__DATA_MASK 0xFFFFFFFFL
11997//CP_DFY_DATA_11
11998#define CP_DFY_DATA_11__DATA__SHIFT 0x0
11999#define CP_DFY_DATA_11__DATA_MASK 0xFFFFFFFFL
12000//CP_DFY_DATA_12
12001#define CP_DFY_DATA_12__DATA__SHIFT 0x0
12002#define CP_DFY_DATA_12__DATA_MASK 0xFFFFFFFFL
12003//CP_DFY_DATA_13
12004#define CP_DFY_DATA_13__DATA__SHIFT 0x0
12005#define CP_DFY_DATA_13__DATA_MASK 0xFFFFFFFFL
12006//CP_DFY_DATA_14
12007#define CP_DFY_DATA_14__DATA__SHIFT 0x0
12008#define CP_DFY_DATA_14__DATA_MASK 0xFFFFFFFFL
12009//CP_DFY_DATA_15
12010#define CP_DFY_DATA_15__DATA__SHIFT 0x0
12011#define CP_DFY_DATA_15__DATA_MASK 0xFFFFFFFFL
12012//CP_DFY_CMD
12013#define CP_DFY_CMD__OFFSET__SHIFT 0x0
12014#define CP_DFY_CMD__SIZE__SHIFT 0x10
12015#define CP_DFY_CMD__OFFSET_MASK 0x000001FFL
12016#define CP_DFY_CMD__SIZE_MASK 0xFFFF0000L
12017//CP_EOPQ_WAIT_TIME
12018#define CP_EOPQ_WAIT_TIME__WAIT_TIME__SHIFT 0x0
12019#define CP_EOPQ_WAIT_TIME__SCALE_COUNT__SHIFT 0xa
12020#define CP_EOPQ_WAIT_TIME__WAIT_TIME_MASK 0x000003FFL
12021#define CP_EOPQ_WAIT_TIME__SCALE_COUNT_MASK 0x0003FC00L
12022//CP_CPC_MGCG_SYNC_CNTL
12023#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD__SHIFT 0x0
12024#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD__SHIFT 0x8
12025#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD_MASK 0x000000FFL
12026#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD_MASK 0x0000FF00L
12027//CPC_INT_INFO
12028#define CPC_INT_INFO__ADDR_HI__SHIFT 0x0
12029#define CPC_INT_INFO__TYPE__SHIFT 0x10
12030#define CPC_INT_INFO__VMID__SHIFT 0x14
12031#define CPC_INT_INFO__QUEUE_ID__SHIFT 0x1c
12032#define CPC_INT_INFO__ADDR_HI_MASK 0x0000FFFFL
12033#define CPC_INT_INFO__TYPE_MASK 0x00010000L
12034#define CPC_INT_INFO__VMID_MASK 0x00F00000L
12035#define CPC_INT_INFO__QUEUE_ID_MASK 0x70000000L
12036//CP_VIRT_STATUS
12037#define CP_VIRT_STATUS__VIRT_STATUS__SHIFT 0x0
12038#define CP_VIRT_STATUS__VIRT_STATUS_MASK 0xFFFFFFFFL
12039//CPC_INT_ADDR
12040#define CPC_INT_ADDR__ADDR__SHIFT 0x0
12041#define CPC_INT_ADDR__ADDR_MASK 0xFFFFFFFFL
12042//CPC_INT_PASID
12043#define CPC_INT_PASID__PASID__SHIFT 0x0
12044#define CPC_INT_PASID__PASID_MASK 0x0000FFFFL
12045//CP_GFX_ERROR
12046#define CP_GFX_ERROR__EDC_ERROR_ID__SHIFT 0x0
12047#define CP_GFX_ERROR__SUA_ERROR__SHIFT 0x4
12048#define CP_GFX_ERROR__RSVD1_ERROR__SHIFT 0x5
12049#define CP_GFX_ERROR__RSVD2_ERROR__SHIFT 0x6
12050#define CP_GFX_ERROR__SEM_UTCL1_ERROR__SHIFT 0x7
12051#define CP_GFX_ERROR__QU_STRM_UTCL1_ERROR__SHIFT 0x8
12052#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR__SHIFT 0x9
12053#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR__SHIFT 0xa
12054#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR__SHIFT 0xb
12055#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR__SHIFT 0xc
12056#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR__SHIFT 0xd
12057#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR__SHIFT 0xe
12058#define CP_GFX_ERROR__APPEND_UTCL1_ERROR__SHIFT 0xf
12059#define CP_GFX_ERROR__CE_DMA_UTCL1_ERROR__SHIFT 0x10
12060#define CP_GFX_ERROR__PFP_VGTDMA_UTCL1_ERROR__SHIFT 0x11
12061#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0x12
12062#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x13
12063#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR__SHIFT 0x14
12064#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR__SHIFT 0x15
12065#define CP_GFX_ERROR__CE_TC_UTCL1_ERROR__SHIFT 0x16
12066#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR__SHIFT 0x17
12067#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR__SHIFT 0x18
12068#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR__SHIFT 0x19
12069#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR__SHIFT 0x1a
12070#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR__SHIFT 0x1b
12071#define CP_GFX_ERROR__C1_FETCHER_UTCL1_ERROR__SHIFT 0x1c
12072#define CP_GFX_ERROR__C2_FETCHER_UTCL1_ERROR__SHIFT 0x1d
12073#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR__SHIFT 0x1e
12074#define CP_GFX_ERROR__CE_INIT_UTCL1_ERROR__SHIFT 0x1f
12075#define CP_GFX_ERROR__EDC_ERROR_ID_MASK 0x0000000FL
12076#define CP_GFX_ERROR__SUA_ERROR_MASK 0x00000010L
12077#define CP_GFX_ERROR__RSVD1_ERROR_MASK 0x00000020L
12078#define CP_GFX_ERROR__RSVD2_ERROR_MASK 0x00000040L
12079#define CP_GFX_ERROR__SEM_UTCL1_ERROR_MASK 0x00000080L
12080#define CP_GFX_ERROR__QU_STRM_UTCL1_ERROR_MASK 0x00000100L
12081#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR_MASK 0x00000200L
12082#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR_MASK 0x00000400L
12083#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR_MASK 0x00000800L
12084#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR_MASK 0x00001000L
12085#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR_MASK 0x00002000L
12086#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR_MASK 0x00004000L
12087#define CP_GFX_ERROR__APPEND_UTCL1_ERROR_MASK 0x00008000L
12088#define CP_GFX_ERROR__CE_DMA_UTCL1_ERROR_MASK 0x00010000L
12089#define CP_GFX_ERROR__PFP_VGTDMA_UTCL1_ERROR_MASK 0x00020000L
12090#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00040000L
12091#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00080000L
12092#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR_MASK 0x00100000L
12093#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR_MASK 0x00200000L
12094#define CP_GFX_ERROR__CE_TC_UTCL1_ERROR_MASK 0x00400000L
12095#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR_MASK 0x00800000L
12096#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR_MASK 0x01000000L
12097#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR_MASK 0x02000000L
12098#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR_MASK 0x04000000L
12099#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR_MASK 0x08000000L
12100#define CP_GFX_ERROR__C1_FETCHER_UTCL1_ERROR_MASK 0x10000000L
12101#define CP_GFX_ERROR__C2_FETCHER_UTCL1_ERROR_MASK 0x20000000L
12102#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR_MASK 0x40000000L
12103#define CP_GFX_ERROR__CE_INIT_UTCL1_ERROR_MASK 0x80000000L
12104//CPG_UTCL1_CNTL
12105#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
12106#define CPG_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
12107#define CPG_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
12108#define CPG_UTCL1_CNTL__BYPASS__SHIFT 0x19
12109#define CPG_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
12110#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
12111#define CPG_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
12112#define CPG_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
12113#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
12114#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
12115#define CPG_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
12116#define CPG_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
12117#define CPG_UTCL1_CNTL__BYPASS_MASK 0x02000000L
12118#define CPG_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
12119#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
12120#define CPG_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
12121#define CPG_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
12122#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
12123//CPC_UTCL1_CNTL
12124#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
12125#define CPC_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
12126#define CPC_UTCL1_CNTL__BYPASS__SHIFT 0x19
12127#define CPC_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
12128#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
12129#define CPC_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
12130#define CPC_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
12131#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
12132#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
12133#define CPC_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
12134#define CPC_UTCL1_CNTL__BYPASS_MASK 0x02000000L
12135#define CPC_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
12136#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
12137#define CPC_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
12138#define CPC_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
12139#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
12140//CPF_UTCL1_CNTL
12141#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
12142#define CPF_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
12143#define CPF_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
12144#define CPF_UTCL1_CNTL__BYPASS__SHIFT 0x19
12145#define CPF_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
12146#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
12147#define CPF_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
12148#define CPF_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
12149#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
12150#define CPF_UTCL1_CNTL__FORCE_NO_EXE__SHIFT 0x1f
12151#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
12152#define CPF_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
12153#define CPF_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
12154#define CPF_UTCL1_CNTL__BYPASS_MASK 0x02000000L
12155#define CPF_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
12156#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
12157#define CPF_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
12158#define CPF_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
12159#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
12160#define CPF_UTCL1_CNTL__FORCE_NO_EXE_MASK 0x80000000L
12161//CP_AQL_SMM_STATUS
12162#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM__SHIFT 0x0
12163#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM_MASK 0xFFFFFFFFL
12164//CP_RB0_BASE
12165#define CP_RB0_BASE__RB_BASE__SHIFT 0x0
12166#define CP_RB0_BASE__RB_BASE_MASK 0xFFFFFFFFL
12167//CP_RB_BASE
12168#define CP_RB_BASE__RB_BASE__SHIFT 0x0
12169#define CP_RB_BASE__RB_BASE_MASK 0xFFFFFFFFL
12170//CP_RB0_CNTL
12171#define CP_RB0_CNTL__RB_BUFSZ__SHIFT 0x0
12172#define CP_RB0_CNTL__RB_BLKSZ__SHIFT 0x8
12173#define CP_RB0_CNTL__BUF_SWAP__SHIFT 0x11
12174#define CP_RB0_CNTL__MIN_AVAILSZ__SHIFT 0x14
12175#define CP_RB0_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
12176#define CP_RB0_CNTL__CACHE_POLICY__SHIFT 0x18
12177#define CP_RB0_CNTL__RB_NO_UPDATE__SHIFT 0x1b
12178#define CP_RB0_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
12179#define CP_RB0_CNTL__RB_BUFSZ_MASK 0x0000003FL
12180#define CP_RB0_CNTL__RB_BLKSZ_MASK 0x00003F00L
12181#define CP_RB0_CNTL__BUF_SWAP_MASK 0x00060000L
12182#define CP_RB0_CNTL__MIN_AVAILSZ_MASK 0x00300000L
12183#define CP_RB0_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
12184#define CP_RB0_CNTL__CACHE_POLICY_MASK 0x01000000L
12185#define CP_RB0_CNTL__RB_NO_UPDATE_MASK 0x08000000L
12186#define CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
12187//CP_RB_CNTL
12188#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x0
12189#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x8
12190#define CP_RB_CNTL__MIN_AVAILSZ__SHIFT 0x14
12191#define CP_RB_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
12192#define CP_RB_CNTL__CACHE_POLICY__SHIFT 0x18
12193#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x1b
12194#define CP_RB_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
12195#define CP_RB_CNTL__RB_BUFSZ_MASK 0x0000003FL
12196#define CP_RB_CNTL__RB_BLKSZ_MASK 0x00003F00L
12197#define CP_RB_CNTL__MIN_AVAILSZ_MASK 0x00300000L
12198#define CP_RB_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
12199#define CP_RB_CNTL__CACHE_POLICY_MASK 0x01000000L
12200#define CP_RB_CNTL__RB_NO_UPDATE_MASK 0x08000000L
12201#define CP_RB_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
12202//CP_RB_RPTR_WR
12203#define CP_RB_RPTR_WR__RB_RPTR_WR__SHIFT 0x0
12204#define CP_RB_RPTR_WR__RB_RPTR_WR_MASK 0x000FFFFFL
12205//CP_RB0_RPTR_ADDR
12206#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
12207#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
12208//CP_RB_RPTR_ADDR
12209#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
12210#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
12211//CP_RB0_RPTR_ADDR_HI
12212#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
12213#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
12214//CP_RB_RPTR_ADDR_HI
12215#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
12216#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
12217//CP_RB0_BUFSZ_MASK
12218#define CP_RB0_BUFSZ_MASK__DATA__SHIFT 0x0
12219#define CP_RB0_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
12220//CP_RB_BUFSZ_MASK
12221#define CP_RB_BUFSZ_MASK__DATA__SHIFT 0x0
12222#define CP_RB_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
12223//CP_RB_WPTR_POLL_ADDR_LO
12224#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO__SHIFT 0x2
12225#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO_MASK 0xFFFFFFFCL
12226//CP_RB_WPTR_POLL_ADDR_HI
12227#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI__SHIFT 0x0
12228#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI_MASK 0x0000FFFFL
12229//GC_PRIV_MODE
12230//CP_INT_CNTL
12231#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
12232#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12233#define CP_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
12234#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12235#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE__SHIFT 0x12
12236#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
12237#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
12238#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE__SHIFT 0x15
12239#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
12240#define CP_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
12241#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12242#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12243#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12244#define CP_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
12245#define CP_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
12246#define CP_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
12247#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
12248#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12249#define CP_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
12250#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12251#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
12252#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
12253#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
12254#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
12255#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
12256#define CP_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12257#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12258#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12259#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12260#define CP_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
12261#define CP_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
12262#define CP_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
12263//CP_INT_STATUS
12264#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
12265#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
12266#define CP_INT_STATUS__GPF_INT_STAT__SHIFT 0x10
12267#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
12268#define CP_INT_STATUS__CMP_BUSY_INT_STAT__SHIFT 0x12
12269#define CP_INT_STATUS__CNTX_BUSY_INT_STAT__SHIFT 0x13
12270#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT__SHIFT 0x14
12271#define CP_INT_STATUS__GFX_IDLE_INT_STAT__SHIFT 0x15
12272#define CP_INT_STATUS__PRIV_INSTR_INT_STAT__SHIFT 0x16
12273#define CP_INT_STATUS__PRIV_REG_INT_STAT__SHIFT 0x17
12274#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT__SHIFT 0x18
12275#define CP_INT_STATUS__TIME_STAMP_INT_STAT__SHIFT 0x1a
12276#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
12277#define CP_INT_STATUS__GENERIC2_INT_STAT__SHIFT 0x1d
12278#define CP_INT_STATUS__GENERIC1_INT_STAT__SHIFT 0x1e
12279#define CP_INT_STATUS__GENERIC0_INT_STAT__SHIFT 0x1f
12280#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
12281#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
12282#define CP_INT_STATUS__GPF_INT_STAT_MASK 0x00010000L
12283#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
12284#define CP_INT_STATUS__CMP_BUSY_INT_STAT_MASK 0x00040000L
12285#define CP_INT_STATUS__CNTX_BUSY_INT_STAT_MASK 0x00080000L
12286#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
12287#define CP_INT_STATUS__GFX_IDLE_INT_STAT_MASK 0x00200000L
12288#define CP_INT_STATUS__PRIV_INSTR_INT_STAT_MASK 0x00400000L
12289#define CP_INT_STATUS__PRIV_REG_INT_STAT_MASK 0x00800000L
12290#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
12291#define CP_INT_STATUS__TIME_STAMP_INT_STAT_MASK 0x04000000L
12292#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
12293#define CP_INT_STATUS__GENERIC2_INT_STAT_MASK 0x20000000L
12294#define CP_INT_STATUS__GENERIC1_INT_STAT_MASK 0x40000000L
12295#define CP_INT_STATUS__GENERIC0_INT_STAT_MASK 0x80000000L
12296//CP_DEVICE_ID
12297#define CP_DEVICE_ID__DEVICE_ID__SHIFT 0x0
12298#define CP_DEVICE_ID__DEVICE_ID_MASK 0x000000FFL
12299//CP_ME0_PIPE_PRIORITY_CNTS
12300#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
12301#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
12302#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
12303#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
12304#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
12305#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
12306#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
12307#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
12308//CP_RING_PRIORITY_CNTS
12309#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
12310#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
12311#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
12312#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
12313#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
12314#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
12315#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
12316#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
12317//CP_ME0_PIPE0_PRIORITY
12318#define CP_ME0_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
12319#define CP_ME0_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
12320//CP_RING0_PRIORITY
12321#define CP_RING0_PRIORITY__PRIORITY__SHIFT 0x0
12322#define CP_RING0_PRIORITY__PRIORITY_MASK 0x00000003L
12323//CP_ME0_PIPE1_PRIORITY
12324#define CP_ME0_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
12325#define CP_ME0_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
12326//CP_RING1_PRIORITY
12327#define CP_RING1_PRIORITY__PRIORITY__SHIFT 0x0
12328#define CP_RING1_PRIORITY__PRIORITY_MASK 0x00000003L
12329//CP_ME0_PIPE2_PRIORITY
12330#define CP_ME0_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
12331#define CP_ME0_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
12332//CP_RING2_PRIORITY
12333#define CP_RING2_PRIORITY__PRIORITY__SHIFT 0x0
12334#define CP_RING2_PRIORITY__PRIORITY_MASK 0x00000003L
12335//CP_FATAL_ERROR
12336#define CP_FATAL_ERROR__CPF_FATAL_ERROR__SHIFT 0x0
12337#define CP_FATAL_ERROR__CPG_FATAL_ERROR__SHIFT 0x1
12338#define CP_FATAL_ERROR__GFX_HALT_PROC__SHIFT 0x2
12339#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR__SHIFT 0x3
12340#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN__SHIFT 0x4
12341#define CP_FATAL_ERROR__CPF_FATAL_ERROR_MASK 0x00000001L
12342#define CP_FATAL_ERROR__CPG_FATAL_ERROR_MASK 0x00000002L
12343#define CP_FATAL_ERROR__GFX_HALT_PROC_MASK 0x00000004L
12344#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR_MASK 0x00000008L
12345#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN_MASK 0x00000010L
12346//CP_RB_VMID
12347#define CP_RB_VMID__RB0_VMID__SHIFT 0x0
12348#define CP_RB_VMID__RB1_VMID__SHIFT 0x8
12349#define CP_RB_VMID__RB2_VMID__SHIFT 0x10
12350#define CP_RB_VMID__RB0_VMID_MASK 0x0000000FL
12351#define CP_RB_VMID__RB1_VMID_MASK 0x00000F00L
12352#define CP_RB_VMID__RB2_VMID_MASK 0x000F0000L
12353//CP_ME0_PIPE0_VMID
12354#define CP_ME0_PIPE0_VMID__VMID__SHIFT 0x0
12355#define CP_ME0_PIPE0_VMID__VMID_MASK 0x0000000FL
12356//CP_ME0_PIPE1_VMID
12357#define CP_ME0_PIPE1_VMID__VMID__SHIFT 0x0
12358#define CP_ME0_PIPE1_VMID__VMID_MASK 0x0000000FL
12359//CP_RB0_WPTR
12360#define CP_RB0_WPTR__RB_WPTR__SHIFT 0x0
12361#define CP_RB0_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
12362//CP_RB_WPTR
12363#define CP_RB_WPTR__RB_WPTR__SHIFT 0x0
12364#define CP_RB_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
12365//CP_RB0_WPTR_HI
12366#define CP_RB0_WPTR_HI__RB_WPTR__SHIFT 0x0
12367#define CP_RB0_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
12368//CP_RB_WPTR_HI
12369#define CP_RB_WPTR_HI__RB_WPTR__SHIFT 0x0
12370#define CP_RB_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
12371//CP_RB1_WPTR
12372#define CP_RB1_WPTR__RB_WPTR__SHIFT 0x0
12373#define CP_RB1_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
12374//CP_RB1_WPTR_HI
12375#define CP_RB1_WPTR_HI__RB_WPTR__SHIFT 0x0
12376#define CP_RB1_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
12377//CP_RB2_WPTR
12378#define CP_RB2_WPTR__RB_WPTR__SHIFT 0x0
12379#define CP_RB2_WPTR__RB_WPTR_MASK 0x000FFFFFL
12380//CP_RB_DOORBELL_CONTROL
12381#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
12382#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
12383#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
12384#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
12385#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
12386#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
12387#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
12388#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
12389//CP_RB_DOORBELL_RANGE_LOWER
12390#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
12391#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x0FFFFFFCL
12392//CP_RB_DOORBELL_RANGE_UPPER
12393#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
12394#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x0FFFFFFCL
12395//CP_MEC_DOORBELL_RANGE_LOWER
12396#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
12397#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x0FFFFFFCL
12398//CP_MEC_DOORBELL_RANGE_UPPER
12399#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
12400#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x0FFFFFFCL
12401//CPG_UTCL1_ERROR
12402#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
12403#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
12404//CPC_UTCL1_ERROR
12405#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
12406#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
12407//CP_RB1_BASE
12408#define CP_RB1_BASE__RB_BASE__SHIFT 0x0
12409#define CP_RB1_BASE__RB_BASE_MASK 0xFFFFFFFFL
12410//CP_RB1_CNTL
12411#define CP_RB1_CNTL__RB_BUFSZ__SHIFT 0x0
12412#define CP_RB1_CNTL__RB_BLKSZ__SHIFT 0x8
12413#define CP_RB1_CNTL__MIN_AVAILSZ__SHIFT 0x14
12414#define CP_RB1_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
12415#define CP_RB1_CNTL__CACHE_POLICY__SHIFT 0x18
12416#define CP_RB1_CNTL__RB_NO_UPDATE__SHIFT 0x1b
12417#define CP_RB1_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
12418#define CP_RB1_CNTL__RB_BUFSZ_MASK 0x0000003FL
12419#define CP_RB1_CNTL__RB_BLKSZ_MASK 0x00003F00L
12420#define CP_RB1_CNTL__MIN_AVAILSZ_MASK 0x00300000L
12421#define CP_RB1_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
12422#define CP_RB1_CNTL__CACHE_POLICY_MASK 0x01000000L
12423#define CP_RB1_CNTL__RB_NO_UPDATE_MASK 0x08000000L
12424#define CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
12425//CP_RB1_RPTR_ADDR
12426#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
12427#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
12428//CP_RB1_RPTR_ADDR_HI
12429#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
12430#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
12431//CP_RB2_BASE
12432#define CP_RB2_BASE__RB_BASE__SHIFT 0x0
12433#define CP_RB2_BASE__RB_BASE_MASK 0xFFFFFFFFL
12434//CP_RB2_CNTL
12435#define CP_RB2_CNTL__RB_BUFSZ__SHIFT 0x0
12436#define CP_RB2_CNTL__RB_BLKSZ__SHIFT 0x8
12437#define CP_RB2_CNTL__MIN_AVAILSZ__SHIFT 0x14
12438#define CP_RB2_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
12439#define CP_RB2_CNTL__CACHE_POLICY__SHIFT 0x18
12440#define CP_RB2_CNTL__RB_NO_UPDATE__SHIFT 0x1b
12441#define CP_RB2_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
12442#define CP_RB2_CNTL__RB_BUFSZ_MASK 0x0000003FL
12443#define CP_RB2_CNTL__RB_BLKSZ_MASK 0x00003F00L
12444#define CP_RB2_CNTL__MIN_AVAILSZ_MASK 0x00300000L
12445#define CP_RB2_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
12446#define CP_RB2_CNTL__CACHE_POLICY_MASK 0x01000000L
12447#define CP_RB2_CNTL__RB_NO_UPDATE_MASK 0x08000000L
12448#define CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
12449//CP_RB2_RPTR_ADDR
12450#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
12451#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
12452//CP_RB2_RPTR_ADDR_HI
12453#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
12454#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
12455//CP_RB0_ACTIVE
12456#define CP_RB0_ACTIVE__ACTIVE__SHIFT 0x0
12457#define CP_RB0_ACTIVE__ACTIVE_MASK 0x00000001L
12458//CP_RB_ACTIVE
12459#define CP_RB_ACTIVE__ACTIVE__SHIFT 0x0
12460#define CP_RB_ACTIVE__ACTIVE_MASK 0x00000001L
12461//CP_INT_CNTL_RING0
12462#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
12463#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12464#define CP_INT_CNTL_RING0__GPF_INT_ENABLE__SHIFT 0x10
12465#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12466#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE__SHIFT 0x12
12467#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
12468#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
12469#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE__SHIFT 0x15
12470#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
12471#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE__SHIFT 0x17
12472#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12473#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12474#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12475#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE__SHIFT 0x1d
12476#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE__SHIFT 0x1e
12477#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE__SHIFT 0x1f
12478#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
12479#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12480#define CP_INT_CNTL_RING0__GPF_INT_ENABLE_MASK 0x00010000L
12481#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12482#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
12483#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
12484#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
12485#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
12486#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
12487#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12488#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12489#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12490#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12491#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE_MASK 0x20000000L
12492#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE_MASK 0x40000000L
12493#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE_MASK 0x80000000L
12494//CP_INT_CNTL_RING1
12495#define CP_INT_CNTL_RING1__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
12496#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12497#define CP_INT_CNTL_RING1__GPF_INT_ENABLE__SHIFT 0x10
12498#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12499#define CP_INT_CNTL_RING1__CMP_BUSY_INT_ENABLE__SHIFT 0x12
12500#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
12501#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
12502#define CP_INT_CNTL_RING1__GFX_IDLE_INT_ENABLE__SHIFT 0x15
12503#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
12504#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE__SHIFT 0x17
12505#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12506#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12507#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12508#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE__SHIFT 0x1d
12509#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE__SHIFT 0x1e
12510#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE__SHIFT 0x1f
12511#define CP_INT_CNTL_RING1__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
12512#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12513#define CP_INT_CNTL_RING1__GPF_INT_ENABLE_MASK 0x00010000L
12514#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12515#define CP_INT_CNTL_RING1__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
12516#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
12517#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
12518#define CP_INT_CNTL_RING1__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
12519#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
12520#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12521#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12522#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12523#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12524#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE_MASK 0x20000000L
12525#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE_MASK 0x40000000L
12526#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE_MASK 0x80000000L
12527//CP_INT_CNTL_RING2
12528#define CP_INT_CNTL_RING2__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
12529#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12530#define CP_INT_CNTL_RING2__GPF_INT_ENABLE__SHIFT 0x10
12531#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12532#define CP_INT_CNTL_RING2__CMP_BUSY_INT_ENABLE__SHIFT 0x12
12533#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
12534#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
12535#define CP_INT_CNTL_RING2__GFX_IDLE_INT_ENABLE__SHIFT 0x15
12536#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
12537#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE__SHIFT 0x17
12538#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12539#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12540#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12541#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE__SHIFT 0x1d
12542#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE__SHIFT 0x1e
12543#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE__SHIFT 0x1f
12544#define CP_INT_CNTL_RING2__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
12545#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12546#define CP_INT_CNTL_RING2__GPF_INT_ENABLE_MASK 0x00010000L
12547#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12548#define CP_INT_CNTL_RING2__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
12549#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
12550#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
12551#define CP_INT_CNTL_RING2__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
12552#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
12553#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12554#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12555#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12556#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12557#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE_MASK 0x20000000L
12558#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE_MASK 0x40000000L
12559#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE_MASK 0x80000000L
12560//CP_INT_STATUS_RING0
12561#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
12562#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
12563#define CP_INT_STATUS_RING0__GPF_INT_STAT__SHIFT 0x10
12564#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
12565#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT__SHIFT 0x12
12566#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT__SHIFT 0x13
12567#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT__SHIFT 0x14
12568#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT__SHIFT 0x15
12569#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT__SHIFT 0x16
12570#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT__SHIFT 0x17
12571#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT__SHIFT 0x18
12572#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT__SHIFT 0x1a
12573#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
12574#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT__SHIFT 0x1d
12575#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT__SHIFT 0x1e
12576#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT__SHIFT 0x1f
12577#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
12578#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
12579#define CP_INT_STATUS_RING0__GPF_INT_STAT_MASK 0x00010000L
12580#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
12581#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT_MASK 0x00040000L
12582#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT_MASK 0x00080000L
12583#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
12584#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT_MASK 0x00200000L
12585#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT_MASK 0x00400000L
12586#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT_MASK 0x00800000L
12587#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
12588#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT_MASK 0x04000000L
12589#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
12590#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT_MASK 0x20000000L
12591#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT_MASK 0x40000000L
12592#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT_MASK 0x80000000L
12593//CP_INT_STATUS_RING1
12594#define CP_INT_STATUS_RING1__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
12595#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
12596#define CP_INT_STATUS_RING1__GPF_INT_STAT__SHIFT 0x10
12597#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
12598#define CP_INT_STATUS_RING1__CMP_BUSY_INT_STAT__SHIFT 0x12
12599#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT__SHIFT 0x13
12600#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT__SHIFT 0x14
12601#define CP_INT_STATUS_RING1__GFX_IDLE_INT_STAT__SHIFT 0x15
12602#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT__SHIFT 0x16
12603#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT__SHIFT 0x17
12604#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT__SHIFT 0x18
12605#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT__SHIFT 0x1a
12606#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
12607#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT__SHIFT 0x1d
12608#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT__SHIFT 0x1e
12609#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT__SHIFT 0x1f
12610#define CP_INT_STATUS_RING1__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
12611#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
12612#define CP_INT_STATUS_RING1__GPF_INT_STAT_MASK 0x00010000L
12613#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
12614#define CP_INT_STATUS_RING1__CMP_BUSY_INT_STAT_MASK 0x00040000L
12615#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT_MASK 0x00080000L
12616#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
12617#define CP_INT_STATUS_RING1__GFX_IDLE_INT_STAT_MASK 0x00200000L
12618#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT_MASK 0x00400000L
12619#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT_MASK 0x00800000L
12620#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
12621#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT_MASK 0x04000000L
12622#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
12623#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT_MASK 0x20000000L
12624#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT_MASK 0x40000000L
12625#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT_MASK 0x80000000L
12626//CP_INT_STATUS_RING2
12627#define CP_INT_STATUS_RING2__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
12628#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
12629#define CP_INT_STATUS_RING2__GPF_INT_STAT__SHIFT 0x10
12630#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
12631#define CP_INT_STATUS_RING2__CMP_BUSY_INT_STAT__SHIFT 0x12
12632#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT__SHIFT 0x13
12633#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT__SHIFT 0x14
12634#define CP_INT_STATUS_RING2__GFX_IDLE_INT_STAT__SHIFT 0x15
12635#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT__SHIFT 0x16
12636#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT__SHIFT 0x17
12637#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT__SHIFT 0x18
12638#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT__SHIFT 0x1a
12639#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
12640#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT__SHIFT 0x1d
12641#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT__SHIFT 0x1e
12642#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT__SHIFT 0x1f
12643#define CP_INT_STATUS_RING2__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
12644#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
12645#define CP_INT_STATUS_RING2__GPF_INT_STAT_MASK 0x00010000L
12646#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
12647#define CP_INT_STATUS_RING2__CMP_BUSY_INT_STAT_MASK 0x00040000L
12648#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT_MASK 0x00080000L
12649#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
12650#define CP_INT_STATUS_RING2__GFX_IDLE_INT_STAT_MASK 0x00200000L
12651#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT_MASK 0x00400000L
12652#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT_MASK 0x00800000L
12653#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
12654#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT_MASK 0x04000000L
12655#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
12656#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT_MASK 0x20000000L
12657#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT_MASK 0x40000000L
12658#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT_MASK 0x80000000L
12659#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
12660#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
12661#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
12662#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
12663#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
12664#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
12665//CP_PWR_CNTL
12666#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0__SHIFT 0x0
12667#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1__SHIFT 0x1
12668#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0__SHIFT 0x8
12669#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1__SHIFT 0x9
12670#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2__SHIFT 0xa
12671#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3__SHIFT 0xb
12672#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0__SHIFT 0x10
12673#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1__SHIFT 0x11
12674#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2__SHIFT 0x12
12675#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3__SHIFT 0x13
12676#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0_MASK 0x00000001L
12677#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1_MASK 0x00000002L
12678#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0_MASK 0x00000100L
12679#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1_MASK 0x00000200L
12680#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2_MASK 0x00000400L
12681#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3_MASK 0x00000800L
12682#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0_MASK 0x00010000L
12683#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1_MASK 0x00020000L
12684#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2_MASK 0x00040000L
12685#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3_MASK 0x00080000L
12686//CP_MEM_SLP_CNTL
12687#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN__SHIFT 0x0
12688#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN__SHIFT 0x1
12689#define CP_MEM_SLP_CNTL__RESERVED__SHIFT 0x2
12690#define CP_MEM_SLP_CNTL__CP_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
12691#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY__SHIFT 0x8
12692#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY__SHIFT 0x10
12693#define CP_MEM_SLP_CNTL__RESERVED1__SHIFT 0x18
12694#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK 0x00000001L
12695#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN_MASK 0x00000002L
12696#define CP_MEM_SLP_CNTL__RESERVED_MASK 0x0000007CL
12697#define CP_MEM_SLP_CNTL__CP_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
12698#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY_MASK 0x0000FF00L
12699#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
12700#define CP_MEM_SLP_CNTL__RESERVED1_MASK 0xFF000000L
12701//CP_ECC_FIRSTOCCURRENCE
12702#define CP_ECC_FIRSTOCCURRENCE__INTERFACE__SHIFT 0x0
12703#define CP_ECC_FIRSTOCCURRENCE__CLIENT__SHIFT 0x4
12704#define CP_ECC_FIRSTOCCURRENCE__ME__SHIFT 0x8
12705#define CP_ECC_FIRSTOCCURRENCE__PIPE__SHIFT 0xa
12706#define CP_ECC_FIRSTOCCURRENCE__QUEUE__SHIFT 0xc
12707#define CP_ECC_FIRSTOCCURRENCE__VMID__SHIFT 0x10
12708#define CP_ECC_FIRSTOCCURRENCE__INTERFACE_MASK 0x00000003L
12709#define CP_ECC_FIRSTOCCURRENCE__CLIENT_MASK 0x000000F0L
12710#define CP_ECC_FIRSTOCCURRENCE__ME_MASK 0x00000300L
12711#define CP_ECC_FIRSTOCCURRENCE__PIPE_MASK 0x00000C00L
12712#define CP_ECC_FIRSTOCCURRENCE__QUEUE_MASK 0x00007000L
12713#define CP_ECC_FIRSTOCCURRENCE__VMID_MASK 0x000F0000L
12714//CP_ECC_FIRSTOCCURRENCE_RING0
12715#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE__SHIFT 0x0
12716#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE_MASK 0xFFFFFFFFL
12717//CP_ECC_FIRSTOCCURRENCE_RING1
12718#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE__SHIFT 0x0
12719#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE_MASK 0xFFFFFFFFL
12720//CP_ECC_FIRSTOCCURRENCE_RING2
12721#define CP_ECC_FIRSTOCCURRENCE_RING2__OBSOLETE__SHIFT 0x0
12722#define CP_ECC_FIRSTOCCURRENCE_RING2__OBSOLETE_MASK 0xFFFFFFFFL
12723//GB_EDC_MODE
12724#define GB_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0xf
12725#define GB_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
12726#define GB_EDC_MODE__GATE_FUE__SHIFT 0x11
12727#define GB_EDC_MODE__DED_MODE__SHIFT 0x14
12728#define GB_EDC_MODE__PROP_FED__SHIFT 0x1d
12729#define GB_EDC_MODE__BYPASS__SHIFT 0x1f
12730#define GB_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x00008000L
12731#define GB_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
12732#define GB_EDC_MODE__GATE_FUE_MASK 0x00020000L
12733#define GB_EDC_MODE__DED_MODE_MASK 0x00300000L
12734#define GB_EDC_MODE__PROP_FED_MASK 0x20000000L
12735#define GB_EDC_MODE__BYPASS_MASK 0x80000000L
12736//CP_PQ_WPTR_POLL_CNTL
12737#define CP_PQ_WPTR_POLL_CNTL__PERIOD__SHIFT 0x0
12738#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT__SHIFT 0x1d
12739#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE__SHIFT 0x1e
12740#define CP_PQ_WPTR_POLL_CNTL__EN__SHIFT 0x1f
12741#define CP_PQ_WPTR_POLL_CNTL__PERIOD_MASK 0x000000FFL
12742#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT_MASK 0x20000000L
12743#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE_MASK 0x40000000L
12744#define CP_PQ_WPTR_POLL_CNTL__EN_MASK 0x80000000L
12745//CP_PQ_WPTR_POLL_CNTL1
12746#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK__SHIFT 0x0
12747#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK_MASK 0xFFFFFFFFL
12748//CP_ME1_PIPE0_INT_CNTL
12749#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
12750#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
12751#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12752#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
12753#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
12754#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12755#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
12756#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12757#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12758#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12759#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
12760#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
12761#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
12762#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
12763#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
12764#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12765#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
12766#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
12767#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12768#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12769#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12770#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12771#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12772#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
12773#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
12774#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
12775//CP_ME1_PIPE1_INT_CNTL
12776#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
12777#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
12778#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12779#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
12780#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
12781#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12782#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
12783#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12784#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12785#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12786#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
12787#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
12788#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
12789#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
12790#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
12791#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12792#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
12793#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
12794#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12795#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12796#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12797#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12798#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12799#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
12800#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
12801#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
12802//CP_ME1_PIPE2_INT_CNTL
12803#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
12804#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
12805#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12806#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
12807#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
12808#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12809#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
12810#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12811#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12812#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12813#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
12814#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
12815#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
12816#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
12817#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
12818#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12819#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
12820#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
12821#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12822#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12823#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12824#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12825#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12826#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
12827#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
12828#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
12829//CP_ME1_PIPE3_INT_CNTL
12830#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
12831#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
12832#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12833#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
12834#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
12835#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12836#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
12837#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12838#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12839#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12840#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
12841#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
12842#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
12843#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
12844#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
12845#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12846#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
12847#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
12848#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12849#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12850#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12851#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12852#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12853#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
12854#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
12855#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
12856//CP_ME2_PIPE0_INT_CNTL
12857#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
12858#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
12859#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12860#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
12861#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
12862#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12863#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
12864#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12865#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12866#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12867#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
12868#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
12869#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
12870#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
12871#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
12872#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12873#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
12874#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
12875#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12876#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12877#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12878#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12879#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12880#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
12881#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
12882#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
12883//CP_ME2_PIPE1_INT_CNTL
12884#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
12885#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
12886#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12887#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
12888#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
12889#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12890#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
12891#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12892#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12893#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12894#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
12895#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
12896#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
12897#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
12898#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
12899#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12900#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
12901#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
12902#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12903#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12904#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12905#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12906#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12907#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
12908#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
12909#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
12910//CP_ME2_PIPE2_INT_CNTL
12911#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
12912#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
12913#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12914#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
12915#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
12916#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12917#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
12918#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12919#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12920#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12921#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
12922#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
12923#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
12924#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
12925#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
12926#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12927#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
12928#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
12929#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12930#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12931#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12932#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12933#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12934#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
12935#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
12936#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
12937//CP_ME2_PIPE3_INT_CNTL
12938#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
12939#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
12940#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
12941#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
12942#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
12943#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
12944#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
12945#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
12946#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
12947#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
12948#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
12949#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
12950#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
12951#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
12952#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
12953#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
12954#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
12955#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
12956#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
12957#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
12958#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
12959#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
12960#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
12961#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
12962#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
12963#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
12964//CP_ME1_PIPE0_INT_STATUS
12965#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
12966#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
12967#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
12968#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
12969#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
12970#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
12971#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
12972#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
12973#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
12974#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
12975#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
12976#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
12977#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
12978#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
12979#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
12980#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
12981#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
12982#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
12983#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
12984#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
12985#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
12986#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
12987#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
12988#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
12989#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
12990#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
12991//CP_ME1_PIPE1_INT_STATUS
12992#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
12993#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
12994#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
12995#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
12996#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
12997#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
12998#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
12999#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
13000#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
13001#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
13002#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
13003#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
13004#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
13005#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
13006#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
13007#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
13008#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
13009#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
13010#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
13011#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
13012#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
13013#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
13014#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
13015#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
13016#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
13017#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
13018//CP_ME1_PIPE2_INT_STATUS
13019#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
13020#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
13021#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
13022#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
13023#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
13024#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
13025#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
13026#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
13027#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
13028#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
13029#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
13030#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
13031#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
13032#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
13033#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
13034#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
13035#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
13036#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
13037#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
13038#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
13039#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
13040#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
13041#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
13042#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
13043#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
13044#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
13045//CP_ME1_PIPE3_INT_STATUS
13046#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
13047#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
13048#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
13049#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
13050#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
13051#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
13052#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
13053#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
13054#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
13055#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
13056#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
13057#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
13058#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
13059#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
13060#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
13061#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
13062#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
13063#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
13064#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
13065#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
13066#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
13067#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
13068#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
13069#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
13070#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
13071#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
13072//CP_ME2_PIPE0_INT_STATUS
13073#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
13074#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
13075#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
13076#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
13077#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
13078#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
13079#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
13080#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
13081#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
13082#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
13083#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
13084#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
13085#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
13086#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
13087#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
13088#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
13089#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
13090#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
13091#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
13092#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
13093#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
13094#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
13095#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
13096#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
13097#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
13098#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
13099//CP_ME2_PIPE1_INT_STATUS
13100#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
13101#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
13102#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
13103#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
13104#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
13105#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
13106#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
13107#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
13108#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
13109#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
13110#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
13111#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
13112#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
13113#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
13114#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
13115#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
13116#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
13117#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
13118#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
13119#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
13120#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
13121#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
13122#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
13123#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
13124#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
13125#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
13126//CP_ME2_PIPE2_INT_STATUS
13127#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
13128#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
13129#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
13130#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
13131#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
13132#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
13133#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
13134#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
13135#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
13136#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
13137#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
13138#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
13139#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
13140#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
13141#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
13142#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
13143#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
13144#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
13145#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
13146#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
13147#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
13148#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
13149#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
13150#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
13151#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
13152#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
13153//CP_ME2_PIPE3_INT_STATUS
13154#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
13155#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
13156#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
13157#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
13158#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
13159#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
13160#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
13161#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
13162#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
13163#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
13164#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
13165#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
13166#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
13167#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
13168#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
13169#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
13170#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
13171#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
13172#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
13173#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
13174#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
13175#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
13176#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
13177#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
13178#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
13179#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
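//CP_ME1_INT_STAT_DEBUG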
13180#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
13181#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
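//CP_ME2_INT_STAT_DEBUG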
13182#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
13183#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
13184//CC_GC_EDC_CONFIG
13185#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
13186#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
13187//CP_ME1_PIPE_PRIORITY_CNTS
13188#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
13189#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
13190#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
13191#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
13192#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
13193#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
13194#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
13195#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
13196//CP_ME1_PIPE0_PRIORITY
13197#define CP_ME1_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
13198#define CP_ME1_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
13199//CP_ME1_PIPE1_PRIORITY
13200#define CP_ME1_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
13201#define CP_ME1_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
13202//CP_ME1_PIPE2_PRIORITY
13203#define CP_ME1_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
13204#define CP_ME1_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
13205//CP_ME1_PIPE3_PRIORITY
13206#define CP_ME1_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
13207#define CP_ME1_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
13208//CP_ME2_PIPE_PRIORITY_CNTS
13209#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
13210#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
13211#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
13212#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
13213#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
13214#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
13215#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
13216#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
13217//CP_ME2_PIPE0_PRIORITY
13218#define CP_ME2_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
13219#define CP_ME2_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
13220//CP_ME2_PIPE1_PRIORITY
13221#define CP_ME2_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
13222#define CP_ME2_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
13223//CP_ME2_PIPE2_PRIORITY
13224#define CP_ME2_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
13225#define CP_ME2_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
13226//CP_ME2_PIPE3_PRIORITY
13227#define CP_ME2_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
13228#define CP_ME2_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
13229//CP_CE_PRGRM_CNTR_START
13230#define CP_CE_PRGRM_CNTR_START__IP_START__SHIFT 0x0
13231#define CP_CE_PRGRM_CNTR_START__IP_START_MASK 0x000007FFL
13232//CP_PFP_PRGRM_CNTR_START
13233#define CP_PFP_PRGRM_CNTR_START__IP_START__SHIFT 0x0
13234#define CP_PFP_PRGRM_CNTR_START__IP_START_MASK 0x00001FFFL
13235//CP_ME_PRGRM_CNTR_START
13236#define CP_ME_PRGRM_CNTR_START__IP_START__SHIFT 0x0
13237#define CP_ME_PRGRM_CNTR_START__IP_START_MASK 0x00000FFFL
13238//CP_MEC1_PRGRM_CNTR_START
13239#define CP_MEC1_PRGRM_CNTR_START__IP_START__SHIFT 0x0
13240#define CP_MEC1_PRGRM_CNTR_START__IP_START_MASK 0x0000FFFFL
13241//CP_MEC2_PRGRM_CNTR_START
13242#define CP_MEC2_PRGRM_CNTR_START__IP_START__SHIFT 0x0
13243#define CP_MEC2_PRGRM_CNTR_START__IP_START_MASK 0x0000FFFFL
13244//CP_CE_INTR_ROUTINE_START
13245#define CP_CE_INTR_ROUTINE_START__IR_START__SHIFT 0x0
13246#define CP_CE_INTR_ROUTINE_START__IR_START_MASK 0x000007FFL
13247//CP_PFP_INTR_ROUTINE_START
13248#define CP_PFP_INTR_ROUTINE_START__IR_START__SHIFT 0x0
13249#define CP_PFP_INTR_ROUTINE_START__IR_START_MASK 0x00001FFFL
13250//CP_ME_INTR_ROUTINE_START
13251#define CP_ME_INTR_ROUTINE_START__IR_START__SHIFT 0x0
13252#define CP_ME_INTR_ROUTINE_START__IR_START_MASK 0x00000FFFL
13253//CP_MEC1_INTR_ROUTINE_START
13254#define CP_MEC1_INTR_ROUTINE_START__IR_START__SHIFT 0x0
13255#define CP_MEC1_INTR_ROUTINE_START__IR_START_MASK 0x0000FFFFL
13256//CP_MEC2_INTR_ROUTINE_START
13257#define CP_MEC2_INTR_ROUTINE_START__IR_START__SHIFT 0x0
13258#define CP_MEC2_INTR_ROUTINE_START__IR_START_MASK 0x0000FFFFL
13259//CP_CONTEXT_CNTL
13260#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_WD_CNTX__SHIFT 0x0
13261#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX__SHIFT 0x4
13262#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_WD_CNTX__SHIFT 0x10
13263#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX__SHIFT 0x14
13264#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_WD_CNTX_MASK 0x00000007L
13265#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX_MASK 0x00000070L
13266#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_WD_CNTX_MASK 0x00070000L
13267#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX_MASK 0x00700000L
13268//CP_MAX_CONTEXT
13269#define CP_MAX_CONTEXT__MAX_CONTEXT__SHIFT 0x0
13270#define CP_MAX_CONTEXT__MAX_CONTEXT_MASK 0x00000007L
13271//CP_IQ_WAIT_TIME1
13272#define CP_IQ_WAIT_TIME1__IB_OFFLOAD__SHIFT 0x0
13273#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD__SHIFT 0x8
13274#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD__SHIFT 0x10
13275#define CP_IQ_WAIT_TIME1__GWS__SHIFT 0x18
13276#define CP_IQ_WAIT_TIME1__IB_OFFLOAD_MASK 0x000000FFL
13277#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD_MASK 0x0000FF00L
13278#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD_MASK 0x00FF0000L
13279#define CP_IQ_WAIT_TIME1__GWS_MASK 0xFF000000L
13280//CP_IQ_WAIT_TIME2
13281#define CP_IQ_WAIT_TIME2__QUE_SLEEP__SHIFT 0x0
13282#define CP_IQ_WAIT_TIME2__SCH_WAVE__SHIFT 0x8
13283#define CP_IQ_WAIT_TIME2__SEM_REARM__SHIFT 0x10
13284#define CP_IQ_WAIT_TIME2__DEQ_RETRY__SHIFT 0x18
13285#define CP_IQ_WAIT_TIME2__QUE_SLEEP_MASK 0x000000FFL
13286#define CP_IQ_WAIT_TIME2__SCH_WAVE_MASK 0x0000FF00L
13287#define CP_IQ_WAIT_TIME2__SEM_REARM_MASK 0x00FF0000L
13288#define CP_IQ_WAIT_TIME2__DEQ_RETRY_MASK 0xFF000000L
13289//CP_RB0_BASE_HI
13290#define CP_RB0_BASE_HI__RB_BASE_HI__SHIFT 0x0
13291#define CP_RB0_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
13292//CP_RB1_BASE_HI
13293#define CP_RB1_BASE_HI__RB_BASE_HI__SHIFT 0x0
13294#define CP_RB1_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
13295//CP_VMID_RESET
13296#define CP_VMID_RESET__RESET_REQUEST__SHIFT 0x0
13297#define CP_VMID_RESET__RESET_REQUEST_MASK 0x0000FFFFL
13298//CPC_INT_CNTL
13299#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
13300#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
13301#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
13302#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
13303#define CPC_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
13304#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
13305#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
13306#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
13307#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
13308#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
13309#define CPC_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
13310#define CPC_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
13311#define CPC_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
13312#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
13313#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
13314#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
13315#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
13316#define CPC_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
13317#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
13318#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
13319#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
13320#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
13321#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
13322#define CPC_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
13323#define CPC_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
13324#define CPC_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
13325//CPC_INT_STATUS
13326#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
13327#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
13328#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
13329#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
13330#define CPC_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
13331#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
13332#define CPC_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
13333#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
13334#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
13335#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
13336#define CPC_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
13337#define CPC_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
13338#define CPC_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
13339#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
13340#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
13341#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
13342#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
13343#define CPC_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
13344#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
13345#define CPC_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
13346#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
13347#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
13348#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
13349#define CPC_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
13350#define CPC_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
13351#define CPC_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
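/* Illustrative sketch, not part of the original header: the *_MASK and
 * *__SHIFT pairs above are meant to be combined with plain bitwise ops.
 * The helper names and the "status" parameter below are hypothetical,
 * added only to show the intended usage pattern. */
static inline unsigned int cpc_int_cntl_example_value(void)
{
	/* Enable the opcode-error and reserved-bit-error interrupts. */
	return CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK |
	       CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK;
}

static inline unsigned int cpc_int_status_opcode_error(unsigned int status)
{
	/* Isolate a field with its _MASK, then normalize it with its __SHIFT. */
	return (status & CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK) >>
	       CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT;
}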
13352//CP_VMID_PREEMPT
13353#define CP_VMID_PREEMPT__PREEMPT_REQUEST__SHIFT 0x0
13354#define CP_VMID_PREEMPT__VIRT_COMMAND__SHIFT 0x10
13355#define CP_VMID_PREEMPT__PREEMPT_REQUEST_MASK 0x0000FFFFL
13356#define CP_VMID_PREEMPT__VIRT_COMMAND_MASK 0x000F0000L
13357//CPC_INT_CNTX_ID
13358#define CPC_INT_CNTX_ID__CNTX_ID__SHIFT 0x0
13359#define CPC_INT_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
13360//CP_PQ_STATUS
13361#define CP_PQ_STATUS__DOORBELL_UPDATED__SHIFT 0x0
13362#define CP_PQ_STATUS__DOORBELL_ENABLE__SHIFT 0x1
13363#define CP_PQ_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
13364#define CP_PQ_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
13365//CP_CPC_IC_BASE_LO
13366#define CP_CPC_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
13367#define CP_CPC_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
13368//CP_CPC_IC_BASE_HI
13369#define CP_CPC_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
13370#define CP_CPC_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
13371//CP_CPC_IC_BASE_CNTL
13372#define CP_CPC_IC_BASE_CNTL__VMID__SHIFT 0x0
13373#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
13374#define CP_CPC_IC_BASE_CNTL__VMID_MASK 0x0000000FL
13375#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY_MASK 0x01000000L
13376//CP_CPC_IC_OP_CNTL
13377#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
13378#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
13379#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
13380#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
13381#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
13382#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
13383//CP_MEC1_F32_INT_DIS
13384#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
13385#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
13386#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
13387#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
13388#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
13389#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
13390#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
13391#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
13392#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
13393#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
13394#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
13395#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
13396#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
13397#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
13398#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
13399#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
13400#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
13401#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
13402#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
13403#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
13404#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
13405#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
13406#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
13407#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
13408#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
13409#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
13410#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
13411#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
13412#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
13413#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
13414#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
13415#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
13416//CP_MEC2_F32_INT_DIS
13417#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
13418#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
13419#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
13420#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
13421#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
13422#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
13423#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
13424#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
13425#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
13426#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
13427#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
13428#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
13429#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
13430#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
13431#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
13432#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
13433#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
13434#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
13435#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
13436#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
13437#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
13438#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
13439#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
13440#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
13441#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
13442#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
13443#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
13444#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
13445#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
13446#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
13447#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
13448#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
13449//CP_VMID_STATUS
13450#define CP_VMID_STATUS__PREEMPT_DE_STATUS__SHIFT 0x0
13451#define CP_VMID_STATUS__PREEMPT_CE_STATUS__SHIFT 0x10
13452#define CP_VMID_STATUS__PREEMPT_DE_STATUS_MASK 0x0000FFFFL
13453#define CP_VMID_STATUS__PREEMPT_CE_STATUS_MASK 0xFFFF0000L
13454
13455
13456// addressBlock: gc_cppdec2
13457//CP_RB_DOORBELL_CONTROL_SCH_0
13458#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_OFFSET__SHIFT 0x2
13459#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_EN__SHIFT 0x1e
13460#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_HIT__SHIFT 0x1f
13461#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
13462#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_EN_MASK 0x40000000L
13463#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_HIT_MASK 0x80000000L
13464//CP_RB_DOORBELL_CONTROL_SCH_1
13465#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_OFFSET__SHIFT 0x2
13466#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_EN__SHIFT 0x1e
13467#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_HIT__SHIFT 0x1f
13468#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
13469#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_EN_MASK 0x40000000L
13470#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_HIT_MASK 0x80000000L
13471//CP_RB_DOORBELL_CONTROL_SCH_2
13472#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_OFFSET__SHIFT 0x2
13473#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_EN__SHIFT 0x1e
13474#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_HIT__SHIFT 0x1f
13475#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
13476#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_EN_MASK 0x40000000L
13477#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_HIT_MASK 0x80000000L
13478//CP_RB_DOORBELL_CONTROL_SCH_3
13479#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_OFFSET__SHIFT 0x2
13480#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_EN__SHIFT 0x1e
13481#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_HIT__SHIFT 0x1f
13482#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
13483#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_EN_MASK 0x40000000L
13484#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_HIT_MASK 0x80000000L
13485//CP_RB_DOORBELL_CONTROL_SCH_4
13486#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_OFFSET__SHIFT 0x2
13487#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_EN__SHIFT 0x1e
13488#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_HIT__SHIFT 0x1f
13489#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
13490#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_EN_MASK 0x40000000L
13491#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_HIT_MASK 0x80000000L
13492//CP_RB_DOORBELL_CONTROL_SCH_5
13493#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_OFFSET__SHIFT 0x2
13494#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_EN__SHIFT 0x1e
13495#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_HIT__SHIFT 0x1f
13496#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
13497#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_EN_MASK 0x40000000L
13498#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_HIT_MASK 0x80000000L
13499//CP_RB_DOORBELL_CONTROL_SCH_6
13500#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_OFFSET__SHIFT 0x2
13501#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_EN__SHIFT 0x1e
13502#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_HIT__SHIFT 0x1f
13503#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
13504#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_EN_MASK 0x40000000L
13505#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_HIT_MASK 0x80000000L
13506//CP_RB_DOORBELL_CONTROL_SCH_7
13507#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_OFFSET__SHIFT 0x2
13508#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_EN__SHIFT 0x1e
13509#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_HIT__SHIFT 0x1f
13510#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
13511#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_EN_MASK 0x40000000L
13512#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_HIT_MASK 0x80000000L
13513//CP_RB_DOORBELL_CLEAR
13514#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE__SHIFT 0x0
13515#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR__SHIFT 0x8
13516#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR__SHIFT 0x9
13517#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR__SHIFT 0xa
13518#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR__SHIFT 0xb
13519#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR__SHIFT 0xc
13520#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR__SHIFT 0xd
13521#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE_MASK 0x00000007L
13522#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR_MASK 0x00000100L
13523#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR_MASK 0x00000200L
13524#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR_MASK 0x00000400L
13525#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR_MASK 0x00000800L
13526#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR_MASK 0x00001000L
13527#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR_MASK 0x00002000L
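/* Illustrative sketch (hypothetical helpers, not from the original header):
 * decoding and updating the scheduler doorbell control layout defined above
 * for CP_RB_DOORBELL_CONTROL_SCH_0. */
static inline unsigned int sch0_doorbell_offset(unsigned int reg)
{
	return (reg & CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_OFFSET_MASK) >>
	       CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_OFFSET__SHIFT;
}

static inline unsigned int sch0_doorbell_enable(unsigned int reg)
{
	/* Set the enable bit without disturbing the offset or hit fields. */
	return reg | CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_EN_MASK;
}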
13528//CP_GFX_MQD_CONTROL
13529#define CP_GFX_MQD_CONTROL__VMID__SHIFT 0x0
13530#define CP_GFX_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
13531#define CP_GFX_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
13532#define CP_GFX_MQD_CONTROL__VMID_MASK 0x0000000FL
13533#define CP_GFX_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
13534#define CP_GFX_MQD_CONTROL__CACHE_POLICY_MASK 0x01000000L
13535//CP_GFX_MQD_BASE_ADDR
13536#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
13537#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
13538//CP_GFX_MQD_BASE_ADDR_HI
13539#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
13540#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
13541//CP_RB_STATUS
13542#define CP_RB_STATUS__DOORBELL_UPDATED__SHIFT 0x0
13543#define CP_RB_STATUS__DOORBELL_ENABLE__SHIFT 0x1
13544#define CP_RB_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
13545#define CP_RB_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
13546//CPG_UTCL1_STATUS
13547#define CPG_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
13548#define CPG_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
13549#define CPG_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
13550#define CPG_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
13551#define CPG_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
13552#define CPG_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
13553#define CPG_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
13554#define CPG_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
13555#define CPG_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
13556#define CPG_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
13557#define CPG_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
13558#define CPG_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
13559//CPC_UTCL1_STATUS
13560#define CPC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
13561#define CPC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
13562#define CPC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
13563#define CPC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
13564#define CPC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
13565#define CPC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
13566#define CPC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
13567#define CPC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
13568#define CPC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
13569#define CPC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
13570#define CPC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
13571#define CPC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
13572//CPF_UTCL1_STATUS
13573#define CPF_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
13574#define CPF_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
13575#define CPF_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
13576#define CPF_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
13577#define CPF_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
13578#define CPF_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
13579#define CPF_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
13580#define CPF_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
13581#define CPF_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
13582#define CPF_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
13583#define CPF_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
13584#define CPF_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
13585//CP_SD_CNTL
13586#define CP_SD_CNTL__CPF_EN__SHIFT 0x0
13587#define CP_SD_CNTL__CPG_EN__SHIFT 0x1
13588#define CP_SD_CNTL__CPC_EN__SHIFT 0x2
13589#define CP_SD_CNTL__RLC_EN__SHIFT 0x3
13590#define CP_SD_CNTL__SPI_EN__SHIFT 0x4
13591#define CP_SD_CNTL__WD_EN__SHIFT 0x5
13592#define CP_SD_CNTL__IA_EN__SHIFT 0x6
13593#define CP_SD_CNTL__PA_EN__SHIFT 0x7
13594#define CP_SD_CNTL__RMI_EN__SHIFT 0x8
13595#define CP_SD_CNTL__EA_EN__SHIFT 0x9
13596#define CP_SD_CNTL__CPF_EN_MASK 0x00000001L
13597#define CP_SD_CNTL__CPG_EN_MASK 0x00000002L
13598#define CP_SD_CNTL__CPC_EN_MASK 0x00000004L
13599#define CP_SD_CNTL__RLC_EN_MASK 0x00000008L
13600#define CP_SD_CNTL__SPI_EN_MASK 0x00000010L
13601#define CP_SD_CNTL__WD_EN_MASK 0x00000020L
13602#define CP_SD_CNTL__IA_EN_MASK 0x00000040L
13603#define CP_SD_CNTL__PA_EN_MASK 0x00000080L
13604#define CP_SD_CNTL__RMI_EN_MASK 0x00000100L
13605#define CP_SD_CNTL__EA_EN_MASK 0x00000200L
13606//CP_SOFT_RESET_CNTL
13607#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET__SHIFT 0x0
13608#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET__SHIFT 0x1
13609#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET__SHIFT 0x2
13610#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET__SHIFT 0x3
13611#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET__SHIFT 0x4
13612#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET__SHIFT 0x5
13613#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET__SHIFT 0x6
13614#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET_MASK 0x00000001L
13615#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET_MASK 0x00000002L
13616#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET_MASK 0x00000004L
13617#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET_MASK 0x00000008L
13618#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET_MASK 0x00000010L
13619#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET_MASK 0x00000020L
13620#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET_MASK 0x00000040L
13621//CP_CPC_GFX_CNTL
13622#define CP_CPC_GFX_CNTL__QUEUEID__SHIFT 0x0
13623#define CP_CPC_GFX_CNTL__PIPEID__SHIFT 0x3
13624#define CP_CPC_GFX_CNTL__MEID__SHIFT 0x5
13625#define CP_CPC_GFX_CNTL__VALID__SHIFT 0x7
13626#define CP_CPC_GFX_CNTL__QUEUEID_MASK 0x00000007L
13627#define CP_CPC_GFX_CNTL__PIPEID_MASK 0x00000018L
13628#define CP_CPC_GFX_CNTL__MEID_MASK 0x00000060L
13629#define CP_CPC_GFX_CNTL__VALID_MASK 0x00000080L
13630
13631
13632// addressBlock: gc_spipdec
13633//SPI_ARB_PRIORITY
13634#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT 0x0
13635#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT 0x3
13636#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT 0x6
13637#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT 0x9
13638#define SPI_ARB_PRIORITY__TS0_DUR_MULT__SHIFT 0xc
13639#define SPI_ARB_PRIORITY__TS1_DUR_MULT__SHIFT 0xe
13640#define SPI_ARB_PRIORITY__TS2_DUR_MULT__SHIFT 0x10
13641#define SPI_ARB_PRIORITY__TS3_DUR_MULT__SHIFT 0x12
13642#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK 0x00000007L
13643#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK 0x00000038L
13644#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK 0x000001C0L
13645#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK 0x00000E00L
13646#define SPI_ARB_PRIORITY__TS0_DUR_MULT_MASK 0x00003000L
13647#define SPI_ARB_PRIORITY__TS1_DUR_MULT_MASK 0x0000C000L
13648#define SPI_ARB_PRIORITY__TS2_DUR_MULT_MASK 0x00030000L
13649#define SPI_ARB_PRIORITY__TS3_DUR_MULT_MASK 0x000C0000L
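/* Illustrative sketch (hypothetical helper, not from the original header):
 * packing the SPI_ARB_PRIORITY pipe-order fields defined above.  Each value
 * is masked so an out-of-range argument cannot spill into a neighbouring
 * field. */
static inline unsigned int spi_arb_priority_pipe_order(unsigned int ts0,
						       unsigned int ts1,
						       unsigned int ts2,
						       unsigned int ts3)
{
	return ((ts0 << SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT) &
		SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK) |
	       ((ts1 << SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT) &
		SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK) |
	       ((ts2 << SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT) &
		SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK) |
	       ((ts3 << SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT) &
		SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK);
}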
13650//SPI_ARB_CYCLES_0
13651#define SPI_ARB_CYCLES_0__TS0_DURATION__SHIFT 0x0
13652#define SPI_ARB_CYCLES_0__TS1_DURATION__SHIFT 0x10
13653#define SPI_ARB_CYCLES_0__TS0_DURATION_MASK 0x0000FFFFL
13654#define SPI_ARB_CYCLES_0__TS1_DURATION_MASK 0xFFFF0000L
13655//SPI_ARB_CYCLES_1
13656#define SPI_ARB_CYCLES_1__TS2_DURATION__SHIFT 0x0
13657#define SPI_ARB_CYCLES_1__TS3_DURATION__SHIFT 0x10
13658#define SPI_ARB_CYCLES_1__TS2_DURATION_MASK 0x0000FFFFL
13659#define SPI_ARB_CYCLES_1__TS3_DURATION_MASK 0xFFFF0000L
13660//SPI_WCL_PIPE_PERCENT_GFX
13661#define SPI_WCL_PIPE_PERCENT_GFX__VALUE__SHIFT 0x0
13662#define SPI_WCL_PIPE_PERCENT_GFX__LS_GRP_VALUE__SHIFT 0x7
13663#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE__SHIFT 0xc
13664#define SPI_WCL_PIPE_PERCENT_GFX__ES_GRP_VALUE__SHIFT 0x11
13665#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE__SHIFT 0x16
13666#define SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK 0x0000007FL
13667#define SPI_WCL_PIPE_PERCENT_GFX__LS_GRP_VALUE_MASK 0x00000F80L
13668#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE_MASK 0x0001F000L
13669#define SPI_WCL_PIPE_PERCENT_GFX__ES_GRP_VALUE_MASK 0x003E0000L
13670#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE_MASK 0x07C00000L
13671//SPI_WCL_PIPE_PERCENT_HP3D
13672#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE__SHIFT 0x0
13673#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE__SHIFT 0xc
13674#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE__SHIFT 0x16
13675#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE_MASK 0x0000007FL
13676#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE_MASK 0x0001F000L
13677#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE_MASK 0x07C00000L
13678//SPI_WCL_PIPE_PERCENT_CS0
13679#define SPI_WCL_PIPE_PERCENT_CS0__VALUE__SHIFT 0x0
13680#define SPI_WCL_PIPE_PERCENT_CS0__VALUE_MASK 0x7FL
13681//SPI_WCL_PIPE_PERCENT_CS1
13682#define SPI_WCL_PIPE_PERCENT_CS1__VALUE__SHIFT 0x0
13683#define SPI_WCL_PIPE_PERCENT_CS1__VALUE_MASK 0x7FL
13684//SPI_WCL_PIPE_PERCENT_CS2
13685#define SPI_WCL_PIPE_PERCENT_CS2__VALUE__SHIFT 0x0
13686#define SPI_WCL_PIPE_PERCENT_CS2__VALUE_MASK 0x7FL
13687//SPI_WCL_PIPE_PERCENT_CS3
13688#define SPI_WCL_PIPE_PERCENT_CS3__VALUE__SHIFT 0x0
13689#define SPI_WCL_PIPE_PERCENT_CS3__VALUE_MASK 0x7FL
13690//SPI_WCL_PIPE_PERCENT_CS4
13691#define SPI_WCL_PIPE_PERCENT_CS4__VALUE__SHIFT 0x0
13692#define SPI_WCL_PIPE_PERCENT_CS4__VALUE_MASK 0x7FL
13693//SPI_WCL_PIPE_PERCENT_CS5
13694#define SPI_WCL_PIPE_PERCENT_CS5__VALUE__SHIFT 0x0
13695#define SPI_WCL_PIPE_PERCENT_CS5__VALUE_MASK 0x7FL
13696//SPI_WCL_PIPE_PERCENT_CS6
13697#define SPI_WCL_PIPE_PERCENT_CS6__VALUE__SHIFT 0x0
13698#define SPI_WCL_PIPE_PERCENT_CS6__VALUE_MASK 0x7FL
13699//SPI_WCL_PIPE_PERCENT_CS7
13700#define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT 0x0
13701#define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK 0x7FL
13702//SPI_COMPUTE_QUEUE_RESET
13703#define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0
13704#define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L
13705//SPI_RESOURCE_RESERVE_CU_0
13706#define SPI_RESOURCE_RESERVE_CU_0__VGPR__SHIFT 0x0
13707#define SPI_RESOURCE_RESERVE_CU_0__SGPR__SHIFT 0x4
13708#define SPI_RESOURCE_RESERVE_CU_0__LDS__SHIFT 0x8
13709#define SPI_RESOURCE_RESERVE_CU_0__WAVES__SHIFT 0xc
13710#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS__SHIFT 0xf
13711#define SPI_RESOURCE_RESERVE_CU_0__VGPR_MASK 0x0000000FL
13712#define SPI_RESOURCE_RESERVE_CU_0__SGPR_MASK 0x000000F0L
13713#define SPI_RESOURCE_RESERVE_CU_0__LDS_MASK 0x00000F00L
13714#define SPI_RESOURCE_RESERVE_CU_0__WAVES_MASK 0x00007000L
13715#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS_MASK 0x00078000L
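/* Illustrative sketch (hypothetical helper, not from the original header):
 * composing a per-CU resource reservation from the SPI_RESOURCE_RESERVE_CU_0
 * fields defined above. */
static inline unsigned int spi_reserve_cu0_value(unsigned int vgpr,
						 unsigned int sgpr,
						 unsigned int lds,
						 unsigned int waves,
						 unsigned int barriers)
{
	return ((vgpr << SPI_RESOURCE_RESERVE_CU_0__VGPR__SHIFT) &
		SPI_RESOURCE_RESERVE_CU_0__VGPR_MASK) |
	       ((sgpr << SPI_RESOURCE_RESERVE_CU_0__SGPR__SHIFT) &
		SPI_RESOURCE_RESERVE_CU_0__SGPR_MASK) |
	       ((lds << SPI_RESOURCE_RESERVE_CU_0__LDS__SHIFT) &
		SPI_RESOURCE_RESERVE_CU_0__LDS_MASK) |
	       ((waves << SPI_RESOURCE_RESERVE_CU_0__WAVES__SHIFT) &
		SPI_RESOURCE_RESERVE_CU_0__WAVES_MASK) |
	       ((barriers << SPI_RESOURCE_RESERVE_CU_0__BARRIERS__SHIFT) &
		SPI_RESOURCE_RESERVE_CU_0__BARRIERS_MASK);
}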
13716//SPI_RESOURCE_RESERVE_CU_1
13717#define SPI_RESOURCE_RESERVE_CU_1__VGPR__SHIFT 0x0
13718#define SPI_RESOURCE_RESERVE_CU_1__SGPR__SHIFT 0x4
13719#define SPI_RESOURCE_RESERVE_CU_1__LDS__SHIFT 0x8
13720#define SPI_RESOURCE_RESERVE_CU_1__WAVES__SHIFT 0xc
13721#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS__SHIFT 0xf
13722#define SPI_RESOURCE_RESERVE_CU_1__VGPR_MASK 0x0000000FL
13723#define SPI_RESOURCE_RESERVE_CU_1__SGPR_MASK 0x000000F0L
13724#define SPI_RESOURCE_RESERVE_CU_1__LDS_MASK 0x00000F00L
13725#define SPI_RESOURCE_RESERVE_CU_1__WAVES_MASK 0x00007000L
13726#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS_MASK 0x00078000L
13727//SPI_RESOURCE_RESERVE_CU_2
13728#define SPI_RESOURCE_RESERVE_CU_2__VGPR__SHIFT 0x0
13729#define SPI_RESOURCE_RESERVE_CU_2__SGPR__SHIFT 0x4
13730#define SPI_RESOURCE_RESERVE_CU_2__LDS__SHIFT 0x8
13731#define SPI_RESOURCE_RESERVE_CU_2__WAVES__SHIFT 0xc
13732#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS__SHIFT 0xf
13733#define SPI_RESOURCE_RESERVE_CU_2__VGPR_MASK 0x0000000FL
13734#define SPI_RESOURCE_RESERVE_CU_2__SGPR_MASK 0x000000F0L
13735#define SPI_RESOURCE_RESERVE_CU_2__LDS_MASK 0x00000F00L
13736#define SPI_RESOURCE_RESERVE_CU_2__WAVES_MASK 0x00007000L
13737#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS_MASK 0x00078000L
13738//SPI_RESOURCE_RESERVE_CU_3
13739#define SPI_RESOURCE_RESERVE_CU_3__VGPR__SHIFT 0x0
13740#define SPI_RESOURCE_RESERVE_CU_3__SGPR__SHIFT 0x4
13741#define SPI_RESOURCE_RESERVE_CU_3__LDS__SHIFT 0x8
13742#define SPI_RESOURCE_RESERVE_CU_3__WAVES__SHIFT 0xc
13743#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS__SHIFT 0xf
13744#define SPI_RESOURCE_RESERVE_CU_3__VGPR_MASK 0x0000000FL
13745#define SPI_RESOURCE_RESERVE_CU_3__SGPR_MASK 0x000000F0L
13746#define SPI_RESOURCE_RESERVE_CU_3__LDS_MASK 0x00000F00L
13747#define SPI_RESOURCE_RESERVE_CU_3__WAVES_MASK 0x00007000L
13748#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS_MASK 0x00078000L
13749//SPI_RESOURCE_RESERVE_CU_4
13750#define SPI_RESOURCE_RESERVE_CU_4__VGPR__SHIFT 0x0
13751#define SPI_RESOURCE_RESERVE_CU_4__SGPR__SHIFT 0x4
13752#define SPI_RESOURCE_RESERVE_CU_4__LDS__SHIFT 0x8
13753#define SPI_RESOURCE_RESERVE_CU_4__WAVES__SHIFT 0xc
13754#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS__SHIFT 0xf
13755#define SPI_RESOURCE_RESERVE_CU_4__VGPR_MASK 0x0000000FL
13756#define SPI_RESOURCE_RESERVE_CU_4__SGPR_MASK 0x000000F0L
13757#define SPI_RESOURCE_RESERVE_CU_4__LDS_MASK 0x00000F00L
13758#define SPI_RESOURCE_RESERVE_CU_4__WAVES_MASK 0x00007000L
13759#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS_MASK 0x00078000L
13760//SPI_RESOURCE_RESERVE_CU_5
13761#define SPI_RESOURCE_RESERVE_CU_5__VGPR__SHIFT 0x0
13762#define SPI_RESOURCE_RESERVE_CU_5__SGPR__SHIFT 0x4
13763#define SPI_RESOURCE_RESERVE_CU_5__LDS__SHIFT 0x8
13764#define SPI_RESOURCE_RESERVE_CU_5__WAVES__SHIFT 0xc
13765#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS__SHIFT 0xf
13766#define SPI_RESOURCE_RESERVE_CU_5__VGPR_MASK 0x0000000FL
13767#define SPI_RESOURCE_RESERVE_CU_5__SGPR_MASK 0x000000F0L
13768#define SPI_RESOURCE_RESERVE_CU_5__LDS_MASK 0x00000F00L
13769#define SPI_RESOURCE_RESERVE_CU_5__WAVES_MASK 0x00007000L
13770#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS_MASK 0x00078000L
13771//SPI_RESOURCE_RESERVE_CU_6
13772#define SPI_RESOURCE_RESERVE_CU_6__VGPR__SHIFT 0x0
13773#define SPI_RESOURCE_RESERVE_CU_6__SGPR__SHIFT 0x4
13774#define SPI_RESOURCE_RESERVE_CU_6__LDS__SHIFT 0x8
13775#define SPI_RESOURCE_RESERVE_CU_6__WAVES__SHIFT 0xc
13776#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS__SHIFT 0xf
13777#define SPI_RESOURCE_RESERVE_CU_6__VGPR_MASK 0x0000000FL
13778#define SPI_RESOURCE_RESERVE_CU_6__SGPR_MASK 0x000000F0L
13779#define SPI_RESOURCE_RESERVE_CU_6__LDS_MASK 0x00000F00L
13780#define SPI_RESOURCE_RESERVE_CU_6__WAVES_MASK 0x00007000L
13781#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS_MASK 0x00078000L
13782//SPI_RESOURCE_RESERVE_CU_7
13783#define SPI_RESOURCE_RESERVE_CU_7__VGPR__SHIFT 0x0
13784#define SPI_RESOURCE_RESERVE_CU_7__SGPR__SHIFT 0x4
13785#define SPI_RESOURCE_RESERVE_CU_7__LDS__SHIFT 0x8
13786#define SPI_RESOURCE_RESERVE_CU_7__WAVES__SHIFT 0xc
13787#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS__SHIFT 0xf
13788#define SPI_RESOURCE_RESERVE_CU_7__VGPR_MASK 0x0000000FL
13789#define SPI_RESOURCE_RESERVE_CU_7__SGPR_MASK 0x000000F0L
13790#define SPI_RESOURCE_RESERVE_CU_7__LDS_MASK 0x00000F00L
13791#define SPI_RESOURCE_RESERVE_CU_7__WAVES_MASK 0x00007000L
13792#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS_MASK 0x00078000L
13793//SPI_RESOURCE_RESERVE_CU_8
13794#define SPI_RESOURCE_RESERVE_CU_8__VGPR__SHIFT 0x0
13795#define SPI_RESOURCE_RESERVE_CU_8__SGPR__SHIFT 0x4
13796#define SPI_RESOURCE_RESERVE_CU_8__LDS__SHIFT 0x8
13797#define SPI_RESOURCE_RESERVE_CU_8__WAVES__SHIFT 0xc
13798#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS__SHIFT 0xf
13799#define SPI_RESOURCE_RESERVE_CU_8__VGPR_MASK 0x0000000FL
13800#define SPI_RESOURCE_RESERVE_CU_8__SGPR_MASK 0x000000F0L
13801#define SPI_RESOURCE_RESERVE_CU_8__LDS_MASK 0x00000F00L
13802#define SPI_RESOURCE_RESERVE_CU_8__WAVES_MASK 0x00007000L
13803#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS_MASK 0x00078000L
13804//SPI_RESOURCE_RESERVE_CU_9
13805#define SPI_RESOURCE_RESERVE_CU_9__VGPR__SHIFT 0x0
13806#define SPI_RESOURCE_RESERVE_CU_9__SGPR__SHIFT 0x4
13807#define SPI_RESOURCE_RESERVE_CU_9__LDS__SHIFT 0x8
13808#define SPI_RESOURCE_RESERVE_CU_9__WAVES__SHIFT 0xc
13809#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS__SHIFT 0xf
13810#define SPI_RESOURCE_RESERVE_CU_9__VGPR_MASK 0x0000000FL
13811#define SPI_RESOURCE_RESERVE_CU_9__SGPR_MASK 0x000000F0L
13812#define SPI_RESOURCE_RESERVE_CU_9__LDS_MASK 0x00000F00L
13813#define SPI_RESOURCE_RESERVE_CU_9__WAVES_MASK 0x00007000L
13814#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS_MASK 0x00078000L
13815//SPI_RESOURCE_RESERVE_EN_CU_0
13816#define SPI_RESOURCE_RESERVE_EN_CU_0__EN__SHIFT 0x0
13817#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK__SHIFT 0x1
13818#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK__SHIFT 0x10
13819#define SPI_RESOURCE_RESERVE_EN_CU_0__RESERVE_SPACE_ONLY__SHIFT 0x18
13820#define SPI_RESOURCE_RESERVE_EN_CU_0__EN_MASK 0x00000001L
13821#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK_MASK 0x0000FFFEL
13822#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK_MASK 0x00FF0000L
13823#define SPI_RESOURCE_RESERVE_EN_CU_0__RESERVE_SPACE_ONLY_MASK 0x01000000L
13824//SPI_RESOURCE_RESERVE_EN_CU_1
13825#define SPI_RESOURCE_RESERVE_EN_CU_1__EN__SHIFT 0x0
13826#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK__SHIFT 0x1
13827#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK__SHIFT 0x10
13828#define SPI_RESOURCE_RESERVE_EN_CU_1__RESERVE_SPACE_ONLY__SHIFT 0x18
13829#define SPI_RESOURCE_RESERVE_EN_CU_1__EN_MASK 0x00000001L
13830#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK_MASK 0x0000FFFEL
13831#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK_MASK 0x00FF0000L
13832#define SPI_RESOURCE_RESERVE_EN_CU_1__RESERVE_SPACE_ONLY_MASK 0x01000000L
13833//SPI_RESOURCE_RESERVE_EN_CU_2
13834#define SPI_RESOURCE_RESERVE_EN_CU_2__EN__SHIFT 0x0
13835#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK__SHIFT 0x1
13836#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK__SHIFT 0x10
13837#define SPI_RESOURCE_RESERVE_EN_CU_2__RESERVE_SPACE_ONLY__SHIFT 0x18
13838#define SPI_RESOURCE_RESERVE_EN_CU_2__EN_MASK 0x00000001L
13839#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK_MASK 0x0000FFFEL
13840#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK_MASK 0x00FF0000L
13841#define SPI_RESOURCE_RESERVE_EN_CU_2__RESERVE_SPACE_ONLY_MASK 0x01000000L
13842//SPI_RESOURCE_RESERVE_EN_CU_3
13843#define SPI_RESOURCE_RESERVE_EN_CU_3__EN__SHIFT 0x0
13844#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK__SHIFT 0x1
13845#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK__SHIFT 0x10
13846#define SPI_RESOURCE_RESERVE_EN_CU_3__RESERVE_SPACE_ONLY__SHIFT 0x18
13847#define SPI_RESOURCE_RESERVE_EN_CU_3__EN_MASK 0x00000001L
13848#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK_MASK 0x0000FFFEL
13849#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK_MASK 0x00FF0000L
13850#define SPI_RESOURCE_RESERVE_EN_CU_3__RESERVE_SPACE_ONLY_MASK 0x01000000L
13851//SPI_RESOURCE_RESERVE_EN_CU_4
13852#define SPI_RESOURCE_RESERVE_EN_CU_4__EN__SHIFT 0x0
13853#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK__SHIFT 0x1
13854#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK__SHIFT 0x10
13855#define SPI_RESOURCE_RESERVE_EN_CU_4__RESERVE_SPACE_ONLY__SHIFT 0x18
13856#define SPI_RESOURCE_RESERVE_EN_CU_4__EN_MASK 0x00000001L
13857#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK_MASK 0x0000FFFEL
13858#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK_MASK 0x00FF0000L
13859#define SPI_RESOURCE_RESERVE_EN_CU_4__RESERVE_SPACE_ONLY_MASK 0x01000000L
13860//SPI_RESOURCE_RESERVE_EN_CU_5
13861#define SPI_RESOURCE_RESERVE_EN_CU_5__EN__SHIFT 0x0
13862#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK__SHIFT 0x1
13863#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK__SHIFT 0x10
13864#define SPI_RESOURCE_RESERVE_EN_CU_5__RESERVE_SPACE_ONLY__SHIFT 0x18
13865#define SPI_RESOURCE_RESERVE_EN_CU_5__EN_MASK 0x00000001L
13866#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK_MASK 0x0000FFFEL
13867#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK_MASK 0x00FF0000L
13868#define SPI_RESOURCE_RESERVE_EN_CU_5__RESERVE_SPACE_ONLY_MASK 0x01000000L
13869//SPI_RESOURCE_RESERVE_EN_CU_6
13870#define SPI_RESOURCE_RESERVE_EN_CU_6__EN__SHIFT 0x0
13871#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK__SHIFT 0x1
13872#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK__SHIFT 0x10
13873#define SPI_RESOURCE_RESERVE_EN_CU_6__RESERVE_SPACE_ONLY__SHIFT 0x18
13874#define SPI_RESOURCE_RESERVE_EN_CU_6__EN_MASK 0x00000001L
13875#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK_MASK 0x0000FFFEL
13876#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK_MASK 0x00FF0000L
13877#define SPI_RESOURCE_RESERVE_EN_CU_6__RESERVE_SPACE_ONLY_MASK 0x01000000L
13878//SPI_RESOURCE_RESERVE_EN_CU_7
13879#define SPI_RESOURCE_RESERVE_EN_CU_7__EN__SHIFT 0x0
13880#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK__SHIFT 0x1
13881#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK__SHIFT 0x10
13882#define SPI_RESOURCE_RESERVE_EN_CU_7__RESERVE_SPACE_ONLY__SHIFT 0x18
13883#define SPI_RESOURCE_RESERVE_EN_CU_7__EN_MASK 0x00000001L
13884#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK_MASK 0x0000FFFEL
13885#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK_MASK 0x00FF0000L
13886#define SPI_RESOURCE_RESERVE_EN_CU_7__RESERVE_SPACE_ONLY_MASK 0x01000000L
13887//SPI_RESOURCE_RESERVE_EN_CU_8
13888#define SPI_RESOURCE_RESERVE_EN_CU_8__EN__SHIFT 0x0
13889#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK__SHIFT 0x1
13890#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK__SHIFT 0x10
13891#define SPI_RESOURCE_RESERVE_EN_CU_8__RESERVE_SPACE_ONLY__SHIFT 0x18
13892#define SPI_RESOURCE_RESERVE_EN_CU_8__EN_MASK 0x00000001L
13893#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK_MASK 0x0000FFFEL
13894#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK_MASK 0x00FF0000L
13895#define SPI_RESOURCE_RESERVE_EN_CU_8__RESERVE_SPACE_ONLY_MASK 0x01000000L
13896//SPI_RESOURCE_RESERVE_EN_CU_9
13897#define SPI_RESOURCE_RESERVE_EN_CU_9__EN__SHIFT 0x0
13898#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK__SHIFT 0x1
13899#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK__SHIFT 0x10
13900#define SPI_RESOURCE_RESERVE_EN_CU_9__RESERVE_SPACE_ONLY__SHIFT 0x18
13901#define SPI_RESOURCE_RESERVE_EN_CU_9__EN_MASK 0x00000001L
13902#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK_MASK 0x0000FFFEL
13903#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK_MASK 0x00FF0000L
13904#define SPI_RESOURCE_RESERVE_EN_CU_9__RESERVE_SPACE_ONLY_MASK 0x01000000L
13905//SPI_RESOURCE_RESERVE_CU_10
13906#define SPI_RESOURCE_RESERVE_CU_10__VGPR__SHIFT 0x0
13907#define SPI_RESOURCE_RESERVE_CU_10__SGPR__SHIFT 0x4
13908#define SPI_RESOURCE_RESERVE_CU_10__LDS__SHIFT 0x8
13909#define SPI_RESOURCE_RESERVE_CU_10__WAVES__SHIFT 0xc
13910#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS__SHIFT 0xf
13911#define SPI_RESOURCE_RESERVE_CU_10__VGPR_MASK 0x0000000FL
13912#define SPI_RESOURCE_RESERVE_CU_10__SGPR_MASK 0x000000F0L
13913#define SPI_RESOURCE_RESERVE_CU_10__LDS_MASK 0x00000F00L
13914#define SPI_RESOURCE_RESERVE_CU_10__WAVES_MASK 0x00007000L
13915#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS_MASK 0x00078000L
13916//SPI_RESOURCE_RESERVE_CU_11
13917#define SPI_RESOURCE_RESERVE_CU_11__VGPR__SHIFT 0x0
13918#define SPI_RESOURCE_RESERVE_CU_11__SGPR__SHIFT 0x4
13919#define SPI_RESOURCE_RESERVE_CU_11__LDS__SHIFT 0x8
13920#define SPI_RESOURCE_RESERVE_CU_11__WAVES__SHIFT 0xc
13921#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS__SHIFT 0xf
13922#define SPI_RESOURCE_RESERVE_CU_11__VGPR_MASK 0x0000000FL
13923#define SPI_RESOURCE_RESERVE_CU_11__SGPR_MASK 0x000000F0L
13924#define SPI_RESOURCE_RESERVE_CU_11__LDS_MASK 0x00000F00L
13925#define SPI_RESOURCE_RESERVE_CU_11__WAVES_MASK 0x00007000L
13926#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS_MASK 0x00078000L
13927//SPI_RESOURCE_RESERVE_EN_CU_10
13928#define SPI_RESOURCE_RESERVE_EN_CU_10__EN__SHIFT 0x0
13929#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK__SHIFT 0x1
13930#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK__SHIFT 0x10
13931#define SPI_RESOURCE_RESERVE_EN_CU_10__RESERVE_SPACE_ONLY__SHIFT 0x18
13932#define SPI_RESOURCE_RESERVE_EN_CU_10__EN_MASK 0x00000001L
13933#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK_MASK 0x0000FFFEL
13934#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK_MASK 0x00FF0000L
13935#define SPI_RESOURCE_RESERVE_EN_CU_10__RESERVE_SPACE_ONLY_MASK 0x01000000L
13936//SPI_RESOURCE_RESERVE_EN_CU_11
13937#define SPI_RESOURCE_RESERVE_EN_CU_11__EN__SHIFT 0x0
13938#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK__SHIFT 0x1
13939#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK__SHIFT 0x10
13940#define SPI_RESOURCE_RESERVE_EN_CU_11__RESERVE_SPACE_ONLY__SHIFT 0x18
13941#define SPI_RESOURCE_RESERVE_EN_CU_11__EN_MASK 0x00000001L
13942#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK_MASK 0x0000FFFEL
13943#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK_MASK 0x00FF0000L
13944#define SPI_RESOURCE_RESERVE_EN_CU_11__RESERVE_SPACE_ONLY_MASK 0x01000000L
13945//SPI_RESOURCE_RESERVE_CU_12
13946#define SPI_RESOURCE_RESERVE_CU_12__VGPR__SHIFT 0x0
13947#define SPI_RESOURCE_RESERVE_CU_12__SGPR__SHIFT 0x4
13948#define SPI_RESOURCE_RESERVE_CU_12__LDS__SHIFT 0x8
13949#define SPI_RESOURCE_RESERVE_CU_12__WAVES__SHIFT 0xc
13950#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS__SHIFT 0xf
13951#define SPI_RESOURCE_RESERVE_CU_12__VGPR_MASK 0x0000000FL
13952#define SPI_RESOURCE_RESERVE_CU_12__SGPR_MASK 0x000000F0L
13953#define SPI_RESOURCE_RESERVE_CU_12__LDS_MASK 0x00000F00L
13954#define SPI_RESOURCE_RESERVE_CU_12__WAVES_MASK 0x00007000L
13955#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS_MASK 0x00078000L
13956//SPI_RESOURCE_RESERVE_CU_13
13957#define SPI_RESOURCE_RESERVE_CU_13__VGPR__SHIFT 0x0
13958#define SPI_RESOURCE_RESERVE_CU_13__SGPR__SHIFT 0x4
13959#define SPI_RESOURCE_RESERVE_CU_13__LDS__SHIFT 0x8
13960#define SPI_RESOURCE_RESERVE_CU_13__WAVES__SHIFT 0xc
13961#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS__SHIFT 0xf
13962#define SPI_RESOURCE_RESERVE_CU_13__VGPR_MASK 0x0000000FL
13963#define SPI_RESOURCE_RESERVE_CU_13__SGPR_MASK 0x000000F0L
13964#define SPI_RESOURCE_RESERVE_CU_13__LDS_MASK 0x00000F00L
13965#define SPI_RESOURCE_RESERVE_CU_13__WAVES_MASK 0x00007000L
13966#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS_MASK 0x00078000L
13967//SPI_RESOURCE_RESERVE_CU_14
13968#define SPI_RESOURCE_RESERVE_CU_14__VGPR__SHIFT 0x0
13969#define SPI_RESOURCE_RESERVE_CU_14__SGPR__SHIFT 0x4
13970#define SPI_RESOURCE_RESERVE_CU_14__LDS__SHIFT 0x8
13971#define SPI_RESOURCE_RESERVE_CU_14__WAVES__SHIFT 0xc
13972#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS__SHIFT 0xf
13973#define SPI_RESOURCE_RESERVE_CU_14__VGPR_MASK 0x0000000FL
13974#define SPI_RESOURCE_RESERVE_CU_14__SGPR_MASK 0x000000F0L
13975#define SPI_RESOURCE_RESERVE_CU_14__LDS_MASK 0x00000F00L
13976#define SPI_RESOURCE_RESERVE_CU_14__WAVES_MASK 0x00007000L
13977#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS_MASK 0x00078000L
13978//SPI_RESOURCE_RESERVE_CU_15
13979#define SPI_RESOURCE_RESERVE_CU_15__VGPR__SHIFT 0x0
13980#define SPI_RESOURCE_RESERVE_CU_15__SGPR__SHIFT 0x4
13981#define SPI_RESOURCE_RESERVE_CU_15__LDS__SHIFT 0x8
13982#define SPI_RESOURCE_RESERVE_CU_15__WAVES__SHIFT 0xc
13983#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS__SHIFT 0xf
13984#define SPI_RESOURCE_RESERVE_CU_15__VGPR_MASK 0x0000000FL
13985#define SPI_RESOURCE_RESERVE_CU_15__SGPR_MASK 0x000000F0L
13986#define SPI_RESOURCE_RESERVE_CU_15__LDS_MASK 0x00000F00L
13987#define SPI_RESOURCE_RESERVE_CU_15__WAVES_MASK 0x00007000L
13988#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS_MASK 0x00078000L
13989//SPI_RESOURCE_RESERVE_EN_CU_12
13990#define SPI_RESOURCE_RESERVE_EN_CU_12__EN__SHIFT 0x0
13991#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK__SHIFT 0x1
13992#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK__SHIFT 0x10
13993#define SPI_RESOURCE_RESERVE_EN_CU_12__RESERVE_SPACE_ONLY__SHIFT 0x18
13994#define SPI_RESOURCE_RESERVE_EN_CU_12__EN_MASK 0x00000001L
13995#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK_MASK 0x0000FFFEL
13996#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK_MASK 0x00FF0000L
13997#define SPI_RESOURCE_RESERVE_EN_CU_12__RESERVE_SPACE_ONLY_MASK 0x01000000L
13998//SPI_RESOURCE_RESERVE_EN_CU_13
13999#define SPI_RESOURCE_RESERVE_EN_CU_13__EN__SHIFT 0x0
14000#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK__SHIFT 0x1
14001#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK__SHIFT 0x10
14002#define SPI_RESOURCE_RESERVE_EN_CU_13__RESERVE_SPACE_ONLY__SHIFT 0x18
14003#define SPI_RESOURCE_RESERVE_EN_CU_13__EN_MASK 0x00000001L
14004#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK_MASK 0x0000FFFEL
14005#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK_MASK 0x00FF0000L
14006#define SPI_RESOURCE_RESERVE_EN_CU_13__RESERVE_SPACE_ONLY_MASK 0x01000000L
14007//SPI_RESOURCE_RESERVE_EN_CU_14
14008#define SPI_RESOURCE_RESERVE_EN_CU_14__EN__SHIFT 0x0
14009#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK__SHIFT 0x1
14010#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK__SHIFT 0x10
14011#define SPI_RESOURCE_RESERVE_EN_CU_14__RESERVE_SPACE_ONLY__SHIFT 0x18
14012#define SPI_RESOURCE_RESERVE_EN_CU_14__EN_MASK 0x00000001L
14013#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK_MASK 0x0000FFFEL
14014#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK_MASK 0x00FF0000L
14015#define SPI_RESOURCE_RESERVE_EN_CU_14__RESERVE_SPACE_ONLY_MASK 0x01000000L
14016//SPI_RESOURCE_RESERVE_EN_CU_15
14017#define SPI_RESOURCE_RESERVE_EN_CU_15__EN__SHIFT 0x0
14018#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK__SHIFT 0x1
14019#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK__SHIFT 0x10
14020#define SPI_RESOURCE_RESERVE_EN_CU_15__RESERVE_SPACE_ONLY__SHIFT 0x18
14021#define SPI_RESOURCE_RESERVE_EN_CU_15__EN_MASK 0x00000001L
14022#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK_MASK 0x0000FFFEL
14023#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK_MASK 0x00FF0000L
14024#define SPI_RESOURCE_RESERVE_EN_CU_15__RESERVE_SPACE_ONLY_MASK 0x01000000L
14025//SPI_COMPUTE_WF_CTX_SAVE
14026#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE__SHIFT 0x0
14027#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN__SHIFT 0x1
14028#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN__SHIFT 0x2
14029#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY__SHIFT 0x1e
14030#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY__SHIFT 0x1f
14031#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE_MASK 0x00000001L
14032#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN_MASK 0x00000002L
14033#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN_MASK 0x00000004L
14034#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY_MASK 0x40000000L
14035#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY_MASK 0x80000000L
14036//SPI_ARB_CNTL_0
14037#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT__SHIFT 0x0
14038#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT__SHIFT 0x4
14039#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT__SHIFT 0x8
14040#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT_MASK 0x0000000FL
14041#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT_MASK 0x000000F0L
14042#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT_MASK 0x00000F00L
14043
14044
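// The SPI_RESOURCE_RESERVE_EN_CU_n registers above all share one field layout
// (EN, TYPE_MASK, QUEUE_MASK, RESERVE_SPACE_ONLY), each described by a
// __SHIFT/_MASK pair.  A minimal sketch of how such a register value can be
// composed from those pairs follows; the function name and the field values
// are illustrative only and are not taken from the driver.
static inline unsigned int
spi_resource_reserve_en_cu7_example(unsigned int type_mask,
				    unsigned int queue_mask)
{
	unsigned int val = 0;

	/* Enable the reservation for this CU (bit 0). */
	val |= (1u << SPI_RESOURCE_RESERVE_EN_CU_7__EN__SHIFT) &
	       SPI_RESOURCE_RESERVE_EN_CU_7__EN_MASK;
	/* Resource types covered by the reservation (bits 15:1). */
	val |= (type_mask << SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK__SHIFT) &
	       SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK_MASK;
	/* Queues covered by the reservation (bits 23:16). */
	val |= (queue_mask << SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK__SHIFT) &
	       SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK_MASK;

	return val;
}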
14045// addressBlock: gc_cpphqddec
14046//CP_HQD_GFX_CONTROL
14047#define CP_HQD_GFX_CONTROL__MESSAGE__SHIFT 0x0
14048#define CP_HQD_GFX_CONTROL__MISC__SHIFT 0x4
14049#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT 0xf
14050#define CP_HQD_GFX_CONTROL__MESSAGE_MASK 0x0000000FL
14051#define CP_HQD_GFX_CONTROL__MISC_MASK 0x00007FF0L
14052#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN_MASK 0x00008000L
14053//CP_HQD_GFX_STATUS
14054#define CP_HQD_GFX_STATUS__STATUS__SHIFT 0x0
14055#define CP_HQD_GFX_STATUS__STATUS_MASK 0x0000FFFFL
14056//CP_HPD_ROQ_OFFSETS
14057#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
14058#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
14059#define CP_HPD_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
14060#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET_MASK 0x00000007L
14061#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET_MASK 0x00003F00L
14062#define CP_HPD_ROQ_OFFSETS__IB_OFFSET_MASK 0x003F0000L
14063//CP_HPD_STATUS0
14064#define CP_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
14065#define CP_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
14066#define CP_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
14067#define CP_HPD_STATUS0__FETCHING_MQD__SHIFT 0x10
14068#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB__SHIFT 0x11
14069#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ__SHIFT 0x12
14070#define CP_HPD_STATUS0__FORCE_QUEUE_STATE__SHIFT 0x14
14071#define CP_HPD_STATUS0__FORCE_QUEUE__SHIFT 0x1f
14072#define CP_HPD_STATUS0__QUEUE_STATE_MASK 0x0000001FL
14073#define CP_HPD_STATUS0__MAPPED_QUEUE_MASK 0x000000E0L
14074#define CP_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0x0000FF00L
14075#define CP_HPD_STATUS0__FETCHING_MQD_MASK 0x00010000L
14076#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB_MASK 0x00020000L
14077#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ_MASK 0x00040000L
14078#define CP_HPD_STATUS0__FORCE_QUEUE_STATE_MASK 0x01F00000L
14079#define CP_HPD_STATUS0__FORCE_QUEUE_MASK 0x80000000L
14080//CP_HPD_UTCL1_CNTL
14081#define CP_HPD_UTCL1_CNTL__SELECT__SHIFT 0x0
14082#define CP_HPD_UTCL1_CNTL__SELECT_MASK 0x0000000FL
14083//CP_HPD_UTCL1_ERROR
14084#define CP_HPD_UTCL1_ERROR__ADDR_HI__SHIFT 0x0
14085#define CP_HPD_UTCL1_ERROR__TYPE__SHIFT 0x10
14086#define CP_HPD_UTCL1_ERROR__VMID__SHIFT 0x14
14087#define CP_HPD_UTCL1_ERROR__ADDR_HI_MASK 0x0000FFFFL
14088#define CP_HPD_UTCL1_ERROR__TYPE_MASK 0x00010000L
14089#define CP_HPD_UTCL1_ERROR__VMID_MASK 0x00F00000L
14090//CP_HPD_UTCL1_ERROR_ADDR
14091#define CP_HPD_UTCL1_ERROR_ADDR__ADDR__SHIFT 0xc
14092#define CP_HPD_UTCL1_ERROR_ADDR__ADDR_MASK 0xFFFFF000L
14093//CP_MQD_BASE_ADDR
14094#define CP_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
14095#define CP_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
14096//CP_MQD_BASE_ADDR_HI
14097#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
14098#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
14099//CP_HQD_ACTIVE
14100#define CP_HQD_ACTIVE__ACTIVE__SHIFT 0x0
14101#define CP_HQD_ACTIVE__BUSY_GATE__SHIFT 0x1
14102#define CP_HQD_ACTIVE__ACTIVE_MASK 0x00000001L
14103#define CP_HQD_ACTIVE__BUSY_GATE_MASK 0x00000002L
14104//CP_HQD_VMID
14105#define CP_HQD_VMID__VMID__SHIFT 0x0
14106#define CP_HQD_VMID__IB_VMID__SHIFT 0x8
14107#define CP_HQD_VMID__VQID__SHIFT 0x10
14108#define CP_HQD_VMID__VMID_MASK 0x0000000FL
14109#define CP_HQD_VMID__IB_VMID_MASK 0x00000F00L
14110#define CP_HQD_VMID__VQID_MASK 0x03FF0000L
14111//CP_HQD_PERSISTENT_STATE
14112#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ__SHIFT 0x0
14113#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT 0x8
14114#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN__SHIFT 0x15
14115#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN__SHIFT 0x16
14116#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN__SHIFT 0x17
14117#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN__SHIFT 0x18
14118#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN__SHIFT 0x19
14119#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN__SHIFT 0x1a
14120#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN__SHIFT 0x1b
14121#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE__SHIFT 0x1c
14122#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES__SHIFT 0x1d
14123#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT 0x1e
14124#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE__SHIFT 0x1f
14125#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK 0x00000001L
14126#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE_MASK 0x0003FF00L
14127#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN_MASK 0x00200000L
14128#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN_MASK 0x00400000L
14129#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN_MASK 0x00800000L
14130#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN_MASK 0x01000000L
14131#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN_MASK 0x02000000L
14132#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN_MASK 0x04000000L
14133#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN_MASK 0x08000000L
14134#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE_MASK 0x10000000L
14135#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES_MASK 0x20000000L
14136#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE_MASK 0x40000000L
14137#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE_MASK 0x80000000L
14138//CP_HQD_PIPE_PRIORITY
14139#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY__SHIFT 0x0
14140#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY_MASK 0x00000003L
14141//CP_HQD_QUEUE_PRIORITY
14142#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
14143#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0x0000000FL
14144//CP_HQD_QUANTUM
14145#define CP_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
14146#define CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x4
14147#define CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
14148#define CP_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
14149#define CP_HQD_QUANTUM__QUANTUM_EN_MASK 0x00000001L
14150#define CP_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x00000010L
14151#define CP_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x00003F00L
14152#define CP_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000L
14153//CP_HQD_PQ_BASE
14154#define CP_HQD_PQ_BASE__ADDR__SHIFT 0x0
14155#define CP_HQD_PQ_BASE__ADDR_MASK 0xFFFFFFFFL
14156//CP_HQD_PQ_BASE_HI
14157#define CP_HQD_PQ_BASE_HI__ADDR_HI__SHIFT 0x0
14158#define CP_HQD_PQ_BASE_HI__ADDR_HI_MASK 0x000000FFL
14159//CP_HQD_PQ_RPTR
14160#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET__SHIFT 0x0
14161#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET_MASK 0xFFFFFFFFL
14162//CP_HQD_PQ_RPTR_REPORT_ADDR
14163#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR__SHIFT 0x2
14164#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR_MASK 0xFFFFFFFCL
14165//CP_HQD_PQ_RPTR_REPORT_ADDR_HI
14166#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI__SHIFT 0x0
14167#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI_MASK 0x0000FFFFL
14168//CP_HQD_PQ_WPTR_POLL_ADDR
14169#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR__SHIFT 0x3
14170#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR_MASK 0xFFFFFFF8L
14171//CP_HQD_PQ_WPTR_POLL_ADDR_HI
14172#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI__SHIFT 0x0
14173#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI_MASK 0x0000FFFFL
14174//CP_HQD_PQ_DOORBELL_CONTROL
14175#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT 0x0
14176#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
14177#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
14178#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE__SHIFT 0x1c
14179#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT__SHIFT 0x1d
14180#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
14181#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
14182#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE_MASK 0x00000001L
14183#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
14184#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
14185#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK 0x10000000L
14186#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT_MASK 0x20000000L
14187#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
14188#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
14189//CP_HQD_PQ_CONTROL
14190#define CP_HQD_PQ_CONTROL__QUEUE_SIZE__SHIFT 0x0
14191#define CP_HQD_PQ_CONTROL__WPTR_CARRY__SHIFT 0x6
14192#define CP_HQD_PQ_CONTROL__RPTR_CARRY__SHIFT 0x7
14193#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT 0x8
14194#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT 0xe
14195#define CP_HQD_PQ_CONTROL__PQ_EMPTY__SHIFT 0xf
14196#define CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT 0x10
14197#define CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT 0x11
14198#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE__SHIFT 0x14
14199#define CP_HQD_PQ_CONTROL__EXE_DISABLE__SHIFT 0x17
14200#define CP_HQD_PQ_CONTROL__CACHE_POLICY__SHIFT 0x18
14201#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT 0x19
14202#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR__SHIFT 0x1b
14203#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH__SHIFT 0x1c
14204#define CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP__SHIFT 0x1d
14205#define CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT 0x1e
14206#define CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT 0x1f
14207#define CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK 0x0000003FL
14208#define CP_HQD_PQ_CONTROL__WPTR_CARRY_MASK 0x00000040L
14209#define CP_HQD_PQ_CONTROL__RPTR_CARRY_MASK 0x00000080L
14210#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK 0x00003F00L
14211#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN_MASK 0x00004000L
14212#define CP_HQD_PQ_CONTROL__PQ_EMPTY_MASK 0x00008000L
14213#define CP_HQD_PQ_CONTROL__WPP_CLAMP_EN_MASK 0x00010000L
14214#define CP_HQD_PQ_CONTROL__ENDIAN_SWAP_MASK 0x00060000L
14215#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE_MASK 0x00300000L
14216#define CP_HQD_PQ_CONTROL__EXE_DISABLE_MASK 0x00800000L
14217#define CP_HQD_PQ_CONTROL__CACHE_POLICY_MASK 0x01000000L
14218#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR_MASK 0x06000000L
14219#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK 0x08000000L
14220#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK 0x10000000L
14221#define CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK 0x20000000L
14222#define CP_HQD_PQ_CONTROL__PRIV_STATE_MASK 0x40000000L
14223#define CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK 0x80000000L
14224//CP_HQD_IB_BASE_ADDR
14225#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR__SHIFT 0x2
14226#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR_MASK 0xFFFFFFFCL
14227//CP_HQD_IB_BASE_ADDR_HI
14228#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI__SHIFT 0x0
14229#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI_MASK 0x0000FFFFL
14230//CP_HQD_IB_RPTR
14231#define CP_HQD_IB_RPTR__CONSUMED_OFFSET__SHIFT 0x0
14232#define CP_HQD_IB_RPTR__CONSUMED_OFFSET_MASK 0x000FFFFFL
14233//CP_HQD_IB_CONTROL
14234#define CP_HQD_IB_CONTROL__IB_SIZE__SHIFT 0x0
14235#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT 0x14
14236#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT 0x17
14237#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY__SHIFT 0x18
14238#define CP_HQD_IB_CONTROL__PROCESSING_IB__SHIFT 0x1f
14239#define CP_HQD_IB_CONTROL__IB_SIZE_MASK 0x000FFFFFL
14240#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE_MASK 0x00300000L
14241#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE_MASK 0x00800000L
14242#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY_MASK 0x01000000L
14243#define CP_HQD_IB_CONTROL__PROCESSING_IB_MASK 0x80000000L
14244//CP_HQD_IQ_TIMER
14245#define CP_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
14246#define CP_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
14247#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
14248#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
14249#define CP_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
14250#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE__SHIFT 0x10
14251#define CP_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
14252#define CP_HQD_IQ_TIMER__EXE_DISABLE__SHIFT 0x17
14253#define CP_HQD_IQ_TIMER__CACHE_POLICY__SHIFT 0x18
14254#define CP_HQD_IQ_TIMER__QUEUE_TYPE__SHIFT 0x19
14255#define CP_HQD_IQ_TIMER__REARM_TIMER__SHIFT 0x1c
14256#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN__SHIFT 0x1d
14257#define CP_HQD_IQ_TIMER__PROCESSING_IQ__SHIFT 0x1e
14258#define CP_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
14259#define CP_HQD_IQ_TIMER__WAIT_TIME_MASK 0x000000FFL
14260#define CP_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x00000700L
14261#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x00000800L
14262#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x00003000L
14263#define CP_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0x0000C000L
14264#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE_MASK 0x003F0000L
14265#define CP_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x00400000L
14266#define CP_HQD_IQ_TIMER__EXE_DISABLE_MASK 0x00800000L
14267#define CP_HQD_IQ_TIMER__CACHE_POLICY_MASK 0x01000000L
14268#define CP_HQD_IQ_TIMER__QUEUE_TYPE_MASK 0x02000000L
14269#define CP_HQD_IQ_TIMER__REARM_TIMER_MASK 0x10000000L
14270#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN_MASK 0x20000000L
14271#define CP_HQD_IQ_TIMER__PROCESSING_IQ_MASK 0x40000000L
14272#define CP_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000L
14273//CP_HQD_IQ_RPTR
14274#define CP_HQD_IQ_RPTR__OFFSET__SHIFT 0x0
14275#define CP_HQD_IQ_RPTR__OFFSET_MASK 0x0000003FL
14276//CP_HQD_DEQUEUE_REQUEST
14277#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
14278#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
14279#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT__SHIFT 0x8
14280#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
14281#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
14282#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x00000007L
14283#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x00000010L
14284#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT_MASK 0x00000100L
14285#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x00000200L
14286#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x00000400L
14287//CP_HQD_DMA_OFFLOAD
14288#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
14289#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
14290//CP_HQD_OFFLOAD
14291#define CP_HQD_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
14292#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
14293#define CP_HQD_OFFLOAD__AQL_OFFLOAD__SHIFT 0x2
14294#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN__SHIFT 0x3
14295#define CP_HQD_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
14296#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
14297#define CP_HQD_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
14298#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x00000002L
14299#define CP_HQD_OFFLOAD__AQL_OFFLOAD_MASK 0x00000004L
14300#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN_MASK 0x00000008L
14301#define CP_HQD_OFFLOAD__EOP_OFFLOAD_MASK 0x00000010L
14302#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x00000020L
14303//CP_HQD_SEMA_CMD
14304#define CP_HQD_SEMA_CMD__RETRY__SHIFT 0x0
14305#define CP_HQD_SEMA_CMD__RESULT__SHIFT 0x1
14306#define CP_HQD_SEMA_CMD__RETRY_MASK 0x00000001L
14307#define CP_HQD_SEMA_CMD__RESULT_MASK 0x00000006L
14308//CP_HQD_MSG_TYPE
14309#define CP_HQD_MSG_TYPE__ACTION__SHIFT 0x0
14310#define CP_HQD_MSG_TYPE__SAVE_STATE__SHIFT 0x4
14311#define CP_HQD_MSG_TYPE__ACTION_MASK 0x00000007L
14312#define CP_HQD_MSG_TYPE__SAVE_STATE_MASK 0x00000070L
14313//CP_HQD_ATOMIC0_PREOP_LO
14314#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO__SHIFT 0x0
14315#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
14316//CP_HQD_ATOMIC0_PREOP_HI
14317#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI__SHIFT 0x0
14318#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
14319//CP_HQD_ATOMIC1_PREOP_LO
14320#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO__SHIFT 0x0
14321#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
14322//CP_HQD_ATOMIC1_PREOP_HI
14323#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI__SHIFT 0x0
14324#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
14325//CP_HQD_HQ_SCHEDULER0
14326#define CP_HQD_HQ_SCHEDULER0__SCHEDULER__SHIFT 0x0
14327#define CP_HQD_HQ_SCHEDULER0__SCHEDULER_MASK 0xFFFFFFFFL
14328//CP_HQD_HQ_STATUS0
14329#define CP_HQD_HQ_STATUS0__DEQUEUE_STATUS__SHIFT 0x0
14330#define CP_HQD_HQ_STATUS0__DEQUEUE_RETRY_CNT__SHIFT 0x2
14331#define CP_HQD_HQ_STATUS0__RSV_6_4__SHIFT 0x4
14332#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT__SHIFT 0x7
14333#define CP_HQD_HQ_STATUS0__TCL2_DIRTY__SHIFT 0x8
14334#define CP_HQD_HQ_STATUS0__PG_ACTIVATED__SHIFT 0x9
14335#define CP_HQD_HQ_STATUS0__RSVR_29_10__SHIFT 0xa
14336#define CP_HQD_HQ_STATUS0__QUEUE_IDLE__SHIFT 0x1e
14337#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN__SHIFT 0x1f
14338#define CP_HQD_HQ_STATUS0__DEQUEUE_STATUS_MASK 0x00000003L
14339#define CP_HQD_HQ_STATUS0__DEQUEUE_RETRY_CNT_MASK 0x0000000CL
14340#define CP_HQD_HQ_STATUS0__RSV_6_4_MASK 0x00000070L
14341#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT_MASK 0x00000080L
14342#define CP_HQD_HQ_STATUS0__TCL2_DIRTY_MASK 0x00000100L
14343#define CP_HQD_HQ_STATUS0__PG_ACTIVATED_MASK 0x00000200L
14344#define CP_HQD_HQ_STATUS0__RSVR_29_10_MASK 0x3FFFFC00L
14345#define CP_HQD_HQ_STATUS0__QUEUE_IDLE_MASK 0x40000000L
14346#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN_MASK 0x80000000L
14347//CP_HQD_HQ_CONTROL0
14348#define CP_HQD_HQ_CONTROL0__CONTROL__SHIFT 0x0
14349#define CP_HQD_HQ_CONTROL0__CONTROL_MASK 0xFFFFFFFFL
14350//CP_HQD_HQ_SCHEDULER1
14351#define CP_HQD_HQ_SCHEDULER1__SCHEDULER__SHIFT 0x0
14352#define CP_HQD_HQ_SCHEDULER1__SCHEDULER_MASK 0xFFFFFFFFL
14353//CP_MQD_CONTROL
14354#define CP_MQD_CONTROL__VMID__SHIFT 0x0
14355#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
14356#define CP_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
14357#define CP_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
14358#define CP_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
14359#define CP_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
14360#define CP_MQD_CONTROL__VMID_MASK 0x0000000FL
14361#define CP_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
14362#define CP_MQD_CONTROL__PROCESSING_MQD_MASK 0x00001000L
14363#define CP_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x00002000L
14364#define CP_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
14365#define CP_MQD_CONTROL__CACHE_POLICY_MASK 0x01000000L
14366//CP_HQD_HQ_STATUS1
14367#define CP_HQD_HQ_STATUS1__STATUS__SHIFT 0x0
14368#define CP_HQD_HQ_STATUS1__STATUS_MASK 0xFFFFFFFFL
14369//CP_HQD_HQ_CONTROL1
14370#define CP_HQD_HQ_CONTROL1__CONTROL__SHIFT 0x0
14371#define CP_HQD_HQ_CONTROL1__CONTROL_MASK 0xFFFFFFFFL
14372//CP_HQD_EOP_BASE_ADDR
14373#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR__SHIFT 0x0
14374#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
14375//CP_HQD_EOP_BASE_ADDR_HI
14376#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
14377#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x000000FFL
14378//CP_HQD_EOP_CONTROL
14379#define CP_HQD_EOP_CONTROL__EOP_SIZE__SHIFT 0x0
14380#define CP_HQD_EOP_CONTROL__PROCESSING_EOP__SHIFT 0x8
14381#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN__SHIFT 0xc
14382#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB__SHIFT 0xd
14383#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN__SHIFT 0xe
14384#define CP_HQD_EOP_CONTROL__HALT_FETCHER__SHIFT 0x15
14385#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN__SHIFT 0x16
14386#define CP_HQD_EOP_CONTROL__EXE_DISABLE__SHIFT 0x17
14387#define CP_HQD_EOP_CONTROL__CACHE_POLICY__SHIFT 0x18
14388#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT__SHIFT 0x1d
14389#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM__SHIFT 0x1f
14390#define CP_HQD_EOP_CONTROL__EOP_SIZE_MASK 0x0000003FL
14391#define CP_HQD_EOP_CONTROL__PROCESSING_EOP_MASK 0x00000100L
14392#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN_MASK 0x00001000L
14393#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB_MASK 0x00002000L
14394#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN_MASK 0x00004000L
14395#define CP_HQD_EOP_CONTROL__HALT_FETCHER_MASK 0x00200000L
14396#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN_MASK 0x00400000L
14397#define CP_HQD_EOP_CONTROL__EXE_DISABLE_MASK 0x00800000L
14398#define CP_HQD_EOP_CONTROL__CACHE_POLICY_MASK 0x01000000L
14399#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT_MASK 0x60000000L
14400#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM_MASK 0x80000000L
14401//CP_HQD_EOP_RPTR
14402#define CP_HQD_EOP_RPTR__RPTR__SHIFT 0x0
14403#define CP_HQD_EOP_RPTR__RESET_FETCHER__SHIFT 0x1c
14404#define CP_HQD_EOP_RPTR__DEQUEUE_PEND__SHIFT 0x1d
14405#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR__SHIFT 0x1e
14406#define CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT 0x1f
14407#define CP_HQD_EOP_RPTR__RPTR_MASK 0x00001FFFL
14408#define CP_HQD_EOP_RPTR__RESET_FETCHER_MASK 0x10000000L
14409#define CP_HQD_EOP_RPTR__DEQUEUE_PEND_MASK 0x20000000L
14410#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR_MASK 0x40000000L
14411#define CP_HQD_EOP_RPTR__INIT_FETCHER_MASK 0x80000000L
14412//CP_HQD_EOP_WPTR
14413#define CP_HQD_EOP_WPTR__WPTR__SHIFT 0x0
14414#define CP_HQD_EOP_WPTR__EOP_EMPTY__SHIFT 0xf
14415#define CP_HQD_EOP_WPTR__EOP_AVAIL__SHIFT 0x10
14416#define CP_HQD_EOP_WPTR__WPTR_MASK 0x00001FFFL
14417#define CP_HQD_EOP_WPTR__EOP_EMPTY_MASK 0x00008000L
14418#define CP_HQD_EOP_WPTR__EOP_AVAIL_MASK 0x1FFF0000L
14419//CP_HQD_EOP_EVENTS
14420#define CP_HQD_EOP_EVENTS__EVENT_COUNT__SHIFT 0x0
14421#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND__SHIFT 0x10
14422#define CP_HQD_EOP_EVENTS__EVENT_COUNT_MASK 0x00000FFFL
14423#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND_MASK 0x00010000L
14424//CP_HQD_CTX_SAVE_BASE_ADDR_LO
14425#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
14426#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xFFFFF000L
14427//CP_HQD_CTX_SAVE_BASE_ADDR_HI
14428#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
14429#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
14430//CP_HQD_CTX_SAVE_CONTROL
14431#define CP_HQD_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
14432#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE__SHIFT 0x17
14433#define CP_HQD_CTX_SAVE_CONTROL__POLICY_MASK 0x00000008L
14434#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE_MASK 0x00800000L
14435//CP_HQD_CNTL_STACK_OFFSET
14436#define CP_HQD_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
14437#define CP_HQD_CNTL_STACK_OFFSET__OFFSET_MASK 0x00007FFCL
14438//CP_HQD_CNTL_STACK_SIZE
14439#define CP_HQD_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
14440#define CP_HQD_CNTL_STACK_SIZE__SIZE_MASK 0x00007000L
14441//CP_HQD_WG_STATE_OFFSET
14442#define CP_HQD_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
14443#define CP_HQD_WG_STATE_OFFSET__OFFSET_MASK 0x01FFFFFCL
14444//CP_HQD_CTX_SAVE_SIZE
14445#define CP_HQD_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
14446#define CP_HQD_CTX_SAVE_SIZE__SIZE_MASK 0x01FFF000L
14447//CP_HQD_GDS_RESOURCE_STATE
14448#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED__SHIFT 0x0
14449#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED__SHIFT 0x1
14450#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE__SHIFT 0x4
14451#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR__SHIFT 0xc
14452#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED_MASK 0x00000001L
14453#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED_MASK 0x00000002L
14454#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE_MASK 0x000003F0L
14455#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR_MASK 0x0003F000L
14456//CP_HQD_ERROR
14457#define CP_HQD_ERROR__EDC_ERROR_ID__SHIFT 0x0
14458#define CP_HQD_ERROR__SUA_ERROR__SHIFT 0x4
14459#define CP_HQD_ERROR__AQL_ERROR__SHIFT 0x5
14460#define CP_HQD_ERROR__PQ_UTCL1_ERROR__SHIFT 0x8
14461#define CP_HQD_ERROR__IB_UTCL1_ERROR__SHIFT 0x9
14462#define CP_HQD_ERROR__EOP_UTCL1_ERROR__SHIFT 0xa
14463#define CP_HQD_ERROR__IQ_UTCL1_ERROR__SHIFT 0xb
14464#define CP_HQD_ERROR__RRPT_UTCL1_ERROR__SHIFT 0xc
14465#define CP_HQD_ERROR__WPP_UTCL1_ERROR__SHIFT 0xd
14466#define CP_HQD_ERROR__SEM_UTCL1_ERROR__SHIFT 0xe
14467#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0xf
14468#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x10
14469#define CP_HQD_ERROR__SR_UTCL1_ERROR__SHIFT 0x11
14470#define CP_HQD_ERROR__QU_UTCL1_ERROR__SHIFT 0x12
14471#define CP_HQD_ERROR__TC_UTCL1_ERROR__SHIFT 0x13
14472#define CP_HQD_ERROR__EDC_ERROR_ID_MASK 0x0000000FL
14473#define CP_HQD_ERROR__SUA_ERROR_MASK 0x00000010L
14474#define CP_HQD_ERROR__AQL_ERROR_MASK 0x00000020L
14475#define CP_HQD_ERROR__PQ_UTCL1_ERROR_MASK 0x00000100L
14476#define CP_HQD_ERROR__IB_UTCL1_ERROR_MASK 0x00000200L
14477#define CP_HQD_ERROR__EOP_UTCL1_ERROR_MASK 0x00000400L
14478#define CP_HQD_ERROR__IQ_UTCL1_ERROR_MASK 0x00000800L
14479#define CP_HQD_ERROR__RRPT_UTCL1_ERROR_MASK 0x00001000L
14480#define CP_HQD_ERROR__WPP_UTCL1_ERROR_MASK 0x00002000L
14481#define CP_HQD_ERROR__SEM_UTCL1_ERROR_MASK 0x00004000L
14482#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00008000L
14483#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00010000L
14484#define CP_HQD_ERROR__SR_UTCL1_ERROR_MASK 0x00020000L
14485#define CP_HQD_ERROR__QU_UTCL1_ERROR_MASK 0x00040000L
14486#define CP_HQD_ERROR__TC_UTCL1_ERROR_MASK 0x00080000L
14487//CP_HQD_EOP_WPTR_MEM
14488#define CP_HQD_EOP_WPTR_MEM__WPTR__SHIFT 0x0
14489#define CP_HQD_EOP_WPTR_MEM__WPTR_MASK 0x00001FFFL
14490//CP_HQD_AQL_CONTROL
14491#define CP_HQD_AQL_CONTROL__CONTROL0__SHIFT 0x0
14492#define CP_HQD_AQL_CONTROL__CONTROL0_EN__SHIFT 0xf
14493#define CP_HQD_AQL_CONTROL__CONTROL1__SHIFT 0x10
14494#define CP_HQD_AQL_CONTROL__CONTROL1_EN__SHIFT 0x1f
14495#define CP_HQD_AQL_CONTROL__CONTROL0_MASK 0x00007FFFL
14496#define CP_HQD_AQL_CONTROL__CONTROL0_EN_MASK 0x00008000L
14497#define CP_HQD_AQL_CONTROL__CONTROL1_MASK 0x7FFF0000L
14498#define CP_HQD_AQL_CONTROL__CONTROL1_EN_MASK 0x80000000L
14499//CP_HQD_PQ_WPTR_LO
14500#define CP_HQD_PQ_WPTR_LO__OFFSET__SHIFT 0x0
14501#define CP_HQD_PQ_WPTR_LO__OFFSET_MASK 0xFFFFFFFFL
14502//CP_HQD_PQ_WPTR_HI
14503#define CP_HQD_PQ_WPTR_HI__DATA__SHIFT 0x0
14504#define CP_HQD_PQ_WPTR_HI__DATA_MASK 0xFFFFFFFFL
14505
14506
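// The CP_HQD_* registers above describe a hardware queue descriptor (HQD);
// amdgpu/amdkfd programs them when mapping a compute queue.  The sketch below
// shows the generic read-modify-write pattern for updating one field (here the
// doorbell offset in CP_HQD_PQ_DOORBELL_CONTROL) without disturbing the other
// fields.  The helper name is illustrative, not a driver API.
static inline unsigned int
cp_hqd_set_doorbell_offset_example(unsigned int reg_val,
				   unsigned int doorbell_offset)
{
	/* Clear the old offset field, then insert the new offset. */
	reg_val &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
	reg_val |= (doorbell_offset <<
		    CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT) &
		   CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
	/* Turn the doorbell on for this queue. */
	reg_val |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;

	return reg_val;
}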
14507// addressBlock: gc_didtdec
14508//DIDT_IND_INDEX
14509#define DIDT_IND_INDEX__DIDT_IND_INDEX__SHIFT 0x0
14510#define DIDT_IND_INDEX__DIDT_IND_INDEX_MASK 0xFFFFFFFFL
14511//DIDT_IND_DATA
14512#define DIDT_IND_DATA__DIDT_IND_DATA__SHIFT 0x0
14513#define DIDT_IND_DATA__DIDT_IND_DATA_MASK 0xFFFFFFFFL
14514
14515
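// DIDT registers are reached indirectly: software writes the target register
// address into DIDT_IND_INDEX and then moves data through DIDT_IND_DATA.  A
// hedged sketch of that index/data sequence is shown below; rreg/wreg stand in
// for whatever MMIO accessors the caller has (e.g. RREG32/WREG32 in the kernel
// driver), and the two mm_* offsets are parameters here, not values defined in
// this file.
static inline unsigned int
didt_ind_read_example(unsigned int (*rreg)(unsigned int),
		      void (*wreg)(unsigned int, unsigned int),
		      unsigned int mm_didt_ind_index,
		      unsigned int mm_didt_ind_data,
		      unsigned int didt_reg)
{
	/* Select the indirect DIDT register, then read its value back. */
	wreg(mm_didt_ind_index, didt_reg);
	return rreg(mm_didt_ind_data);
}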
14516// addressBlock: gc_gccacdec
14517//GC_CAC_CTRL_1
14518#define GC_CAC_CTRL_1__CAC_WINDOW__SHIFT 0x0
14519#define GC_CAC_CTRL_1__TDP_WINDOW__SHIFT 0x18
14520#define GC_CAC_CTRL_1__CAC_WINDOW_MASK 0x00FFFFFFL
14521#define GC_CAC_CTRL_1__TDP_WINDOW_MASK 0xFF000000L
14522//GC_CAC_CTRL_2
14523#define GC_CAC_CTRL_2__CAC_ENABLE__SHIFT 0x0
14524#define GC_CAC_CTRL_2__CAC_SOFT_CTRL_ENABLE__SHIFT 0x1
14525#define GC_CAC_CTRL_2__UNUSED_0__SHIFT 0x2
14526#define GC_CAC_CTRL_2__CAC_ENABLE_MASK 0x00000001L
14527#define GC_CAC_CTRL_2__CAC_SOFT_CTRL_ENABLE_MASK 0x00000002L
14528#define GC_CAC_CTRL_2__UNUSED_0_MASK 0xFFFFFFFCL
14529//GC_CAC_CGTT_CLK_CTRL
14530#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
14531#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
14532#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
14533#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
14534#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
14535#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
14536#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
14537#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
14538//GC_CAC_AGGR_LOWER
14539#define GC_CAC_AGGR_LOWER__AGGR_31_0__SHIFT 0x0
14540#define GC_CAC_AGGR_LOWER__AGGR_31_0_MASK 0xFFFFFFFFL
14541//GC_CAC_AGGR_UPPER
14542#define GC_CAC_AGGR_UPPER__AGGR_63_32__SHIFT 0x0
14543#define GC_CAC_AGGR_UPPER__AGGR_63_32_MASK 0xFFFFFFFFL
14544//GC_CAC_PG_AGGR_LOWER
14545#define GC_CAC_PG_AGGR_LOWER__LKG_AGGR_31_0__SHIFT 0x0
14546#define GC_CAC_PG_AGGR_LOWER__LKG_AGGR_31_0_MASK 0xFFFFFFFFL
14547//GC_CAC_PG_AGGR_UPPER
14548#define GC_CAC_PG_AGGR_UPPER__LKG_AGGR_63_32__SHIFT 0x0
14549#define GC_CAC_PG_AGGR_UPPER__LKG_AGGR_63_32_MASK 0xFFFFFFFFL
14550//GC_CAC_SOFT_CTRL
14551#define GC_CAC_SOFT_CTRL__SOFT_SNAP__SHIFT 0x0
14552#define GC_CAC_SOFT_CTRL__UNUSED__SHIFT 0x1
14553#define GC_CAC_SOFT_CTRL__SOFT_SNAP_MASK 0x00000001L
14554#define GC_CAC_SOFT_CTRL__UNUSED_MASK 0xFFFFFFFEL
14555//GC_DIDT_CTRL0
14556#define GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
14557#define GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT 0x1
14558#define GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT 0x3
14559#define GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
14560#define GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x5
14561#define GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
14562#define GC_DIDT_CTRL0__PHASE_OFFSET_MASK 0x00000006L
14563#define GC_DIDT_CTRL0__DIDT_SW_RST_MASK 0x00000008L
14564#define GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
14565#define GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001E0L
14566//GC_DIDT_CTRL1
14567#define GC_DIDT_CTRL1__MIN_POWER__SHIFT 0x0
14568#define GC_DIDT_CTRL1__MAX_POWER__SHIFT 0x10
14569#define GC_DIDT_CTRL1__MIN_POWER_MASK 0x0000FFFFL
14570#define GC_DIDT_CTRL1__MAX_POWER_MASK 0xFFFF0000L
14571//GC_DIDT_CTRL2
14572#define GC_DIDT_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
14573#define GC_DIDT_CTRL2__UNUSED_0__SHIFT 0xe
14574#define GC_DIDT_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
14575#define GC_DIDT_CTRL2__UNUSED_1__SHIFT 0x1a
14576#define GC_DIDT_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
14577#define GC_DIDT_CTRL2__UNUSED_2__SHIFT 0x1f
14578#define GC_DIDT_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
14579#define GC_DIDT_CTRL2__UNUSED_0_MASK 0x0000C000L
14580#define GC_DIDT_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
14581#define GC_DIDT_CTRL2__UNUSED_1_MASK 0x04000000L
14582#define GC_DIDT_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
14583#define GC_DIDT_CTRL2__UNUSED_2_MASK 0x80000000L
14584//GC_DIDT_WEIGHT
14585#define GC_DIDT_WEIGHT__SQ_WEIGHT__SHIFT 0x0
14586#define GC_DIDT_WEIGHT__DB_WEIGHT__SHIFT 0x8
14587#define GC_DIDT_WEIGHT__TD_WEIGHT__SHIFT 0x10
14588#define GC_DIDT_WEIGHT__TCP_WEIGHT__SHIFT 0x18
14589#define GC_DIDT_WEIGHT__SQ_WEIGHT_MASK 0x000000FFL
14590#define GC_DIDT_WEIGHT__DB_WEIGHT_MASK 0x0000FF00L
14591#define GC_DIDT_WEIGHT__TD_WEIGHT_MASK 0x00FF0000L
14592#define GC_DIDT_WEIGHT__TCP_WEIGHT_MASK 0xFF000000L
14593//GC_EDC_CTRL
14594#define GC_EDC_CTRL__EDC_EN__SHIFT 0x0
14595#define GC_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
14596#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
14597#define GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
14598#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
14599#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x9
14600#define GC_EDC_CTRL__UNUSED_0__SHIFT 0xa
14601#define GC_EDC_CTRL__EDC_EN_MASK 0x00000001L
14602#define GC_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
14603#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
14604#define GC_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
14605#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
14606#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00000200L
14607#define GC_EDC_CTRL__UNUSED_0_MASK 0xFFFFFC00L
14608//GC_EDC_THRESHOLD
14609#define GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
14610#define GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
14611//GC_EDC_STATUS
14612#define GC_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x0
14613#define GC_EDC_STATUS__EDC_ROLLING_DROOP_DELTA__SHIFT 0x3
14614#define GC_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x00000007L
14615#define GC_EDC_STATUS__EDC_ROLLING_DROOP_DELTA_MASK 0x03FFFFF8L
14616//GC_EDC_OVERFLOW
14617#define GC_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
14618#define GC_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
14619#define GC_EDC_OVERFLOW__EDC_DROOP_LEVEL_OVERFLOW__SHIFT 0x11
14620#define GC_EDC_OVERFLOW__PSM_COUNTER__SHIFT 0x12
14621#define GC_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
14622#define GC_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
14623#define GC_EDC_OVERFLOW__EDC_DROOP_LEVEL_OVERFLOW_MASK 0x00020000L
14624#define GC_EDC_OVERFLOW__PSM_COUNTER_MASK 0xFFFC0000L
14625//GC_EDC_ROLLING_POWER_DELTA
14626#define GC_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
14627#define GC_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
14628//GC_DIDT_DROOP_CTRL
14629#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT 0x0
14630#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT 0x1
14631#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT 0xf
14632#define GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT 0x13
14633#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT 0x1f
14634#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK 0x00000001L
14635#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK 0x00007FFEL
14636#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK 0x00078000L
14637#define GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK 0x00080000L
14638#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK 0x80000000L
14639//GC_EDC_DROOP_CTRL
14640#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT 0x0
14641#define GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT 0x1
14642#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT 0xf
14643#define GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT 0x14
14644#define GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT 0x15
14645#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK 0x00000001L
14646#define GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK 0x00007FFEL
14647#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK 0x000F8000L
14648#define GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK 0x00100000L
14649#define GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK 0x00200000L
14650//GC_CAC_IND_INDEX
14651#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR__SHIFT 0x0
14652#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR_MASK 0xFFFFFFFFL
14653//GC_CAC_IND_DATA
14654#define GC_CAC_IND_DATA__GC_CAC_IND_DATA__SHIFT 0x0
14655#define GC_CAC_IND_DATA__GC_CAC_IND_DATA_MASK 0xFFFFFFFFL
14656//SE_CAC_CGTT_CLK_CTRL
14657#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
14658#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
14659#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
14660#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
14661#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
14662#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
14663#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
14664#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
14665//SE_CAC_IND_INDEX
14666#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR__SHIFT 0x0
14667#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR_MASK 0xFFFFFFFFL
14668//SE_CAC_IND_DATA
14669#define SE_CAC_IND_DATA__SE_CAC_IND_DATA__SHIFT 0x0
14670#define SE_CAC_IND_DATA__SE_CAC_IND_DATA_MASK 0xFFFFFFFFL
14671
14672
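// The GC_CAC/GC_EDC status registers above are read and decoded field by
// field with the corresponding _MASK and __SHIFT macros.  A minimal sketch for
// extracting the current throttle level from a GC_EDC_STATUS value follows; it
// only uses the macros defined above and is not a function from the driver.
static inline unsigned int
gc_edc_throttle_level_example(unsigned int gc_edc_status)
{
	/* Isolate the 3-bit throttle level field (bits 2:0). */
	return (gc_edc_status & GC_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK) >>
	       GC_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT;
}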
14673// addressBlock: gc_tcpdec
14674//TCP_WATCH0_ADDR_H
14675#define TCP_WATCH0_ADDR_H__ADDR__SHIFT 0x0
14676#define TCP_WATCH0_ADDR_H__ADDR_MASK 0x0000FFFFL
14677//TCP_WATCH0_ADDR_L
14678#define TCP_WATCH0_ADDR_L__ADDR__SHIFT 0x6
14679#define TCP_WATCH0_ADDR_L__ADDR_MASK 0xFFFFFFC0L
14680//TCP_WATCH0_CNTL
14681#define TCP_WATCH0_CNTL__MASK__SHIFT 0x0
14682#define TCP_WATCH0_CNTL__VMID__SHIFT 0x18
14683#define TCP_WATCH0_CNTL__ATC__SHIFT 0x1c
14684#define TCP_WATCH0_CNTL__MODE__SHIFT 0x1d
14685#define TCP_WATCH0_CNTL__VALID__SHIFT 0x1f
14686#define TCP_WATCH0_CNTL__MASK_MASK 0x00FFFFFFL
14687#define TCP_WATCH0_CNTL__VMID_MASK 0x0F000000L
14688#define TCP_WATCH0_CNTL__ATC_MASK 0x10000000L
14689#define TCP_WATCH0_CNTL__MODE_MASK 0x60000000L
14690#define TCP_WATCH0_CNTL__VALID_MASK 0x80000000L
14691//TCP_WATCH1_ADDR_H
14692#define TCP_WATCH1_ADDR_H__ADDR__SHIFT 0x0
14693#define TCP_WATCH1_ADDR_H__ADDR_MASK 0x0000FFFFL
14694//TCP_WATCH1_ADDR_L
14695#define TCP_WATCH1_ADDR_L__ADDR__SHIFT 0x6
14696#define TCP_WATCH1_ADDR_L__ADDR_MASK 0xFFFFFFC0L
14697//TCP_WATCH1_CNTL
14698#define TCP_WATCH1_CNTL__MASK__SHIFT 0x0
14699#define TCP_WATCH1_CNTL__VMID__SHIFT 0x18
14700#define TCP_WATCH1_CNTL__ATC__SHIFT 0x1c
14701#define TCP_WATCH1_CNTL__MODE__SHIFT 0x1d
14702#define TCP_WATCH1_CNTL__VALID__SHIFT 0x1f
14703#define TCP_WATCH1_CNTL__MASK_MASK 0x00FFFFFFL
14704#define TCP_WATCH1_CNTL__VMID_MASK 0x0F000000L
14705#define TCP_WATCH1_CNTL__ATC_MASK 0x10000000L
14706#define TCP_WATCH1_CNTL__MODE_MASK 0x60000000L
14707#define TCP_WATCH1_CNTL__VALID_MASK 0x80000000L
14708//TCP_WATCH2_ADDR_H
14709#define TCP_WATCH2_ADDR_H__ADDR__SHIFT 0x0
14710#define TCP_WATCH2_ADDR_H__ADDR_MASK 0x0000FFFFL
14711//TCP_WATCH2_ADDR_L
14712#define TCP_WATCH2_ADDR_L__ADDR__SHIFT 0x6
14713#define TCP_WATCH2_ADDR_L__ADDR_MASK 0xFFFFFFC0L
14714//TCP_WATCH2_CNTL
14715#define TCP_WATCH2_CNTL__MASK__SHIFT 0x0
14716#define TCP_WATCH2_CNTL__VMID__SHIFT 0x18
14717#define TCP_WATCH2_CNTL__ATC__SHIFT 0x1c
14718#define TCP_WATCH2_CNTL__MODE__SHIFT 0x1d
14719#define TCP_WATCH2_CNTL__VALID__SHIFT 0x1f
14720#define TCP_WATCH2_CNTL__MASK_MASK 0x00FFFFFFL
14721#define TCP_WATCH2_CNTL__VMID_MASK 0x0F000000L
14722#define TCP_WATCH2_CNTL__ATC_MASK 0x10000000L
14723#define TCP_WATCH2_CNTL__MODE_MASK 0x60000000L
14724#define TCP_WATCH2_CNTL__VALID_MASK 0x80000000L
14725//TCP_WATCH3_ADDR_H
14726#define TCP_WATCH3_ADDR_H__ADDR__SHIFT 0x0
14727#define TCP_WATCH3_ADDR_H__ADDR_MASK 0x0000FFFFL
14728//TCP_WATCH3_ADDR_L
14729#define TCP_WATCH3_ADDR_L__ADDR__SHIFT 0x6
14730#define TCP_WATCH3_ADDR_L__ADDR_MASK 0xFFFFFFC0L
14731//TCP_WATCH3_CNTL
14732#define TCP_WATCH3_CNTL__MASK__SHIFT 0x0
14733#define TCP_WATCH3_CNTL__VMID__SHIFT 0x18
14734#define TCP_WATCH3_CNTL__ATC__SHIFT 0x1c
14735#define TCP_WATCH3_CNTL__MODE__SHIFT 0x1d
14736#define TCP_WATCH3_CNTL__VALID__SHIFT 0x1f
14737#define TCP_WATCH3_CNTL__MASK_MASK 0x00FFFFFFL
14738#define TCP_WATCH3_CNTL__VMID_MASK 0x0F000000L
14739#define TCP_WATCH3_CNTL__ATC_MASK 0x10000000L
14740#define TCP_WATCH3_CNTL__MODE_MASK 0x60000000L
14741#define TCP_WATCH3_CNTL__VALID_MASK 0x80000000L
14742//TCP_GATCL1_CNTL
14743#define TCP_GATCL1_CNTL__INVALIDATE_ALL_VMID__SHIFT 0x19
14744#define TCP_GATCL1_CNTL__FORCE_MISS__SHIFT 0x1a
14745#define TCP_GATCL1_CNTL__FORCE_IN_ORDER__SHIFT 0x1b
14746#define TCP_GATCL1_CNTL__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
14747#define TCP_GATCL1_CNTL__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
14748#define TCP_GATCL1_CNTL__INVALIDATE_ALL_VMID_MASK 0x02000000L
14749#define TCP_GATCL1_CNTL__FORCE_MISS_MASK 0x04000000L
14750#define TCP_GATCL1_CNTL__FORCE_IN_ORDER_MASK 0x08000000L
14751#define TCP_GATCL1_CNTL__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
14752#define TCP_GATCL1_CNTL__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
14753//TCP_ATC_EDC_GATCL1_CNT
14754#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT 0x0
14755#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK 0x000000FFL
14756//TCP_GATCL1_DSM_CNTL
14757#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A0__SHIFT 0x0
14758#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A1__SHIFT 0x1
14759#define TCP_GATCL1_DSM_CNTL__TCP_GATCL1_ENABLE_SINGLE_WRITE_A__SHIFT 0x2
14760#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A0_MASK 0x00000001L
14761#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A1_MASK 0x00000002L
14762#define TCP_GATCL1_DSM_CNTL__TCP_GATCL1_ENABLE_SINGLE_WRITE_A_MASK 0x00000004L
14763//TCP_CNTL2
14764#define TCP_CNTL2__LS_DISABLE_CLOCKS__SHIFT 0x0
14765#define TCP_CNTL2__LS_DISABLE_CLOCKS_MASK 0x000000FFL
14766//TCP_UTCL1_CNTL1
14767#define TCP_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
14768#define TCP_UTCL1_CNTL1__GPUVM_64K_DEFAULT__SHIFT 0x1
14769#define TCP_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
14770#define TCP_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
14771#define TCP_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
14772#define TCP_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
14773#define TCP_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
14774#define TCP_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
14775#define TCP_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
14776#define TCP_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
14777#define TCP_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
14778#define TCP_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
14779#define TCP_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
14780#define TCP_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
14781#define TCP_UTCL1_CNTL1__GPUVM_64K_DEFAULT_MASK 0x00000002L
14782#define TCP_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
14783#define TCP_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
14784#define TCP_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
14785#define TCP_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
14786#define TCP_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
14787#define TCP_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
14788#define TCP_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
14789#define TCP_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
14790#define TCP_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
14791#define TCP_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
14792#define TCP_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
14793//TCP_UTCL1_CNTL2
14794#define TCP_UTCL1_CNTL2__SPARE__SHIFT 0x0
14795#define TCP_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
14796#define TCP_UTCL1_CNTL2__ANY_LINE_VALID__SHIFT 0xa
14797#define TCP_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
14798#define TCP_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
14799#define TCP_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
14800#define TCP_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
14801#define TCP_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
14802#define TCP_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
14803#define TCP_UTCL1_CNTL2__ANY_LINE_VALID_MASK 0x00000400L
14804#define TCP_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
14805#define TCP_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
14806#define TCP_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
14807#define TCP_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
14808//TCP_UTCL1_STATUS
14809#define TCP_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
14810#define TCP_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
14811#define TCP_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
14812#define TCP_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
14813#define TCP_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
14814#define TCP_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
14815//TCP_PERFCOUNTER_FILTER
14816#define TCP_PERFCOUNTER_FILTER__BUFFER__SHIFT 0x0
14817#define TCP_PERFCOUNTER_FILTER__FLAT__SHIFT 0x1
14818#define TCP_PERFCOUNTER_FILTER__DIM__SHIFT 0x2
14819#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT__SHIFT 0x5
14820#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT__SHIFT 0xb
14821#define TCP_PERFCOUNTER_FILTER__SW_MODE__SHIFT 0xf
14822#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES__SHIFT 0x14
14823#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE__SHIFT 0x16
14824#define TCP_PERFCOUNTER_FILTER__GLC__SHIFT 0x19
14825#define TCP_PERFCOUNTER_FILTER__SLC__SHIFT 0x1a
14826#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE__SHIFT 0x1b
14827#define TCP_PERFCOUNTER_FILTER__ADDR_MODE__SHIFT 0x1c
14828#define TCP_PERFCOUNTER_FILTER__BUFFER_MASK 0x00000001L
14829#define TCP_PERFCOUNTER_FILTER__FLAT_MASK 0x00000002L
14830#define TCP_PERFCOUNTER_FILTER__DIM_MASK 0x0000001CL
14831#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT_MASK 0x000007E0L
14832#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT_MASK 0x00007800L
14833#define TCP_PERFCOUNTER_FILTER__SW_MODE_MASK 0x000F8000L
14834#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES_MASK 0x00300000L
14835#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE_MASK 0x01C00000L
14836#define TCP_PERFCOUNTER_FILTER__GLC_MASK 0x02000000L
14837#define TCP_PERFCOUNTER_FILTER__SLC_MASK 0x04000000L
14838#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE_MASK 0x08000000L
14839#define TCP_PERFCOUNTER_FILTER__ADDR_MODE_MASK 0x70000000L
14840//TCP_PERFCOUNTER_FILTER_EN
14841#define TCP_PERFCOUNTER_FILTER_EN__BUFFER__SHIFT 0x0
14842#define TCP_PERFCOUNTER_FILTER_EN__FLAT__SHIFT 0x1
14843#define TCP_PERFCOUNTER_FILTER_EN__DIM__SHIFT 0x2
14844#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT__SHIFT 0x3
14845#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT__SHIFT 0x4
14846#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE__SHIFT 0x5
14847#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES__SHIFT 0x6
14848#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE__SHIFT 0x7
14849#define TCP_PERFCOUNTER_FILTER_EN__GLC__SHIFT 0x8
14850#define TCP_PERFCOUNTER_FILTER_EN__SLC__SHIFT 0x9
14851#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE__SHIFT 0xa
14852#define TCP_PERFCOUNTER_FILTER_EN__ADDR_MODE__SHIFT 0xb
14853#define TCP_PERFCOUNTER_FILTER_EN__BUFFER_MASK 0x00000001L
14854#define TCP_PERFCOUNTER_FILTER_EN__FLAT_MASK 0x00000002L
14855#define TCP_PERFCOUNTER_FILTER_EN__DIM_MASK 0x00000004L
14856#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT_MASK 0x00000008L
14857#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT_MASK 0x00000010L
14858#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE_MASK 0x00000020L
14859#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES_MASK 0x00000040L
14860#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE_MASK 0x00000080L
14861#define TCP_PERFCOUNTER_FILTER_EN__GLC_MASK 0x00000100L
14862#define TCP_PERFCOUNTER_FILTER_EN__SLC_MASK 0x00000200L
14863#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE_MASK 0x00000400L
14864#define TCP_PERFCOUNTER_FILTER_EN__ADDR_MODE_MASK 0x00000800L
14865
14866
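// TCP_WATCHn_ADDR_L/ADDR_H split a watch address across two registers: ADDR_L
// holds bits 31:6 of the low word (the low six bits are implied zero, i.e. the
// watch is 64-byte aligned) and ADDR_H holds the upper 16 address bits.  The
// sketch below shows one plausible way to derive the two register values from
// a 64-bit address using the masks above; it is illustrative, under that
// interpretation of the fields, and is not the driver's address-watch code.
static inline void
tcp_watch0_split_addr_example(unsigned long long addr,
			      unsigned int *addr_l, unsigned int *addr_h)
{
	/* Low register: address bits 31:6, already in position. */
	*addr_l = (unsigned int)addr & TCP_WATCH0_ADDR_L__ADDR_MASK;
	/* High register: address bits 47:32. */
	*addr_h = (unsigned int)(addr >> 32) & TCP_WATCH0_ADDR_H__ADDR_MASK;
}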
14867// addressBlock: gc_gdspdec
14868//GDS_VMID0_BASE
14869#define GDS_VMID0_BASE__BASE__SHIFT 0x0
14870#define GDS_VMID0_BASE__BASE_MASK 0x0000FFFFL
14871//GDS_VMID0_SIZE
14872#define GDS_VMID0_SIZE__SIZE__SHIFT 0x0
14873#define GDS_VMID0_SIZE__SIZE_MASK 0x0001FFFFL
14874//GDS_VMID1_BASE
14875#define GDS_VMID1_BASE__BASE__SHIFT 0x0
14876#define GDS_VMID1_BASE__BASE_MASK 0x0000FFFFL
14877//GDS_VMID1_SIZE
14878#define GDS_VMID1_SIZE__SIZE__SHIFT 0x0
14879#define GDS_VMID1_SIZE__SIZE_MASK 0x0001FFFFL
14880//GDS_VMID2_BASE
14881#define GDS_VMID2_BASE__BASE__SHIFT 0x0
14882#define GDS_VMID2_BASE__BASE_MASK 0x0000FFFFL
14883//GDS_VMID2_SIZE
14884#define GDS_VMID2_SIZE__SIZE__SHIFT 0x0
14885#define GDS_VMID2_SIZE__SIZE_MASK 0x0001FFFFL
14886//GDS_VMID3_BASE
14887#define GDS_VMID3_BASE__BASE__SHIFT 0x0
14888#define GDS_VMID3_BASE__BASE_MASK 0x0000FFFFL
14889//GDS_VMID3_SIZE
14890#define GDS_VMID3_SIZE__SIZE__SHIFT 0x0
14891#define GDS_VMID3_SIZE__SIZE_MASK 0x0001FFFFL
14892//GDS_VMID4_BASE
14893#define GDS_VMID4_BASE__BASE__SHIFT 0x0
14894#define GDS_VMID4_BASE__BASE_MASK 0x0000FFFFL
14895//GDS_VMID4_SIZE
14896#define GDS_VMID4_SIZE__SIZE__SHIFT 0x0
14897#define GDS_VMID4_SIZE__SIZE_MASK 0x0001FFFFL
14898//GDS_VMID5_BASE
14899#define GDS_VMID5_BASE__BASE__SHIFT 0x0
14900#define GDS_VMID5_BASE__BASE_MASK 0x0000FFFFL
14901//GDS_VMID5_SIZE
14902#define GDS_VMID5_SIZE__SIZE__SHIFT 0x0
14903#define GDS_VMID5_SIZE__SIZE_MASK 0x0001FFFFL
14904//GDS_VMID6_BASE
14905#define GDS_VMID6_BASE__BASE__SHIFT 0x0
14906#define GDS_VMID6_BASE__BASE_MASK 0x0000FFFFL
14907//GDS_VMID6_SIZE
14908#define GDS_VMID6_SIZE__SIZE__SHIFT 0x0
14909#define GDS_VMID6_SIZE__SIZE_MASK 0x0001FFFFL
14910//GDS_VMID7_BASE
14911#define GDS_VMID7_BASE__BASE__SHIFT 0x0
14912#define GDS_VMID7_BASE__BASE_MASK 0x0000FFFFL
14913//GDS_VMID7_SIZE
14914#define GDS_VMID7_SIZE__SIZE__SHIFT 0x0
14915#define GDS_VMID7_SIZE__SIZE_MASK 0x0001FFFFL
14916//GDS_VMID8_BASE
14917#define GDS_VMID8_BASE__BASE__SHIFT 0x0
14918#define GDS_VMID8_BASE__BASE_MASK 0x0000FFFFL
14919//GDS_VMID8_SIZE
14920#define GDS_VMID8_SIZE__SIZE__SHIFT 0x0
14921#define GDS_VMID8_SIZE__SIZE_MASK 0x0001FFFFL
14922//GDS_VMID9_BASE
14923#define GDS_VMID9_BASE__BASE__SHIFT 0x0
14924#define GDS_VMID9_BASE__BASE_MASK 0x0000FFFFL
14925//GDS_VMID9_SIZE
14926#define GDS_VMID9_SIZE__SIZE__SHIFT 0x0
14927#define GDS_VMID9_SIZE__SIZE_MASK 0x0001FFFFL
14928//GDS_VMID10_BASE
14929#define GDS_VMID10_BASE__BASE__SHIFT 0x0
14930#define GDS_VMID10_BASE__BASE_MASK 0x0000FFFFL
14931//GDS_VMID10_SIZE
14932#define GDS_VMID10_SIZE__SIZE__SHIFT 0x0
14933#define GDS_VMID10_SIZE__SIZE_MASK 0x0001FFFFL
14934//GDS_VMID11_BASE
14935#define GDS_VMID11_BASE__BASE__SHIFT 0x0
14936#define GDS_VMID11_BASE__BASE_MASK 0x0000FFFFL
14937//GDS_VMID11_SIZE
14938#define GDS_VMID11_SIZE__SIZE__SHIFT 0x0
14939#define GDS_VMID11_SIZE__SIZE_MASK 0x0001FFFFL
14940//GDS_VMID12_BASE
14941#define GDS_VMID12_BASE__BASE__SHIFT 0x0
14942#define GDS_VMID12_BASE__BASE_MASK 0x0000FFFFL
14943//GDS_VMID12_SIZE
14944#define GDS_VMID12_SIZE__SIZE__SHIFT 0x0
14945#define GDS_VMID12_SIZE__SIZE_MASK 0x0001FFFFL
14946//GDS_VMID13_BASE
14947#define GDS_VMID13_BASE__BASE__SHIFT 0x0
14948#define GDS_VMID13_BASE__BASE_MASK 0x0000FFFFL
14949//GDS_VMID13_SIZE
14950#define GDS_VMID13_SIZE__SIZE__SHIFT 0x0
14951#define GDS_VMID13_SIZE__SIZE_MASK 0x0001FFFFL
14952//GDS_VMID14_BASE
14953#define GDS_VMID14_BASE__BASE__SHIFT 0x0
14954#define GDS_VMID14_BASE__BASE_MASK 0x0000FFFFL
14955//GDS_VMID14_SIZE
14956#define GDS_VMID14_SIZE__SIZE__SHIFT 0x0
14957#define GDS_VMID14_SIZE__SIZE_MASK 0x0001FFFFL
14958//GDS_VMID15_BASE
14959#define GDS_VMID15_BASE__BASE__SHIFT 0x0
14960#define GDS_VMID15_BASE__BASE_MASK 0x0000FFFFL
14961//GDS_VMID15_SIZE
14962#define GDS_VMID15_SIZE__SIZE__SHIFT 0x0
14963#define GDS_VMID15_SIZE__SIZE_MASK 0x0001FFFFL
//GDS_GWS_VMID0
#define GDS_GWS_VMID0__BASE__SHIFT 0x0
#define GDS_GWS_VMID0__SIZE__SHIFT 0x10
#define GDS_GWS_VMID0__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID0__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID1
#define GDS_GWS_VMID1__BASE__SHIFT 0x0
#define GDS_GWS_VMID1__SIZE__SHIFT 0x10
#define GDS_GWS_VMID1__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID1__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID2
#define GDS_GWS_VMID2__BASE__SHIFT 0x0
#define GDS_GWS_VMID2__SIZE__SHIFT 0x10
#define GDS_GWS_VMID2__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID2__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID3
#define GDS_GWS_VMID3__BASE__SHIFT 0x0
#define GDS_GWS_VMID3__SIZE__SHIFT 0x10
#define GDS_GWS_VMID3__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID3__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID4
#define GDS_GWS_VMID4__BASE__SHIFT 0x0
#define GDS_GWS_VMID4__SIZE__SHIFT 0x10
#define GDS_GWS_VMID4__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID4__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID5
#define GDS_GWS_VMID5__BASE__SHIFT 0x0
#define GDS_GWS_VMID5__SIZE__SHIFT 0x10
#define GDS_GWS_VMID5__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID5__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID6
#define GDS_GWS_VMID6__BASE__SHIFT 0x0
#define GDS_GWS_VMID6__SIZE__SHIFT 0x10
#define GDS_GWS_VMID6__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID6__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID7
#define GDS_GWS_VMID7__BASE__SHIFT 0x0
#define GDS_GWS_VMID7__SIZE__SHIFT 0x10
#define GDS_GWS_VMID7__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID7__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID8
#define GDS_GWS_VMID8__BASE__SHIFT 0x0
#define GDS_GWS_VMID8__SIZE__SHIFT 0x10
#define GDS_GWS_VMID8__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID8__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID9
#define GDS_GWS_VMID9__BASE__SHIFT 0x0
#define GDS_GWS_VMID9__SIZE__SHIFT 0x10
#define GDS_GWS_VMID9__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID9__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID10
#define GDS_GWS_VMID10__BASE__SHIFT 0x0
#define GDS_GWS_VMID10__SIZE__SHIFT 0x10
#define GDS_GWS_VMID10__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID10__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID11
#define GDS_GWS_VMID11__BASE__SHIFT 0x0
#define GDS_GWS_VMID11__SIZE__SHIFT 0x10
#define GDS_GWS_VMID11__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID11__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID12
#define GDS_GWS_VMID12__BASE__SHIFT 0x0
#define GDS_GWS_VMID12__SIZE__SHIFT 0x10
#define GDS_GWS_VMID12__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID12__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID13
#define GDS_GWS_VMID13__BASE__SHIFT 0x0
#define GDS_GWS_VMID13__SIZE__SHIFT 0x10
#define GDS_GWS_VMID13__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID13__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID14
#define GDS_GWS_VMID14__BASE__SHIFT 0x0
#define GDS_GWS_VMID14__SIZE__SHIFT 0x10
#define GDS_GWS_VMID14__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID14__SIZE_MASK 0x007F0000L
//GDS_GWS_VMID15
#define GDS_GWS_VMID15__BASE__SHIFT 0x0
#define GDS_GWS_VMID15__SIZE__SHIFT 0x10
#define GDS_GWS_VMID15__BASE_MASK 0x0000003FL
#define GDS_GWS_VMID15__SIZE_MASK 0x007F0000L
//GDS_OA_VMID0
#define GDS_OA_VMID0__MASK__SHIFT 0x0
#define GDS_OA_VMID0__UNUSED__SHIFT 0x10
#define GDS_OA_VMID0__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID0__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID1
#define GDS_OA_VMID1__MASK__SHIFT 0x0
#define GDS_OA_VMID1__UNUSED__SHIFT 0x10
#define GDS_OA_VMID1__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID1__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID2
#define GDS_OA_VMID2__MASK__SHIFT 0x0
#define GDS_OA_VMID2__UNUSED__SHIFT 0x10
#define GDS_OA_VMID2__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID2__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID3
#define GDS_OA_VMID3__MASK__SHIFT 0x0
#define GDS_OA_VMID3__UNUSED__SHIFT 0x10
#define GDS_OA_VMID3__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID3__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID4
#define GDS_OA_VMID4__MASK__SHIFT 0x0
#define GDS_OA_VMID4__UNUSED__SHIFT 0x10
#define GDS_OA_VMID4__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID4__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID5
#define GDS_OA_VMID5__MASK__SHIFT 0x0
#define GDS_OA_VMID5__UNUSED__SHIFT 0x10
#define GDS_OA_VMID5__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID5__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID6
#define GDS_OA_VMID6__MASK__SHIFT 0x0
#define GDS_OA_VMID6__UNUSED__SHIFT 0x10
#define GDS_OA_VMID6__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID6__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID7
#define GDS_OA_VMID7__MASK__SHIFT 0x0
#define GDS_OA_VMID7__UNUSED__SHIFT 0x10
#define GDS_OA_VMID7__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID7__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID8
#define GDS_OA_VMID8__MASK__SHIFT 0x0
#define GDS_OA_VMID8__UNUSED__SHIFT 0x10
#define GDS_OA_VMID8__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID8__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID9
#define GDS_OA_VMID9__MASK__SHIFT 0x0
#define GDS_OA_VMID9__UNUSED__SHIFT 0x10
#define GDS_OA_VMID9__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID9__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID10
#define GDS_OA_VMID10__MASK__SHIFT 0x0
#define GDS_OA_VMID10__UNUSED__SHIFT 0x10
#define GDS_OA_VMID10__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID10__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID11
#define GDS_OA_VMID11__MASK__SHIFT 0x0
#define GDS_OA_VMID11__UNUSED__SHIFT 0x10
#define GDS_OA_VMID11__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID11__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID12
#define GDS_OA_VMID12__MASK__SHIFT 0x0
#define GDS_OA_VMID12__UNUSED__SHIFT 0x10
#define GDS_OA_VMID12__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID12__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID13
#define GDS_OA_VMID13__MASK__SHIFT 0x0
#define GDS_OA_VMID13__UNUSED__SHIFT 0x10
#define GDS_OA_VMID13__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID13__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID14
#define GDS_OA_VMID14__MASK__SHIFT 0x0
#define GDS_OA_VMID14__UNUSED__SHIFT 0x10
#define GDS_OA_VMID14__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID14__UNUSED_MASK 0xFFFF0000L
//GDS_OA_VMID15
#define GDS_OA_VMID15__MASK__SHIFT 0x0
#define GDS_OA_VMID15__UNUSED__SHIFT 0x10
#define GDS_OA_VMID15__MASK_MASK 0x0000FFFFL
#define GDS_OA_VMID15__UNUSED_MASK 0xFFFF0000L
//GDS_GWS_RESET0
#define GDS_GWS_RESET0__RESOURCE0_RESET__SHIFT 0x0
#define GDS_GWS_RESET0__RESOURCE1_RESET__SHIFT 0x1
#define GDS_GWS_RESET0__RESOURCE2_RESET__SHIFT 0x2
#define GDS_GWS_RESET0__RESOURCE3_RESET__SHIFT 0x3
#define GDS_GWS_RESET0__RESOURCE4_RESET__SHIFT 0x4
#define GDS_GWS_RESET0__RESOURCE5_RESET__SHIFT 0x5
#define GDS_GWS_RESET0__RESOURCE6_RESET__SHIFT 0x6
#define GDS_GWS_RESET0__RESOURCE7_RESET__SHIFT 0x7
#define GDS_GWS_RESET0__RESOURCE8_RESET__SHIFT 0x8
#define GDS_GWS_RESET0__RESOURCE9_RESET__SHIFT 0x9
#define GDS_GWS_RESET0__RESOURCE10_RESET__SHIFT 0xa
#define GDS_GWS_RESET0__RESOURCE11_RESET__SHIFT 0xb
#define GDS_GWS_RESET0__RESOURCE12_RESET__SHIFT 0xc
#define GDS_GWS_RESET0__RESOURCE13_RESET__SHIFT 0xd
#define GDS_GWS_RESET0__RESOURCE14_RESET__SHIFT 0xe
#define GDS_GWS_RESET0__RESOURCE15_RESET__SHIFT 0xf
#define GDS_GWS_RESET0__RESOURCE16_RESET__SHIFT 0x10
#define GDS_GWS_RESET0__RESOURCE17_RESET__SHIFT 0x11
#define GDS_GWS_RESET0__RESOURCE18_RESET__SHIFT 0x12
#define GDS_GWS_RESET0__RESOURCE19_RESET__SHIFT 0x13
#define GDS_GWS_RESET0__RESOURCE20_RESET__SHIFT 0x14
#define GDS_GWS_RESET0__RESOURCE21_RESET__SHIFT 0x15
#define GDS_GWS_RESET0__RESOURCE22_RESET__SHIFT 0x16
#define GDS_GWS_RESET0__RESOURCE23_RESET__SHIFT 0x17
#define GDS_GWS_RESET0__RESOURCE24_RESET__SHIFT 0x18
#define GDS_GWS_RESET0__RESOURCE25_RESET__SHIFT 0x19
#define GDS_GWS_RESET0__RESOURCE26_RESET__SHIFT 0x1a
#define GDS_GWS_RESET0__RESOURCE27_RESET__SHIFT 0x1b
#define GDS_GWS_RESET0__RESOURCE28_RESET__SHIFT 0x1c
#define GDS_GWS_RESET0__RESOURCE29_RESET__SHIFT 0x1d
#define GDS_GWS_RESET0__RESOURCE30_RESET__SHIFT 0x1e
#define GDS_GWS_RESET0__RESOURCE31_RESET__SHIFT 0x1f
#define GDS_GWS_RESET0__RESOURCE0_RESET_MASK 0x00000001L
#define GDS_GWS_RESET0__RESOURCE1_RESET_MASK 0x00000002L
#define GDS_GWS_RESET0__RESOURCE2_RESET_MASK 0x00000004L
#define GDS_GWS_RESET0__RESOURCE3_RESET_MASK 0x00000008L
#define GDS_GWS_RESET0__RESOURCE4_RESET_MASK 0x00000010L
#define GDS_GWS_RESET0__RESOURCE5_RESET_MASK 0x00000020L
#define GDS_GWS_RESET0__RESOURCE6_RESET_MASK 0x00000040L
#define GDS_GWS_RESET0__RESOURCE7_RESET_MASK 0x00000080L
#define GDS_GWS_RESET0__RESOURCE8_RESET_MASK 0x00000100L
#define GDS_GWS_RESET0__RESOURCE9_RESET_MASK 0x00000200L
#define GDS_GWS_RESET0__RESOURCE10_RESET_MASK 0x00000400L
#define GDS_GWS_RESET0__RESOURCE11_RESET_MASK 0x00000800L
#define GDS_GWS_RESET0__RESOURCE12_RESET_MASK 0x00001000L
#define GDS_GWS_RESET0__RESOURCE13_RESET_MASK 0x00002000L
#define GDS_GWS_RESET0__RESOURCE14_RESET_MASK 0x00004000L
#define GDS_GWS_RESET0__RESOURCE15_RESET_MASK 0x00008000L
#define GDS_GWS_RESET0__RESOURCE16_RESET_MASK 0x00010000L
#define GDS_GWS_RESET0__RESOURCE17_RESET_MASK 0x00020000L
#define GDS_GWS_RESET0__RESOURCE18_RESET_MASK 0x00040000L
#define GDS_GWS_RESET0__RESOURCE19_RESET_MASK 0x00080000L
#define GDS_GWS_RESET0__RESOURCE20_RESET_MASK 0x00100000L
#define GDS_GWS_RESET0__RESOURCE21_RESET_MASK 0x00200000L
#define GDS_GWS_RESET0__RESOURCE22_RESET_MASK 0x00400000L
#define GDS_GWS_RESET0__RESOURCE23_RESET_MASK 0x00800000L
#define GDS_GWS_RESET0__RESOURCE24_RESET_MASK 0x01000000L
#define GDS_GWS_RESET0__RESOURCE25_RESET_MASK 0x02000000L
#define GDS_GWS_RESET0__RESOURCE26_RESET_MASK 0x04000000L
#define GDS_GWS_RESET0__RESOURCE27_RESET_MASK 0x08000000L
#define GDS_GWS_RESET0__RESOURCE28_RESET_MASK 0x10000000L
#define GDS_GWS_RESET0__RESOURCE29_RESET_MASK 0x20000000L
#define GDS_GWS_RESET0__RESOURCE30_RESET_MASK 0x40000000L
#define GDS_GWS_RESET0__RESOURCE31_RESET_MASK 0x80000000L
//GDS_GWS_RESET1
#define GDS_GWS_RESET1__RESOURCE32_RESET__SHIFT 0x0
#define GDS_GWS_RESET1__RESOURCE33_RESET__SHIFT 0x1
#define GDS_GWS_RESET1__RESOURCE34_RESET__SHIFT 0x2
#define GDS_GWS_RESET1__RESOURCE35_RESET__SHIFT 0x3
#define GDS_GWS_RESET1__RESOURCE36_RESET__SHIFT 0x4
#define GDS_GWS_RESET1__RESOURCE37_RESET__SHIFT 0x5
#define GDS_GWS_RESET1__RESOURCE38_RESET__SHIFT 0x6
#define GDS_GWS_RESET1__RESOURCE39_RESET__SHIFT 0x7
#define GDS_GWS_RESET1__RESOURCE40_RESET__SHIFT 0x8
#define GDS_GWS_RESET1__RESOURCE41_RESET__SHIFT 0x9
#define GDS_GWS_RESET1__RESOURCE42_RESET__SHIFT 0xa
#define GDS_GWS_RESET1__RESOURCE43_RESET__SHIFT 0xb
#define GDS_GWS_RESET1__RESOURCE44_RESET__SHIFT 0xc
#define GDS_GWS_RESET1__RESOURCE45_RESET__SHIFT 0xd
#define GDS_GWS_RESET1__RESOURCE46_RESET__SHIFT 0xe
#define GDS_GWS_RESET1__RESOURCE47_RESET__SHIFT 0xf
#define GDS_GWS_RESET1__RESOURCE48_RESET__SHIFT 0x10
#define GDS_GWS_RESET1__RESOURCE49_RESET__SHIFT 0x11
#define GDS_GWS_RESET1__RESOURCE50_RESET__SHIFT 0x12
#define GDS_GWS_RESET1__RESOURCE51_RESET__SHIFT 0x13
#define GDS_GWS_RESET1__RESOURCE52_RESET__SHIFT 0x14
#define GDS_GWS_RESET1__RESOURCE53_RESET__SHIFT 0x15
#define GDS_GWS_RESET1__RESOURCE54_RESET__SHIFT 0x16
#define GDS_GWS_RESET1__RESOURCE55_RESET__SHIFT 0x17
#define GDS_GWS_RESET1__RESOURCE56_RESET__SHIFT 0x18
#define GDS_GWS_RESET1__RESOURCE57_RESET__SHIFT 0x19
#define GDS_GWS_RESET1__RESOURCE58_RESET__SHIFT 0x1a
#define GDS_GWS_RESET1__RESOURCE59_RESET__SHIFT 0x1b
#define GDS_GWS_RESET1__RESOURCE60_RESET__SHIFT 0x1c
#define GDS_GWS_RESET1__RESOURCE61_RESET__SHIFT 0x1d
#define GDS_GWS_RESET1__RESOURCE62_RESET__SHIFT 0x1e
#define GDS_GWS_RESET1__RESOURCE63_RESET__SHIFT 0x1f
#define GDS_GWS_RESET1__RESOURCE32_RESET_MASK 0x00000001L
#define GDS_GWS_RESET1__RESOURCE33_RESET_MASK 0x00000002L
#define GDS_GWS_RESET1__RESOURCE34_RESET_MASK 0x00000004L
#define GDS_GWS_RESET1__RESOURCE35_RESET_MASK 0x00000008L
#define GDS_GWS_RESET1__RESOURCE36_RESET_MASK 0x00000010L
#define GDS_GWS_RESET1__RESOURCE37_RESET_MASK 0x00000020L
#define GDS_GWS_RESET1__RESOURCE38_RESET_MASK 0x00000040L
#define GDS_GWS_RESET1__RESOURCE39_RESET_MASK 0x00000080L
#define GDS_GWS_RESET1__RESOURCE40_RESET_MASK 0x00000100L
#define GDS_GWS_RESET1__RESOURCE41_RESET_MASK 0x00000200L
#define GDS_GWS_RESET1__RESOURCE42_RESET_MASK 0x00000400L
#define GDS_GWS_RESET1__RESOURCE43_RESET_MASK 0x00000800L
#define GDS_GWS_RESET1__RESOURCE44_RESET_MASK 0x00001000L
#define GDS_GWS_RESET1__RESOURCE45_RESET_MASK 0x00002000L
#define GDS_GWS_RESET1__RESOURCE46_RESET_MASK 0x00004000L
#define GDS_GWS_RESET1__RESOURCE47_RESET_MASK 0x00008000L
#define GDS_GWS_RESET1__RESOURCE48_RESET_MASK 0x00010000L
#define GDS_GWS_RESET1__RESOURCE49_RESET_MASK 0x00020000L
#define GDS_GWS_RESET1__RESOURCE50_RESET_MASK 0x00040000L
#define GDS_GWS_RESET1__RESOURCE51_RESET_MASK 0x00080000L
#define GDS_GWS_RESET1__RESOURCE52_RESET_MASK 0x00100000L
#define GDS_GWS_RESET1__RESOURCE53_RESET_MASK 0x00200000L
#define GDS_GWS_RESET1__RESOURCE54_RESET_MASK 0x00400000L
#define GDS_GWS_RESET1__RESOURCE55_RESET_MASK 0x00800000L
#define GDS_GWS_RESET1__RESOURCE56_RESET_MASK 0x01000000L
#define GDS_GWS_RESET1__RESOURCE57_RESET_MASK 0x02000000L
#define GDS_GWS_RESET1__RESOURCE58_RESET_MASK 0x04000000L
#define GDS_GWS_RESET1__RESOURCE59_RESET_MASK 0x08000000L
#define GDS_GWS_RESET1__RESOURCE60_RESET_MASK 0x10000000L
#define GDS_GWS_RESET1__RESOURCE61_RESET_MASK 0x20000000L
#define GDS_GWS_RESET1__RESOURCE62_RESET_MASK 0x40000000L
#define GDS_GWS_RESET1__RESOURCE63_RESET_MASK 0x80000000L
//GDS_GWS_RESOURCE_RESET
#define GDS_GWS_RESOURCE_RESET__RESET__SHIFT 0x0
#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID__SHIFT 0x8
#define GDS_GWS_RESOURCE_RESET__RESET_MASK 0x00000001L
#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID_MASK 0x0000FF00L
//GDS_COMPUTE_MAX_WAVE_ID
#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
//GDS_OA_RESET_MASK
#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET__SHIFT 0x0
#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET__SHIFT 0x1
#define GDS_OA_RESET_MASK__ME0_CS_RESET__SHIFT 0x2
#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET__SHIFT 0x3
#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET__SHIFT 0x4
#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET__SHIFT 0x5
#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET__SHIFT 0x6
#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET__SHIFT 0x7
#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET__SHIFT 0x8
#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET__SHIFT 0x9
#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET__SHIFT 0xa
#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET__SHIFT 0xb
#define GDS_OA_RESET_MASK__UNUSED1__SHIFT 0xc
#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET_MASK 0x00000001L
#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET_MASK 0x00000002L
#define GDS_OA_RESET_MASK__ME0_CS_RESET_MASK 0x00000004L
#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET_MASK 0x00000008L
#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET_MASK 0x00000010L
#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET_MASK 0x00000020L
#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET_MASK 0x00000040L
#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET_MASK 0x00000080L
#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET_MASK 0x00000100L
#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET_MASK 0x00000200L
#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET_MASK 0x00000400L
#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET_MASK 0x00000800L
#define GDS_OA_RESET_MASK__UNUSED1_MASK 0xFFFFF000L
//GDS_OA_RESET
#define GDS_OA_RESET__RESET__SHIFT 0x0
#define GDS_OA_RESET__PIPE_ID__SHIFT 0x8
#define GDS_OA_RESET__RESET_MASK 0x00000001L
#define GDS_OA_RESET__PIPE_ID_MASK 0x0000FF00L
//GDS_ENHANCE
#define GDS_ENHANCE__MISC__SHIFT 0x0
#define GDS_ENHANCE__AUTO_INC_INDEX__SHIFT 0x10
#define GDS_ENHANCE__CGPG_RESTORE__SHIFT 0x11
#define GDS_ENHANCE__RD_BUF_TAG_MISS__SHIFT 0x12
#define GDS_ENHANCE__GDSA_PC_CGTS_DIS__SHIFT 0x13
#define GDS_ENHANCE__GDSO_PC_CGTS_DIS__SHIFT 0x14
#define GDS_ENHANCE__WD_GDS_CSB_OVERRIDE__SHIFT 0x15
#define GDS_ENHANCE__UNUSED__SHIFT 0x16
#define GDS_ENHANCE__MISC_MASK 0x0000FFFFL
#define GDS_ENHANCE__AUTO_INC_INDEX_MASK 0x00010000L
#define GDS_ENHANCE__CGPG_RESTORE_MASK 0x00020000L
#define GDS_ENHANCE__RD_BUF_TAG_MISS_MASK 0x00040000L
#define GDS_ENHANCE__GDSA_PC_CGTS_DIS_MASK 0x00080000L
#define GDS_ENHANCE__GDSO_PC_CGTS_DIS_MASK 0x00100000L
#define GDS_ENHANCE__WD_GDS_CSB_OVERRIDE_MASK 0x00200000L
#define GDS_ENHANCE__UNUSED_MASK 0xFFC00000L
//GDS_OA_CGPG_RESTORE
#define GDS_OA_CGPG_RESTORE__VMID__SHIFT 0x0
#define GDS_OA_CGPG_RESTORE__MEID__SHIFT 0x8
#define GDS_OA_CGPG_RESTORE__PIPEID__SHIFT 0xc
#define GDS_OA_CGPG_RESTORE__QUEUEID__SHIFT 0x10
#define GDS_OA_CGPG_RESTORE__UNUSED__SHIFT 0x14
#define GDS_OA_CGPG_RESTORE__VMID_MASK 0x000000FFL
#define GDS_OA_CGPG_RESTORE__MEID_MASK 0x00000F00L
#define GDS_OA_CGPG_RESTORE__PIPEID_MASK 0x0000F000L
#define GDS_OA_CGPG_RESTORE__QUEUEID_MASK 0x000F0000L
#define GDS_OA_CGPG_RESTORE__UNUSED_MASK 0xFFF00000L
//GDS_CS_CTXSW_STATUS
#define GDS_CS_CTXSW_STATUS__R__SHIFT 0x0
#define GDS_CS_CTXSW_STATUS__W__SHIFT 0x1
#define GDS_CS_CTXSW_STATUS__UNUSED__SHIFT 0x2
#define GDS_CS_CTXSW_STATUS__R_MASK 0x00000001L
#define GDS_CS_CTXSW_STATUS__W_MASK 0x00000002L
#define GDS_CS_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
//GDS_CS_CTXSW_CNT0
#define GDS_CS_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_CS_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_CS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_CS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_CS_CTXSW_CNT1
#define GDS_CS_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_CS_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_CS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_CS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_CS_CTXSW_CNT2
#define GDS_CS_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_CS_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_CS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_CS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_CS_CTXSW_CNT3
#define GDS_CS_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_CS_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_CS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_CS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
//GDS_GFX_CTXSW_STATUS
#define GDS_GFX_CTXSW_STATUS__R__SHIFT 0x0
#define GDS_GFX_CTXSW_STATUS__W__SHIFT 0x1
#define GDS_GFX_CTXSW_STATUS__UNUSED__SHIFT 0x2
#define GDS_GFX_CTXSW_STATUS__R_MASK 0x00000001L
#define GDS_GFX_CTXSW_STATUS__W_MASK 0x00000002L
#define GDS_GFX_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
//GDS_VS_CTXSW_CNT0
#define GDS_VS_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_VS_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_VS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_VS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_VS_CTXSW_CNT1
#define GDS_VS_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_VS_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_VS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_VS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_VS_CTXSW_CNT2
#define GDS_VS_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_VS_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_VS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_VS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_VS_CTXSW_CNT3
#define GDS_VS_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_VS_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_VS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_VS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
//GDS_PS0_CTXSW_CNT0
#define GDS_PS0_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_PS0_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_PS0_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_PS0_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_PS0_CTXSW_CNT1
#define GDS_PS0_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_PS0_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_PS0_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_PS0_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_PS0_CTXSW_CNT2
#define GDS_PS0_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_PS0_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_PS0_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_PS0_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_PS0_CTXSW_CNT3
#define GDS_PS0_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_PS0_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_PS0_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_PS0_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
//GDS_PS1_CTXSW_CNT0
#define GDS_PS1_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_PS1_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_PS1_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_PS1_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_PS1_CTXSW_CNT1
#define GDS_PS1_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_PS1_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_PS1_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_PS1_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_PS1_CTXSW_CNT2
#define GDS_PS1_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_PS1_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_PS1_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_PS1_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_PS1_CTXSW_CNT3
#define GDS_PS1_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_PS1_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_PS1_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_PS1_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
//GDS_PS2_CTXSW_CNT0
#define GDS_PS2_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_PS2_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_PS2_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_PS2_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_PS2_CTXSW_CNT1
#define GDS_PS2_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_PS2_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_PS2_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_PS2_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_PS2_CTXSW_CNT2
#define GDS_PS2_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_PS2_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_PS2_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_PS2_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_PS2_CTXSW_CNT3
#define GDS_PS2_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_PS2_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_PS2_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_PS2_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
//GDS_PS3_CTXSW_CNT0
#define GDS_PS3_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_PS3_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_PS3_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_PS3_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_PS3_CTXSW_CNT1
#define GDS_PS3_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_PS3_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_PS3_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_PS3_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_PS3_CTXSW_CNT2
#define GDS_PS3_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_PS3_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_PS3_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_PS3_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_PS3_CTXSW_CNT3
#define GDS_PS3_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_PS3_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_PS3_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_PS3_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
//GDS_PS4_CTXSW_CNT0
#define GDS_PS4_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_PS4_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_PS4_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_PS4_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_PS4_CTXSW_CNT1
#define GDS_PS4_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_PS4_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_PS4_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_PS4_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_PS4_CTXSW_CNT2
#define GDS_PS4_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_PS4_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_PS4_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_PS4_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_PS4_CTXSW_CNT3
#define GDS_PS4_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_PS4_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_PS4_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_PS4_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
//GDS_PS5_CTXSW_CNT0
#define GDS_PS5_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_PS5_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_PS5_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_PS5_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_PS5_CTXSW_CNT1
#define GDS_PS5_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_PS5_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_PS5_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_PS5_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_PS5_CTXSW_CNT2
#define GDS_PS5_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_PS5_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_PS5_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_PS5_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_PS5_CTXSW_CNT3
#define GDS_PS5_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_PS5_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_PS5_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_PS5_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
//GDS_PS6_CTXSW_CNT0
#define GDS_PS6_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_PS6_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_PS6_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_PS6_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_PS6_CTXSW_CNT1
#define GDS_PS6_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_PS6_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_PS6_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_PS6_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_PS6_CTXSW_CNT2
#define GDS_PS6_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_PS6_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_PS6_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_PS6_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_PS6_CTXSW_CNT3
#define GDS_PS6_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_PS6_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_PS6_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_PS6_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
//GDS_PS7_CTXSW_CNT0
#define GDS_PS7_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_PS7_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_PS7_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_PS7_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_PS7_CTXSW_CNT1
#define GDS_PS7_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_PS7_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_PS7_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_PS7_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_PS7_CTXSW_CNT2
#define GDS_PS7_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_PS7_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_PS7_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_PS7_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_PS7_CTXSW_CNT3
#define GDS_PS7_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_PS7_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_PS7_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_PS7_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
//GDS_GS_CTXSW_CNT0
#define GDS_GS_CTXSW_CNT0__UPDN__SHIFT 0x0
#define GDS_GS_CTXSW_CNT0__PTR__SHIFT 0x10
#define GDS_GS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
#define GDS_GS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
//GDS_GS_CTXSW_CNT1
#define GDS_GS_CTXSW_CNT1__UPDN__SHIFT 0x0
#define GDS_GS_CTXSW_CNT1__PTR__SHIFT 0x10
#define GDS_GS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
#define GDS_GS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
//GDS_GS_CTXSW_CNT2
#define GDS_GS_CTXSW_CNT2__UPDN__SHIFT 0x0
#define GDS_GS_CTXSW_CNT2__PTR__SHIFT 0x10
#define GDS_GS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
#define GDS_GS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
//GDS_GS_CTXSW_CNT3
#define GDS_GS_CTXSW_CNT3__UPDN__SHIFT 0x0
#define GDS_GS_CTXSW_CNT3__PTR__SHIFT 0x10
#define GDS_GS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
#define GDS_GS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L

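/*
 * Illustrative sketch, not part of the original register header: every
 * register above follows the same FIELD__SHIFT / FIELD_MASK convention, so a
 * field is programmed by shifting the value into position and masking it.
 * The helper name and parameters below are hypothetical and use the
 * GDS_GWS_VMID0 BASE/SIZE pair defined earlier in this block; the amdgpu
 * driver itself typically goes through its own REG_SET_FIELD()-style macros
 * instead of an open-coded helper like this.
 */
static inline unsigned int example_pack_gds_gws_vmid0(unsigned int base,
						      unsigned int size)
{
	unsigned int reg = 0;

	/* BASE lives in bits 5:0, SIZE in bits 22:16 of GDS_GWS_VMID0. */
	reg |= (base << GDS_GWS_VMID0__BASE__SHIFT) & GDS_GWS_VMID0__BASE_MASK;
	reg |= (size << GDS_GWS_VMID0__SIZE__SHIFT) & GDS_GWS_VMID0__SIZE_MASK;
	return reg;
}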
// addressBlock: gc_rasdec
//RAS_SIGNATURE_CONTROL
#define RAS_SIGNATURE_CONTROL__ENABLE__SHIFT 0x0
#define RAS_SIGNATURE_CONTROL__ENABLE_MASK 0x00000001L
//RAS_SIGNATURE_MASK
#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK__SHIFT 0x0
#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK_MASK 0xFFFFFFFFL
//RAS_SX_SIGNATURE0
#define RAS_SX_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_SX_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SX_SIGNATURE1
#define RAS_SX_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_SX_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SX_SIGNATURE2
#define RAS_SX_SIGNATURE2__SIGNATURE__SHIFT 0x0
#define RAS_SX_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SX_SIGNATURE3
#define RAS_SX_SIGNATURE3__SIGNATURE__SHIFT 0x0
#define RAS_SX_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_DB_SIGNATURE0
#define RAS_DB_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_DB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_PA_SIGNATURE0
#define RAS_PA_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_PA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_VGT_SIGNATURE0
#define RAS_VGT_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_VGT_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SQ_SIGNATURE0
#define RAS_SQ_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_SQ_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SC_SIGNATURE0
#define RAS_SC_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SC_SIGNATURE1
#define RAS_SC_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SC_SIGNATURE2
#define RAS_SC_SIGNATURE2__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SC_SIGNATURE3
#define RAS_SC_SIGNATURE3__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SC_SIGNATURE4
#define RAS_SC_SIGNATURE4__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE4__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SC_SIGNATURE5
#define RAS_SC_SIGNATURE5__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE5__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SC_SIGNATURE6
#define RAS_SC_SIGNATURE6__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE6__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SC_SIGNATURE7
#define RAS_SC_SIGNATURE7__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE7__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_IA_SIGNATURE0
#define RAS_IA_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_IA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_IA_SIGNATURE1
#define RAS_IA_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_IA_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SPI_SIGNATURE0
#define RAS_SPI_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_SPI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_SPI_SIGNATURE1
#define RAS_SPI_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_SPI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_TA_SIGNATURE0
#define RAS_TA_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_TA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_TD_SIGNATURE0
#define RAS_TD_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_TD_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_CB_SIGNATURE0
#define RAS_CB_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_CB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_BCI_SIGNATURE0
#define RAS_BCI_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_BCI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_BCI_SIGNATURE1
#define RAS_BCI_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_BCI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
//RAS_TA_SIGNATURE1
#define RAS_TA_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_TA_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL

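/*
 * Illustrative sketch, not part of the original register header: the inverse
 * operation, extracting a field from a raw register value with the same
 * _MASK/__SHIFT pair. The helper name and "val" parameter are hypothetical;
 * the RAS_SIGNATURE_CONTROL macros come from the gc_rasdec block above.
 */
static inline unsigned int example_ras_signature_enabled(unsigned int val)
{
	/* ENABLE occupies bit 0 of RAS_SIGNATURE_CONTROL. */
	return (val & RAS_SIGNATURE_CONTROL__ENABLE_MASK) >>
	       RAS_SIGNATURE_CONTROL__ENABLE__SHIFT;
}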
// addressBlock: gc_gfxdec0
//DB_RENDER_CONTROL
#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE__SHIFT 0x0
#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE__SHIFT 0x1
#define DB_RENDER_CONTROL__DEPTH_COPY__SHIFT 0x2
#define DB_RENDER_CONTROL__STENCIL_COPY__SHIFT 0x3
#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE__SHIFT 0x4
#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE__SHIFT 0x5
#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE__SHIFT 0x6
#define DB_RENDER_CONTROL__COPY_CENTROID__SHIFT 0x7
#define DB_RENDER_CONTROL__COPY_SAMPLE__SHIFT 0x8
#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE__SHIFT 0xc
#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE_MASK 0x00000001L
#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE_MASK 0x00000002L
#define DB_RENDER_CONTROL__DEPTH_COPY_MASK 0x00000004L
#define DB_RENDER_CONTROL__STENCIL_COPY_MASK 0x00000008L
#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE_MASK 0x00000010L
#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE_MASK 0x00000020L
#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE_MASK 0x00000040L
#define DB_RENDER_CONTROL__COPY_CENTROID_MASK 0x00000080L
#define DB_RENDER_CONTROL__COPY_SAMPLE_MASK 0x00000F00L
#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE_MASK 0x00001000L
//DB_COUNT_CONTROL
#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE__SHIFT 0x0
#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS__SHIFT 0x1
#define DB_COUNT_CONTROL__SAMPLE_RATE__SHIFT 0x4
#define DB_COUNT_CONTROL__ZPASS_ENABLE__SHIFT 0x8
#define DB_COUNT_CONTROL__ZFAIL_ENABLE__SHIFT 0xc
#define DB_COUNT_CONTROL__SFAIL_ENABLE__SHIFT 0x10
#define DB_COUNT_CONTROL__DBFAIL_ENABLE__SHIFT 0x14
#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x18
#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x1c
#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE_MASK 0x00000001L
#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS_MASK 0x00000002L
#define DB_COUNT_CONTROL__SAMPLE_RATE_MASK 0x00000070L
#define DB_COUNT_CONTROL__ZPASS_ENABLE_MASK 0x00000F00L
#define DB_COUNT_CONTROL__ZFAIL_ENABLE_MASK 0x0000F000L
#define DB_COUNT_CONTROL__SFAIL_ENABLE_MASK 0x000F0000L
#define DB_COUNT_CONTROL__DBFAIL_ENABLE_MASK 0x00F00000L
#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x0F000000L
#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE_MASK 0xF0000000L
//DB_DEPTH_VIEW
#define DB_DEPTH_VIEW__SLICE_START__SHIFT 0x0
#define DB_DEPTH_VIEW__SLICE_MAX__SHIFT 0xd
#define DB_DEPTH_VIEW__Z_READ_ONLY__SHIFT 0x18
#define DB_DEPTH_VIEW__STENCIL_READ_ONLY__SHIFT 0x19
#define DB_DEPTH_VIEW__MIPID__SHIFT 0x1a
#define DB_DEPTH_VIEW__SLICE_START_MASK 0x000007FFL
#define DB_DEPTH_VIEW__SLICE_MAX_MASK 0x00FFE000L
#define DB_DEPTH_VIEW__Z_READ_ONLY_MASK 0x01000000L
#define DB_DEPTH_VIEW__STENCIL_READ_ONLY_MASK 0x02000000L
#define DB_DEPTH_VIEW__MIPID_MASK 0x3C000000L
//DB_RENDER_OVERRIDE
#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE__SHIFT 0x0
#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0__SHIFT 0x2
#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1__SHIFT 0x4
#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER__SHIFT 0x6
#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE__SHIFT 0x7
#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE__SHIFT 0x8
#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE__SHIFT 0x9
#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL__SHIFT 0xa
#define DB_RENDER_OVERRIDE__FORCE_Z_READ__SHIFT 0xb
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ__SHIFT 0xc
#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE__SHIFT 0xd
#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT__SHIFT 0xf
#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP__SHIFT 0x10
#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE__SHIFT 0x11
#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED__SHIFT 0x12
#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM__SHIFT 0x13
#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT__SHIFT 0x15
#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES__SHIFT 0x1a
#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY__SHIFT 0x1b
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY__SHIFT 0x1c
#define DB_RENDER_OVERRIDE__FORCE_Z_VALID__SHIFT 0x1d
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID__SHIFT 0x1e
#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION__SHIFT 0x1f
#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE_MASK 0x00000003L
#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0_MASK 0x0000000CL
#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1_MASK 0x00000030L
#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER_MASK 0x00000040L
#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE_MASK 0x00000080L
#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE_MASK 0x00000100L
#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE_MASK 0x00000200L
#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL_MASK 0x00000400L
#define DB_RENDER_OVERRIDE__FORCE_Z_READ_MASK 0x00000800L
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ_MASK 0x00001000L
#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE_MASK 0x00006000L
#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT_MASK 0x00008000L
#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP_MASK 0x00010000L
#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE_MASK 0x00020000L
#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED_MASK 0x00040000L
#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM_MASK 0x00180000L
#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT_MASK 0x03E00000L
#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES_MASK 0x04000000L
#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY_MASK 0x08000000L
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY_MASK 0x10000000L
#define DB_RENDER_OVERRIDE__FORCE_Z_VALID_MASK 0x20000000L
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID_MASK 0x40000000L
#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION_MASK 0x80000000L
//DB_RENDER_OVERRIDE2
#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL__SHIFT 0x0
#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN__SHIFT 0x2
#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION__SHIFT 0x5
#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION__SHIFT 0x6
#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION__SHIFT 0x7
#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH__SHIFT 0x8
#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP__SHIFT 0x9
#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE__SHIFT 0xa
#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE__SHIFT 0xb
#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC__SHIFT 0xc
#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF__SHIFT 0xf
#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF__SHIFT 0x12
#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE__SHIFT 0x15
#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16
#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17
#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT 0x19
#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001CL
#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION_MASK 0x00000020L
#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION_MASK 0x00000040L
#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION_MASK 0x00000080L
#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH_MASK 0x00000100L
#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP_MASK 0x00000200L
#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE_MASK 0x00000400L
#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE_MASK 0x00000800L
#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC_MASK 0x00007000L
#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF_MASK 0x00038000L
#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF_MASK 0x001C0000L
#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE_MASK 0x00200000L
#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK 0x02000000L
//DB_HTILE_DATA_BASE
#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0
#define DB_HTILE_DATA_BASE__BASE_256B_MASK 0xFFFFFFFFL
//DB_HTILE_DATA_BASE_HI
#define DB_HTILE_DATA_BASE_HI__BASE_HI__SHIFT 0x0
#define DB_HTILE_DATA_BASE_HI__BASE_HI_MASK 0x000000FFL
//DB_DEPTH_SIZE
#define DB_DEPTH_SIZE__X_MAX__SHIFT 0x0
#define DB_DEPTH_SIZE__Y_MAX__SHIFT 0x10
#define DB_DEPTH_SIZE__X_MAX_MASK 0x00003FFFL
#define DB_DEPTH_SIZE__Y_MAX_MASK 0x3FFF0000L
//DB_DEPTH_BOUNDS_MIN
#define DB_DEPTH_BOUNDS_MIN__MIN__SHIFT 0x0
#define DB_DEPTH_BOUNDS_MIN__MIN_MASK 0xFFFFFFFFL
//DB_DEPTH_BOUNDS_MAX
#define DB_DEPTH_BOUNDS_MAX__MAX__SHIFT 0x0
#define DB_DEPTH_BOUNDS_MAX__MAX_MASK 0xFFFFFFFFL
//DB_STENCIL_CLEAR
#define DB_STENCIL_CLEAR__CLEAR__SHIFT 0x0
#define DB_STENCIL_CLEAR__CLEAR_MASK 0x000000FFL
//DB_DEPTH_CLEAR
#define DB_DEPTH_CLEAR__DEPTH_CLEAR__SHIFT 0x0
#define DB_DEPTH_CLEAR__DEPTH_CLEAR_MASK 0xFFFFFFFFL
//PA_SC_SCREEN_SCISSOR_TL
#define PA_SC_SCREEN_SCISSOR_TL__TL_X__SHIFT 0x0
#define PA_SC_SCREEN_SCISSOR_TL__TL_Y__SHIFT 0x10
#define PA_SC_SCREEN_SCISSOR_TL__TL_X_MASK 0x0000FFFFL
#define PA_SC_SCREEN_SCISSOR_TL__TL_Y_MASK 0xFFFF0000L
//PA_SC_SCREEN_SCISSOR_BR
#define PA_SC_SCREEN_SCISSOR_BR__BR_X__SHIFT 0x0
#define PA_SC_SCREEN_SCISSOR_BR__BR_Y__SHIFT 0x10
#define PA_SC_SCREEN_SCISSOR_BR__BR_X_MASK 0x0000FFFFL
#define PA_SC_SCREEN_SCISSOR_BR__BR_Y_MASK 0xFFFF0000L
//DB_Z_INFO
#define DB_Z_INFO__FORMAT__SHIFT 0x0
#define DB_Z_INFO__NUM_SAMPLES__SHIFT 0x2
#define DB_Z_INFO__SW_MODE__SHIFT 0x4
#define DB_Z_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
#define DB_Z_INFO__FAULT_BEHAVIOR__SHIFT 0xd
#define DB_Z_INFO__ITERATE_FLUSH__SHIFT 0xf
#define DB_Z_INFO__MAXMIP__SHIFT 0x10
#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES__SHIFT 0x17
#define DB_Z_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
#define DB_Z_INFO__READ_SIZE__SHIFT 0x1c
#define DB_Z_INFO__TILE_SURFACE_ENABLE__SHIFT 0x1d
#define DB_Z_INFO__CLEAR_DISALLOWED__SHIFT 0x1e
#define DB_Z_INFO__ZRANGE_PRECISION__SHIFT 0x1f
#define DB_Z_INFO__FORMAT_MASK 0x00000003L
#define DB_Z_INFO__NUM_SAMPLES_MASK 0x0000000CL
#define DB_Z_INFO__SW_MODE_MASK 0x000001F0L
#define DB_Z_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
#define DB_Z_INFO__FAULT_BEHAVIOR_MASK 0x00006000L
#define DB_Z_INFO__ITERATE_FLUSH_MASK 0x00008000L
#define DB_Z_INFO__MAXMIP_MASK 0x000F0000L
#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES_MASK 0x07800000L
#define DB_Z_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
#define DB_Z_INFO__READ_SIZE_MASK 0x10000000L
#define DB_Z_INFO__TILE_SURFACE_ENABLE_MASK 0x20000000L
#define DB_Z_INFO__CLEAR_DISALLOWED_MASK 0x40000000L
#define DB_Z_INFO__ZRANGE_PRECISION_MASK 0x80000000L
//DB_STENCIL_INFO
#define DB_STENCIL_INFO__FORMAT__SHIFT 0x0
#define DB_STENCIL_INFO__SW_MODE__SHIFT 0x4
#define DB_STENCIL_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
#define DB_STENCIL_INFO__FAULT_BEHAVIOR__SHIFT 0xd
#define DB_STENCIL_INFO__ITERATE_FLUSH__SHIFT 0xf
#define DB_STENCIL_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE__SHIFT 0x1d
#define DB_STENCIL_INFO__CLEAR_DISALLOWED__SHIFT 0x1e
#define DB_STENCIL_INFO__FORMAT_MASK 0x00000001L
#define DB_STENCIL_INFO__SW_MODE_MASK 0x000001F0L
#define DB_STENCIL_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
#define DB_STENCIL_INFO__FAULT_BEHAVIOR_MASK 0x00006000L
#define DB_STENCIL_INFO__ITERATE_FLUSH_MASK 0x00008000L
#define DB_STENCIL_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE_MASK 0x20000000L
#define DB_STENCIL_INFO__CLEAR_DISALLOWED_MASK 0x40000000L
//DB_Z_READ_BASE
#define DB_Z_READ_BASE__BASE_256B__SHIFT 0x0
#define DB_Z_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
//DB_Z_READ_BASE_HI
#define DB_Z_READ_BASE_HI__BASE_HI__SHIFT 0x0
#define DB_Z_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
//DB_STENCIL_READ_BASE
#define DB_STENCIL_READ_BASE__BASE_256B__SHIFT 0x0
#define DB_STENCIL_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
//DB_STENCIL_READ_BASE_HI
#define DB_STENCIL_READ_BASE_HI__BASE_HI__SHIFT 0x0
#define DB_STENCIL_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
//DB_Z_WRITE_BASE
#define DB_Z_WRITE_BASE__BASE_256B__SHIFT 0x0
#define DB_Z_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
//DB_Z_WRITE_BASE_HI
#define DB_Z_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
#define DB_Z_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
//DB_STENCIL_WRITE_BASE
#define DB_STENCIL_WRITE_BASE__BASE_256B__SHIFT 0x0
#define DB_STENCIL_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
//DB_STENCIL_WRITE_BASE_HI
#define DB_STENCIL_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
#define DB_STENCIL_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
//DB_DFSM_CONTROL
#define DB_DFSM_CONTROL__PUNCHOUT_MODE__SHIFT 0x0
#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP__SHIFT 0x2
#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW__SHIFT 0x3
#define DB_DFSM_CONTROL__PUNCHOUT_MODE_MASK 0x00000003L
#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP_MASK 0x00000004L
#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW_MASK 0x00000008L
//DB_Z_INFO2
#define DB_Z_INFO2__EPITCH__SHIFT 0x0
#define DB_Z_INFO2__EPITCH_MASK 0x0000FFFFL
//DB_STENCIL_INFO2
#define DB_STENCIL_INFO2__EPITCH__SHIFT 0x0
#define DB_STENCIL_INFO2__EPITCH_MASK 0x0000FFFFL
//TA_BC_BASE_ADDR
#define TA_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
#define TA_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
//TA_BC_BASE_ADDR_HI
#define TA_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
#define TA_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
//COHER_DEST_BASE_HI_0
#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B__SHIFT 0x0
#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B_MASK 0x000000FFL
//COHER_DEST_BASE_HI_1
#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B__SHIFT 0x0
#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B_MASK 0x000000FFL
//COHER_DEST_BASE_HI_2
#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B__SHIFT 0x0
#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B_MASK 0x000000FFL
//COHER_DEST_BASE_HI_3
#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B__SHIFT 0x0
#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B_MASK 0x000000FFL
//COHER_DEST_BASE_2
#define COHER_DEST_BASE_2__DEST_BASE_256B__SHIFT 0x0
#define COHER_DEST_BASE_2__DEST_BASE_256B_MASK 0xFFFFFFFFL
//COHER_DEST_BASE_3
#define COHER_DEST_BASE_3__DEST_BASE_256B__SHIFT 0x0
#define COHER_DEST_BASE_3__DEST_BASE_256B_MASK 0xFFFFFFFFL
//PA_SC_WINDOW_OFFSET
#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET__SHIFT 0x0
#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET__SHIFT 0x10
#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET_MASK 0x0000FFFFL
#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET_MASK 0xFFFF0000L
//PA_SC_WINDOW_SCISSOR_TL
#define PA_SC_WINDOW_SCISSOR_TL__TL_X__SHIFT 0x0
#define PA_SC_WINDOW_SCISSOR_TL__TL_Y__SHIFT 0x10
#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_WINDOW_SCISSOR_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_WINDOW_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_WINDOW_SCISSOR_BR
#define PA_SC_WINDOW_SCISSOR_BR__BR_X__SHIFT 0x0
#define PA_SC_WINDOW_SCISSOR_BR__BR_Y__SHIFT 0x10
#define PA_SC_WINDOW_SCISSOR_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_WINDOW_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_CLIPRECT_RULE
#define PA_SC_CLIPRECT_RULE__CLIP_RULE__SHIFT 0x0
#define PA_SC_CLIPRECT_RULE__CLIP_RULE_MASK 0x0000FFFFL
//PA_SC_CLIPRECT_0_TL
#define PA_SC_CLIPRECT_0_TL__TL_X__SHIFT 0x0
#define PA_SC_CLIPRECT_0_TL__TL_Y__SHIFT 0x10
#define PA_SC_CLIPRECT_0_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_CLIPRECT_0_TL__TL_Y_MASK 0x7FFF0000L
//PA_SC_CLIPRECT_0_BR
#define PA_SC_CLIPRECT_0_BR__BR_X__SHIFT 0x0
#define PA_SC_CLIPRECT_0_BR__BR_Y__SHIFT 0x10
#define PA_SC_CLIPRECT_0_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_CLIPRECT_0_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_CLIPRECT_1_TL
#define PA_SC_CLIPRECT_1_TL__TL_X__SHIFT 0x0
#define PA_SC_CLIPRECT_1_TL__TL_Y__SHIFT 0x10
#define PA_SC_CLIPRECT_1_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_CLIPRECT_1_TL__TL_Y_MASK 0x7FFF0000L
//PA_SC_CLIPRECT_1_BR
#define PA_SC_CLIPRECT_1_BR__BR_X__SHIFT 0x0
#define PA_SC_CLIPRECT_1_BR__BR_Y__SHIFT 0x10
#define PA_SC_CLIPRECT_1_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_CLIPRECT_1_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_CLIPRECT_2_TL
#define PA_SC_CLIPRECT_2_TL__TL_X__SHIFT 0x0
#define PA_SC_CLIPRECT_2_TL__TL_Y__SHIFT 0x10
#define PA_SC_CLIPRECT_2_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_CLIPRECT_2_TL__TL_Y_MASK 0x7FFF0000L
//PA_SC_CLIPRECT_2_BR
#define PA_SC_CLIPRECT_2_BR__BR_X__SHIFT 0x0
#define PA_SC_CLIPRECT_2_BR__BR_Y__SHIFT 0x10
#define PA_SC_CLIPRECT_2_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_CLIPRECT_2_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_CLIPRECT_3_TL
#define PA_SC_CLIPRECT_3_TL__TL_X__SHIFT 0x0
#define PA_SC_CLIPRECT_3_TL__TL_Y__SHIFT 0x10
#define PA_SC_CLIPRECT_3_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_CLIPRECT_3_TL__TL_Y_MASK 0x7FFF0000L
//PA_SC_CLIPRECT_3_BR
#define PA_SC_CLIPRECT_3_BR__BR_X__SHIFT 0x0
#define PA_SC_CLIPRECT_3_BR__BR_Y__SHIFT 0x10
#define PA_SC_CLIPRECT_3_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_CLIPRECT_3_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_EDGERULE
#define PA_SC_EDGERULE__ER_TRI__SHIFT 0x0
#define PA_SC_EDGERULE__ER_POINT__SHIFT 0x4
#define PA_SC_EDGERULE__ER_RECT__SHIFT 0x8
#define PA_SC_EDGERULE__ER_LINE_LR__SHIFT 0xc
#define PA_SC_EDGERULE__ER_LINE_RL__SHIFT 0x12
#define PA_SC_EDGERULE__ER_LINE_TB__SHIFT 0x18
#define PA_SC_EDGERULE__ER_LINE_BT__SHIFT 0x1c
#define PA_SC_EDGERULE__ER_TRI_MASK 0x0000000FL
#define PA_SC_EDGERULE__ER_POINT_MASK 0x000000F0L
#define PA_SC_EDGERULE__ER_RECT_MASK 0x00000F00L
#define PA_SC_EDGERULE__ER_LINE_LR_MASK 0x0003F000L
#define PA_SC_EDGERULE__ER_LINE_RL_MASK 0x00FC0000L
#define PA_SC_EDGERULE__ER_LINE_TB_MASK 0x0F000000L
#define PA_SC_EDGERULE__ER_LINE_BT_MASK 0xF0000000L
//PA_SU_HARDWARE_SCREEN_OFFSET
#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X__SHIFT 0x0
#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y__SHIFT 0x10
#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X_MASK 0x000001FFL
#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y_MASK 0x01FF0000L
//CB_TARGET_MASK
#define CB_TARGET_MASK__TARGET0_ENABLE__SHIFT 0x0
#define CB_TARGET_MASK__TARGET1_ENABLE__SHIFT 0x4
#define CB_TARGET_MASK__TARGET2_ENABLE__SHIFT 0x8
#define CB_TARGET_MASK__TARGET3_ENABLE__SHIFT 0xc
#define CB_TARGET_MASK__TARGET4_ENABLE__SHIFT 0x10
#define CB_TARGET_MASK__TARGET5_ENABLE__SHIFT 0x14
#define CB_TARGET_MASK__TARGET6_ENABLE__SHIFT 0x18
#define CB_TARGET_MASK__TARGET7_ENABLE__SHIFT 0x1c
#define CB_TARGET_MASK__TARGET0_ENABLE_MASK 0x0000000FL
#define CB_TARGET_MASK__TARGET1_ENABLE_MASK 0x000000F0L
#define CB_TARGET_MASK__TARGET2_ENABLE_MASK 0x00000F00L
#define CB_TARGET_MASK__TARGET3_ENABLE_MASK 0x0000F000L
#define CB_TARGET_MASK__TARGET4_ENABLE_MASK 0x000F0000L
#define CB_TARGET_MASK__TARGET5_ENABLE_MASK 0x00F00000L
#define CB_TARGET_MASK__TARGET6_ENABLE_MASK 0x0F000000L
#define CB_TARGET_MASK__TARGET7_ENABLE_MASK 0xF0000000L
//CB_SHADER_MASK
#define CB_SHADER_MASK__OUTPUT0_ENABLE__SHIFT 0x0
#define CB_SHADER_MASK__OUTPUT1_ENABLE__SHIFT 0x4
#define CB_SHADER_MASK__OUTPUT2_ENABLE__SHIFT 0x8
#define CB_SHADER_MASK__OUTPUT3_ENABLE__SHIFT 0xc
#define CB_SHADER_MASK__OUTPUT4_ENABLE__SHIFT 0x10
#define CB_SHADER_MASK__OUTPUT5_ENABLE__SHIFT 0x14
#define CB_SHADER_MASK__OUTPUT6_ENABLE__SHIFT 0x18
#define CB_SHADER_MASK__OUTPUT7_ENABLE__SHIFT 0x1c
#define CB_SHADER_MASK__OUTPUT0_ENABLE_MASK 0x0000000FL
#define CB_SHADER_MASK__OUTPUT1_ENABLE_MASK 0x000000F0L
#define CB_SHADER_MASK__OUTPUT2_ENABLE_MASK 0x00000F00L
#define CB_SHADER_MASK__OUTPUT3_ENABLE_MASK 0x0000F000L
#define CB_SHADER_MASK__OUTPUT4_ENABLE_MASK 0x000F0000L
#define CB_SHADER_MASK__OUTPUT5_ENABLE_MASK 0x00F00000L
#define CB_SHADER_MASK__OUTPUT6_ENABLE_MASK 0x0F000000L
#define CB_SHADER_MASK__OUTPUT7_ENABLE_MASK 0xF0000000L
//PA_SC_GENERIC_SCISSOR_TL
#define PA_SC_GENERIC_SCISSOR_TL__TL_X__SHIFT 0x0
#define PA_SC_GENERIC_SCISSOR_TL__TL_Y__SHIFT 0x10
#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_GENERIC_SCISSOR_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_GENERIC_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_GENERIC_SCISSOR_BR
#define PA_SC_GENERIC_SCISSOR_BR__BR_X__SHIFT 0x0
#define PA_SC_GENERIC_SCISSOR_BR__BR_Y__SHIFT 0x10
#define PA_SC_GENERIC_SCISSOR_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_GENERIC_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
//COHER_DEST_BASE_0
#define COHER_DEST_BASE_0__DEST_BASE_256B__SHIFT 0x0
#define COHER_DEST_BASE_0__DEST_BASE_256B_MASK 0xFFFFFFFFL
//COHER_DEST_BASE_1
#define COHER_DEST_BASE_1__DEST_BASE_256B__SHIFT 0x0
#define COHER_DEST_BASE_1__DEST_BASE_256B_MASK 0xFFFFFFFFL
//PA_SC_VPORT_SCISSOR_0_TL
#define PA_SC_VPORT_SCISSOR_0_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_0_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_0_BR
#define PA_SC_VPORT_SCISSOR_0_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_0_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_VPORT_SCISSOR_1_TL
#define PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_1_BR
#define PA_SC_VPORT_SCISSOR_1_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_1_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_VPORT_SCISSOR_2_TL
#define PA_SC_VPORT_SCISSOR_2_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_2_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_2_BR
#define PA_SC_VPORT_SCISSOR_2_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_2_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_VPORT_SCISSOR_3_TL
#define PA_SC_VPORT_SCISSOR_3_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_3_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_3_BR
#define PA_SC_VPORT_SCISSOR_3_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_3_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_VPORT_SCISSOR_4_TL
#define PA_SC_VPORT_SCISSOR_4_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_4_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_4_BR
#define PA_SC_VPORT_SCISSOR_4_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_4_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_VPORT_SCISSOR_5_TL
#define PA_SC_VPORT_SCISSOR_5_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_5_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_5_BR
#define PA_SC_VPORT_SCISSOR_5_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_5_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_VPORT_SCISSOR_6_TL
#define PA_SC_VPORT_SCISSOR_6_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_6_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_6_BR
#define PA_SC_VPORT_SCISSOR_6_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_6_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_VPORT_SCISSOR_7_TL
#define PA_SC_VPORT_SCISSOR_7_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_7_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_7_BR
#define PA_SC_VPORT_SCISSOR_7_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_7_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_VPORT_SCISSOR_8_TL
#define PA_SC_VPORT_SCISSOR_8_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_8_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_8_BR
#define PA_SC_VPORT_SCISSOR_8_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_8_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_VPORT_SCISSOR_9_TL
#define PA_SC_VPORT_SCISSOR_9_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_9_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_9_BR
#define PA_SC_VPORT_SCISSOR_9_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_9_BR__BR_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y_MASK 0x7FFF0000L
//PA_SC_VPORT_SCISSOR_10_TL
#define PA_SC_VPORT_SCISSOR_10_TL__TL_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y__SHIFT 0x10
#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
#define PA_SC_VPORT_SCISSOR_10_TL__TL_X_MASK 0x00007FFFL
#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y_MASK 0x7FFF0000L
#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
//PA_SC_VPORT_SCISSOR_10_BR
#define PA_SC_VPORT_SCISSOR_10_BR__BR_X__SHIFT 0x0
#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y__SHIFT 0x10
16177#define PA_SC_VPORT_SCISSOR_10_BR__BR_X_MASK 0x00007FFFL
16178#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y_MASK 0x7FFF0000L
16179//PA_SC_VPORT_SCISSOR_11_TL
16180#define PA_SC_VPORT_SCISSOR_11_TL__TL_X__SHIFT 0x0
16181#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y__SHIFT 0x10
16182#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
16183#define PA_SC_VPORT_SCISSOR_11_TL__TL_X_MASK 0x00007FFFL
16184#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y_MASK 0x7FFF0000L
16185#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
16186//PA_SC_VPORT_SCISSOR_11_BR
16187#define PA_SC_VPORT_SCISSOR_11_BR__BR_X__SHIFT 0x0
16188#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y__SHIFT 0x10
16189#define PA_SC_VPORT_SCISSOR_11_BR__BR_X_MASK 0x00007FFFL
16190#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y_MASK 0x7FFF0000L
16191//PA_SC_VPORT_SCISSOR_12_TL
16192#define PA_SC_VPORT_SCISSOR_12_TL__TL_X__SHIFT 0x0
16193#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y__SHIFT 0x10
16194#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
16195#define PA_SC_VPORT_SCISSOR_12_TL__TL_X_MASK 0x00007FFFL
16196#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y_MASK 0x7FFF0000L
16197#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
16198//PA_SC_VPORT_SCISSOR_12_BR
16199#define PA_SC_VPORT_SCISSOR_12_BR__BR_X__SHIFT 0x0
16200#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y__SHIFT 0x10
16201#define PA_SC_VPORT_SCISSOR_12_BR__BR_X_MASK 0x00007FFFL
16202#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y_MASK 0x7FFF0000L
16203//PA_SC_VPORT_SCISSOR_13_TL
16204#define PA_SC_VPORT_SCISSOR_13_TL__TL_X__SHIFT 0x0
16205#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y__SHIFT 0x10
16206#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
16207#define PA_SC_VPORT_SCISSOR_13_TL__TL_X_MASK 0x00007FFFL
16208#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y_MASK 0x7FFF0000L
16209#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
16210//PA_SC_VPORT_SCISSOR_13_BR
16211#define PA_SC_VPORT_SCISSOR_13_BR__BR_X__SHIFT 0x0
16212#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y__SHIFT 0x10
16213#define PA_SC_VPORT_SCISSOR_13_BR__BR_X_MASK 0x00007FFFL
16214#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y_MASK 0x7FFF0000L
16215//PA_SC_VPORT_SCISSOR_14_TL
16216#define PA_SC_VPORT_SCISSOR_14_TL__TL_X__SHIFT 0x0
16217#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y__SHIFT 0x10
16218#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
16219#define PA_SC_VPORT_SCISSOR_14_TL__TL_X_MASK 0x00007FFFL
16220#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y_MASK 0x7FFF0000L
16221#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
16222//PA_SC_VPORT_SCISSOR_14_BR
16223#define PA_SC_VPORT_SCISSOR_14_BR__BR_X__SHIFT 0x0
16224#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y__SHIFT 0x10
16225#define PA_SC_VPORT_SCISSOR_14_BR__BR_X_MASK 0x00007FFFL
16226#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y_MASK 0x7FFF0000L
16227//PA_SC_VPORT_SCISSOR_15_TL
16228#define PA_SC_VPORT_SCISSOR_15_TL__TL_X__SHIFT 0x0
16229#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y__SHIFT 0x10
16230#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
16231#define PA_SC_VPORT_SCISSOR_15_TL__TL_X_MASK 0x00007FFFL
16232#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y_MASK 0x7FFF0000L
16233#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
16234//PA_SC_VPORT_SCISSOR_15_BR
16235#define PA_SC_VPORT_SCISSOR_15_BR__BR_X__SHIFT 0x0
16236#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y__SHIFT 0x10
16237#define PA_SC_VPORT_SCISSOR_15_BR__BR_X_MASK 0x00007FFFL
16238#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y_MASK 0x7FFF0000L
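/*
 * Editor's note, an illustrative sketch rather than part of the generated
 * header: every field above is described by a __SHIFT/_MASK pair, and a
 * value is placed into a register by shifting it into position and masking
 * off the unused bits.  The hypothetical helper below (example_* names are
 * not real driver code) shows the pattern for one viewport-scissor top-left
 * register, using only the definitions listed above.
 */
static inline unsigned int example_pa_sc_vport_scissor_tl(unsigned int tl_x,
							  unsigned int tl_y,
							  int window_offset_disable)
{
	unsigned int val = 0;

	/* 15-bit X coordinate in bits 14:0 */
	val |= (tl_x << PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT) &
	       PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK;
	/* 15-bit Y coordinate in bits 30:16 */
	val |= (tl_y << PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT) &
	       PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK;
	/* single-bit flag in bit 31 */
	if (window_offset_disable)
		val |= PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK;

	return val;
}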
16239//PA_SC_VPORT_ZMIN_0
16240#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN__SHIFT 0x0
16241#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN_MASK 0xFFFFFFFFL
16242//PA_SC_VPORT_ZMAX_0
16243#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX__SHIFT 0x0
16244#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX_MASK 0xFFFFFFFFL
16245//PA_SC_VPORT_ZMIN_1
16246#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN__SHIFT 0x0
16247#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN_MASK 0xFFFFFFFFL
16248//PA_SC_VPORT_ZMAX_1
16249#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX__SHIFT 0x0
16250#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX_MASK 0xFFFFFFFFL
16251//PA_SC_VPORT_ZMIN_2
16252#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN__SHIFT 0x0
16253#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN_MASK 0xFFFFFFFFL
16254//PA_SC_VPORT_ZMAX_2
16255#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX__SHIFT 0x0
16256#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX_MASK 0xFFFFFFFFL
16257//PA_SC_VPORT_ZMIN_3
16258#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN__SHIFT 0x0
16259#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN_MASK 0xFFFFFFFFL
16260//PA_SC_VPORT_ZMAX_3
16261#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX__SHIFT 0x0
16262#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX_MASK 0xFFFFFFFFL
16263//PA_SC_VPORT_ZMIN_4
16264#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN__SHIFT 0x0
16265#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN_MASK 0xFFFFFFFFL
16266//PA_SC_VPORT_ZMAX_4
16267#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX__SHIFT 0x0
16268#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX_MASK 0xFFFFFFFFL
16269//PA_SC_VPORT_ZMIN_5
16270#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN__SHIFT 0x0
16271#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN_MASK 0xFFFFFFFFL
16272//PA_SC_VPORT_ZMAX_5
16273#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX__SHIFT 0x0
16274#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX_MASK 0xFFFFFFFFL
16275//PA_SC_VPORT_ZMIN_6
16276#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN__SHIFT 0x0
16277#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN_MASK 0xFFFFFFFFL
16278//PA_SC_VPORT_ZMAX_6
16279#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX__SHIFT 0x0
16280#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX_MASK 0xFFFFFFFFL
16281//PA_SC_VPORT_ZMIN_7
16282#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN__SHIFT 0x0
16283#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN_MASK 0xFFFFFFFFL
16284//PA_SC_VPORT_ZMAX_7
16285#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX__SHIFT 0x0
16286#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX_MASK 0xFFFFFFFFL
16287//PA_SC_VPORT_ZMIN_8
16288#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN__SHIFT 0x0
16289#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN_MASK 0xFFFFFFFFL
16290//PA_SC_VPORT_ZMAX_8
16291#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX__SHIFT 0x0
16292#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX_MASK 0xFFFFFFFFL
16293//PA_SC_VPORT_ZMIN_9
16294#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN__SHIFT 0x0
16295#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN_MASK 0xFFFFFFFFL
16296//PA_SC_VPORT_ZMAX_9
16297#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX__SHIFT 0x0
16298#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX_MASK 0xFFFFFFFFL
16299//PA_SC_VPORT_ZMIN_10
16300#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN__SHIFT 0x0
16301#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN_MASK 0xFFFFFFFFL
16302//PA_SC_VPORT_ZMAX_10
16303#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX__SHIFT 0x0
16304#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX_MASK 0xFFFFFFFFL
16305//PA_SC_VPORT_ZMIN_11
16306#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN__SHIFT 0x0
16307#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN_MASK 0xFFFFFFFFL
16308//PA_SC_VPORT_ZMAX_11
16309#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX__SHIFT 0x0
16310#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX_MASK 0xFFFFFFFFL
16311//PA_SC_VPORT_ZMIN_12
16312#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN__SHIFT 0x0
16313#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN_MASK 0xFFFFFFFFL
16314//PA_SC_VPORT_ZMAX_12
16315#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX__SHIFT 0x0
16316#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX_MASK 0xFFFFFFFFL
16317//PA_SC_VPORT_ZMIN_13
16318#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN__SHIFT 0x0
16319#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN_MASK 0xFFFFFFFFL
16320//PA_SC_VPORT_ZMAX_13
16321#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX__SHIFT 0x0
16322#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX_MASK 0xFFFFFFFFL
16323//PA_SC_VPORT_ZMIN_14
16324#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN__SHIFT 0x0
16325#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN_MASK 0xFFFFFFFFL
16326//PA_SC_VPORT_ZMAX_14
16327#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX__SHIFT 0x0
16328#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX_MASK 0xFFFFFFFFL
16329//PA_SC_VPORT_ZMIN_15
16330#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN__SHIFT 0x0
16331#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN_MASK 0xFFFFFFFFL
16332//PA_SC_VPORT_ZMAX_15
16333#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX__SHIFT 0x0
16334#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX_MASK 0xFFFFFFFFL
16335//PA_SC_RASTER_CONFIG
16336#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT 0x0
16337#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT 0x2
16338#define PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT 0x4
16339#define PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT 0x6
16340#define PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT 0x7
16341#define PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT 0x8
16342#define PA_SC_RASTER_CONFIG__PKR_XSEL__SHIFT 0xa
16343#define PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT 0xc
16344#define PA_SC_RASTER_CONFIG__PKR_XSEL2__SHIFT 0xe
16345#define PA_SC_RASTER_CONFIG__SC_MAP__SHIFT 0x10
16346#define PA_SC_RASTER_CONFIG__SC_XSEL__SHIFT 0x12
16347#define PA_SC_RASTER_CONFIG__SC_YSEL__SHIFT 0x14
16348#define PA_SC_RASTER_CONFIG__SE_MAP__SHIFT 0x18
16349#define PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT 0x1a
16350#define PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT 0x1d
16351#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK 0x00000003L
16352#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK 0x0000000CL
16353#define PA_SC_RASTER_CONFIG__RB_XSEL2_MASK 0x00000030L
16354#define PA_SC_RASTER_CONFIG__RB_XSEL_MASK 0x00000040L
16355#define PA_SC_RASTER_CONFIG__RB_YSEL_MASK 0x00000080L
16356#define PA_SC_RASTER_CONFIG__PKR_MAP_MASK 0x00000300L
16357#define PA_SC_RASTER_CONFIG__PKR_XSEL_MASK 0x00000C00L
16358#define PA_SC_RASTER_CONFIG__PKR_YSEL_MASK 0x00003000L
16359#define PA_SC_RASTER_CONFIG__PKR_XSEL2_MASK 0x0000C000L
16360#define PA_SC_RASTER_CONFIG__SC_MAP_MASK 0x00030000L
16361#define PA_SC_RASTER_CONFIG__SC_XSEL_MASK 0x000C0000L
16362#define PA_SC_RASTER_CONFIG__SC_YSEL_MASK 0x00300000L
16363#define PA_SC_RASTER_CONFIG__SE_MAP_MASK 0x03000000L
16364#define PA_SC_RASTER_CONFIG__SE_XSEL_MASK 0x1C000000L
16365#define PA_SC_RASTER_CONFIG__SE_YSEL_MASK 0xE0000000L
16366//PA_SC_RASTER_CONFIG_1
16367#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP__SHIFT 0x0
16368#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL__SHIFT 0x2
16369#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL__SHIFT 0x5
16370#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP_MASK 0x00000003L
16371#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL_MASK 0x0000001CL
16372#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL_MASK 0x000000E0L
16373//PA_SC_SCREEN_EXTENT_CONTROL
16374#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x0
16375#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x2
16376#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x00000003L
16377#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE_MASK 0x0000000CL
16378//PA_SC_TILE_STEERING_OVERRIDE
16379#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE__SHIFT 0x0
16380#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE__SHIFT 0x1
16381#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE__SHIFT 0x5
16382#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING__SHIFT 0x8
16383#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE_MASK 0x00000001L
16384#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE_MASK 0x00000006L
16385#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE_MASK 0x00000060L
16386#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING_MASK 0x00000100L
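/*
 * Editor's note, another illustrative sketch (not generated content): when a
 * register is updated rather than written from scratch, the usual pattern is
 * read-modify-write with the same __SHIFT/_MASK pair, which is roughly what
 * the driver's generic field helpers expand to.  The example_* helper below
 * is hypothetical and only uses the PA_SC_TILE_STEERING_OVERRIDE definitions
 * above.
 */
static inline unsigned int
example_set_num_se(unsigned int reg_val, unsigned int num_se)
{
	/* clear the 2-bit NUM_SE field (bits 2:1), then insert the new value */
	reg_val &= ~PA_SC_TILE_STEERING_OVERRIDE__NUM_SE_MASK;
	reg_val |= (num_se << PA_SC_TILE_STEERING_OVERRIDE__NUM_SE__SHIFT) &
		   PA_SC_TILE_STEERING_OVERRIDE__NUM_SE_MASK;

	return reg_val;
}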
16387//CP_PERFMON_CNTX_CNTL
16388#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
16389#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
16390//CP_PIPEID
16391#define CP_PIPEID__PIPE_ID__SHIFT 0x0
16392#define CP_PIPEID__PIPE_ID_MASK 0x00000003L
16393//CP_RINGID
16394#define CP_RINGID__RINGID__SHIFT 0x0
16395#define CP_RINGID__RINGID_MASK 0x00000003L
16396//CP_VMID
16397#define CP_VMID__VMID__SHIFT 0x0
16398#define CP_VMID__VMID_MASK 0x0000000FL
16399//PA_SC_RIGHT_VERT_GRID
16400#define PA_SC_RIGHT_VERT_GRID__LEFT_QTR__SHIFT 0x0
16401#define PA_SC_RIGHT_VERT_GRID__LEFT_HALF__SHIFT 0x8
16402#define PA_SC_RIGHT_VERT_GRID__RIGHT_HALF__SHIFT 0x10
16403#define PA_SC_RIGHT_VERT_GRID__RIGHT_QTR__SHIFT 0x18
16404#define PA_SC_RIGHT_VERT_GRID__LEFT_QTR_MASK 0x000000FFL
16405#define PA_SC_RIGHT_VERT_GRID__LEFT_HALF_MASK 0x0000FF00L
16406#define PA_SC_RIGHT_VERT_GRID__RIGHT_HALF_MASK 0x00FF0000L
16407#define PA_SC_RIGHT_VERT_GRID__RIGHT_QTR_MASK 0xFF000000L
16408//PA_SC_LEFT_VERT_GRID
16409#define PA_SC_LEFT_VERT_GRID__LEFT_QTR__SHIFT 0x0
16410#define PA_SC_LEFT_VERT_GRID__LEFT_HALF__SHIFT 0x8
16411#define PA_SC_LEFT_VERT_GRID__RIGHT_HALF__SHIFT 0x10
16412#define PA_SC_LEFT_VERT_GRID__RIGHT_QTR__SHIFT 0x18
16413#define PA_SC_LEFT_VERT_GRID__LEFT_QTR_MASK 0x000000FFL
16414#define PA_SC_LEFT_VERT_GRID__LEFT_HALF_MASK 0x0000FF00L
16415#define PA_SC_LEFT_VERT_GRID__RIGHT_HALF_MASK 0x00FF0000L
16416#define PA_SC_LEFT_VERT_GRID__RIGHT_QTR_MASK 0xFF000000L
16417//PA_SC_HORIZ_GRID
16418#define PA_SC_HORIZ_GRID__TOP_QTR__SHIFT 0x0
16419#define PA_SC_HORIZ_GRID__TOP_HALF__SHIFT 0x8
16420#define PA_SC_HORIZ_GRID__BOT_HALF__SHIFT 0x10
16421#define PA_SC_HORIZ_GRID__BOT_QTR__SHIFT 0x18
16422#define PA_SC_HORIZ_GRID__TOP_QTR_MASK 0x000000FFL
16423#define PA_SC_HORIZ_GRID__TOP_HALF_MASK 0x0000FF00L
16424#define PA_SC_HORIZ_GRID__BOT_HALF_MASK 0x00FF0000L
16425#define PA_SC_HORIZ_GRID__BOT_QTR_MASK 0xFF000000L
16426//VGT_MULTI_PRIM_IB_RESET_INDX
16427#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
16428#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
16429//CB_BLEND_RED
16430#define CB_BLEND_RED__BLEND_RED__SHIFT 0x0
16431#define CB_BLEND_RED__BLEND_RED_MASK 0xFFFFFFFFL
16432//CB_BLEND_GREEN
16433#define CB_BLEND_GREEN__BLEND_GREEN__SHIFT 0x0
16434#define CB_BLEND_GREEN__BLEND_GREEN_MASK 0xFFFFFFFFL
16435//CB_BLEND_BLUE
16436#define CB_BLEND_BLUE__BLEND_BLUE__SHIFT 0x0
16437#define CB_BLEND_BLUE__BLEND_BLUE_MASK 0xFFFFFFFFL
16438//CB_BLEND_ALPHA
16439#define CB_BLEND_ALPHA__BLEND_ALPHA__SHIFT 0x0
16440#define CB_BLEND_ALPHA__BLEND_ALPHA_MASK 0xFFFFFFFFL
16441//CB_DCC_CONTROL
16442#define CB_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
16443#define CB_DCC_CONTROL__OVERWRITE_COMBINER_MRT_SHARING_DISABLE__SHIFT 0x1
16444#define CB_DCC_CONTROL__OVERWRITE_COMBINER_WATERMARK__SHIFT 0x2
16445#define CB_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
16446#define CB_DCC_CONTROL__OVERWRITE_COMBINER_MRT_SHARING_DISABLE_MASK 0x00000002L
16447#define CB_DCC_CONTROL__OVERWRITE_COMBINER_WATERMARK_MASK 0x0000007CL
16448//DB_STENCIL_CONTROL
16449#define DB_STENCIL_CONTROL__STENCILFAIL__SHIFT 0x0
16450#define DB_STENCIL_CONTROL__STENCILZPASS__SHIFT 0x4
16451#define DB_STENCIL_CONTROL__STENCILZFAIL__SHIFT 0x8
16452#define DB_STENCIL_CONTROL__STENCILFAIL_BF__SHIFT 0xc
16453#define DB_STENCIL_CONTROL__STENCILZPASS_BF__SHIFT 0x10
16454#define DB_STENCIL_CONTROL__STENCILZFAIL_BF__SHIFT 0x14
16455#define DB_STENCIL_CONTROL__STENCILFAIL_MASK 0x0000000FL
16456#define DB_STENCIL_CONTROL__STENCILZPASS_MASK 0x000000F0L
16457#define DB_STENCIL_CONTROL__STENCILZFAIL_MASK 0x00000F00L
16458#define DB_STENCIL_CONTROL__STENCILFAIL_BF_MASK 0x0000F000L
16459#define DB_STENCIL_CONTROL__STENCILZPASS_BF_MASK 0x000F0000L
16460#define DB_STENCIL_CONTROL__STENCILZFAIL_BF_MASK 0x00F00000L
16461//DB_STENCILREFMASK
16462#define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT 0x0
16463#define DB_STENCILREFMASK__STENCILMASK__SHIFT 0x8
16464#define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x10
16465#define DB_STENCILREFMASK__STENCILOPVAL__SHIFT 0x18
16466#define DB_STENCILREFMASK__STENCILTESTVAL_MASK 0x000000FFL
16467#define DB_STENCILREFMASK__STENCILMASK_MASK 0x0000FF00L
16468#define DB_STENCILREFMASK__STENCILWRITEMASK_MASK 0x00FF0000L
16469#define DB_STENCILREFMASK__STENCILOPVAL_MASK 0xFF000000L
16470//DB_STENCILREFMASK_BF
16471#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF__SHIFT 0x0
16472#define DB_STENCILREFMASK_BF__STENCILMASK_BF__SHIFT 0x8
16473#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF__SHIFT 0x10
16474#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF__SHIFT 0x18
16475#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF_MASK 0x000000FFL
16476#define DB_STENCILREFMASK_BF__STENCILMASK_BF_MASK 0x0000FF00L
16477#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF_MASK 0x00FF0000L
16478#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF_MASK 0xFF000000L
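/*
 * Editor's note, illustrative only: reading a field back is the mirror
 * operation, mask first and then shift down.  The hypothetical helper below
 * extracts the front-face stencil test/reference value from a
 * DB_STENCILREFMASK register value using the definitions above.
 */
static inline unsigned int example_db_stencil_ref(unsigned int refmask_val)
{
	/* STENCILTESTVAL occupies bits 7:0 */
	return (refmask_val & DB_STENCILREFMASK__STENCILTESTVAL_MASK) >>
	       DB_STENCILREFMASK__STENCILTESTVAL__SHIFT;
}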
16479//PA_CL_VPORT_XSCALE
16480#define PA_CL_VPORT_XSCALE__VPORT_XSCALE__SHIFT 0x0
16481#define PA_CL_VPORT_XSCALE__VPORT_XSCALE_MASK 0xFFFFFFFFL
16482//PA_CL_VPORT_XOFFSET
16483#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET__SHIFT 0x0
16484#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16485//PA_CL_VPORT_YSCALE
16486#define PA_CL_VPORT_YSCALE__VPORT_YSCALE__SHIFT 0x0
16487#define PA_CL_VPORT_YSCALE__VPORT_YSCALE_MASK 0xFFFFFFFFL
16488//PA_CL_VPORT_YOFFSET
16489#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET__SHIFT 0x0
16490#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16491//PA_CL_VPORT_ZSCALE
16492#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE__SHIFT 0x0
16493#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16494//PA_CL_VPORT_ZOFFSET
16495#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET__SHIFT 0x0
16496#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16497//PA_CL_VPORT_XSCALE_1
16498#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE__SHIFT 0x0
16499#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE_MASK 0xFFFFFFFFL
16500//PA_CL_VPORT_XOFFSET_1
16501#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET__SHIFT 0x0
16502#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16503//PA_CL_VPORT_YSCALE_1
16504#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE__SHIFT 0x0
16505#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE_MASK 0xFFFFFFFFL
16506//PA_CL_VPORT_YOFFSET_1
16507#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET__SHIFT 0x0
16508#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16509//PA_CL_VPORT_ZSCALE_1
16510#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE__SHIFT 0x0
16511#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16512//PA_CL_VPORT_ZOFFSET_1
16513#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET__SHIFT 0x0
16514#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16515//PA_CL_VPORT_XSCALE_2
16516#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE__SHIFT 0x0
16517#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE_MASK 0xFFFFFFFFL
16518//PA_CL_VPORT_XOFFSET_2
16519#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET__SHIFT 0x0
16520#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16521//PA_CL_VPORT_YSCALE_2
16522#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE__SHIFT 0x0
16523#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE_MASK 0xFFFFFFFFL
16524//PA_CL_VPORT_YOFFSET_2
16525#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET__SHIFT 0x0
16526#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16527//PA_CL_VPORT_ZSCALE_2
16528#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE__SHIFT 0x0
16529#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16530//PA_CL_VPORT_ZOFFSET_2
16531#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET__SHIFT 0x0
16532#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16533//PA_CL_VPORT_XSCALE_3
16534#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE__SHIFT 0x0
16535#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE_MASK 0xFFFFFFFFL
16536//PA_CL_VPORT_XOFFSET_3
16537#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET__SHIFT 0x0
16538#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16539//PA_CL_VPORT_YSCALE_3
16540#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE__SHIFT 0x0
16541#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE_MASK 0xFFFFFFFFL
16542//PA_CL_VPORT_YOFFSET_3
16543#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET__SHIFT 0x0
16544#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16545//PA_CL_VPORT_ZSCALE_3
16546#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE__SHIFT 0x0
16547#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16548//PA_CL_VPORT_ZOFFSET_3
16549#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET__SHIFT 0x0
16550#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16551//PA_CL_VPORT_XSCALE_4
16552#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE__SHIFT 0x0
16553#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE_MASK 0xFFFFFFFFL
16554//PA_CL_VPORT_XOFFSET_4
16555#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET__SHIFT 0x0
16556#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16557//PA_CL_VPORT_YSCALE_4
16558#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE__SHIFT 0x0
16559#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE_MASK 0xFFFFFFFFL
16560//PA_CL_VPORT_YOFFSET_4
16561#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET__SHIFT 0x0
16562#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16563//PA_CL_VPORT_ZSCALE_4
16564#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE__SHIFT 0x0
16565#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16566//PA_CL_VPORT_ZOFFSET_4
16567#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET__SHIFT 0x0
16568#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16569//PA_CL_VPORT_XSCALE_5
16570#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE__SHIFT 0x0
16571#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE_MASK 0xFFFFFFFFL
16572//PA_CL_VPORT_XOFFSET_5
16573#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET__SHIFT 0x0
16574#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16575//PA_CL_VPORT_YSCALE_5
16576#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE__SHIFT 0x0
16577#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE_MASK 0xFFFFFFFFL
16578//PA_CL_VPORT_YOFFSET_5
16579#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET__SHIFT 0x0
16580#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16581//PA_CL_VPORT_ZSCALE_5
16582#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE__SHIFT 0x0
16583#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16584//PA_CL_VPORT_ZOFFSET_5
16585#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET__SHIFT 0x0
16586#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16587//PA_CL_VPORT_XSCALE_6
16588#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE__SHIFT 0x0
16589#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE_MASK 0xFFFFFFFFL
16590//PA_CL_VPORT_XOFFSET_6
16591#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET__SHIFT 0x0
16592#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16593//PA_CL_VPORT_YSCALE_6
16594#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE__SHIFT 0x0
16595#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE_MASK 0xFFFFFFFFL
16596//PA_CL_VPORT_YOFFSET_6
16597#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET__SHIFT 0x0
16598#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16599//PA_CL_VPORT_ZSCALE_6
16600#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE__SHIFT 0x0
16601#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16602//PA_CL_VPORT_ZOFFSET_6
16603#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET__SHIFT 0x0
16604#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16605//PA_CL_VPORT_XSCALE_7
16606#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE__SHIFT 0x0
16607#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE_MASK 0xFFFFFFFFL
16608//PA_CL_VPORT_XOFFSET_7
16609#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET__SHIFT 0x0
16610#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16611//PA_CL_VPORT_YSCALE_7
16612#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE__SHIFT 0x0
16613#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE_MASK 0xFFFFFFFFL
16614//PA_CL_VPORT_YOFFSET_7
16615#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET__SHIFT 0x0
16616#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16617//PA_CL_VPORT_ZSCALE_7
16618#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE__SHIFT 0x0
16619#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16620//PA_CL_VPORT_ZOFFSET_7
16621#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET__SHIFT 0x0
16622#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16623//PA_CL_VPORT_XSCALE_8
16624#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE__SHIFT 0x0
16625#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE_MASK 0xFFFFFFFFL
16626//PA_CL_VPORT_XOFFSET_8
16627#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET__SHIFT 0x0
16628#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16629//PA_CL_VPORT_YSCALE_8
16630#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE__SHIFT 0x0
16631#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE_MASK 0xFFFFFFFFL
16632//PA_CL_VPORT_YOFFSET_8
16633#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET__SHIFT 0x0
16634#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16635//PA_CL_VPORT_ZSCALE_8
16636#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE__SHIFT 0x0
16637#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16638//PA_CL_VPORT_ZOFFSET_8
16639#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET__SHIFT 0x0
16640#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16641//PA_CL_VPORT_XSCALE_9
16642#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE__SHIFT 0x0
16643#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE_MASK 0xFFFFFFFFL
16644//PA_CL_VPORT_XOFFSET_9
16645#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET__SHIFT 0x0
16646#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16647//PA_CL_VPORT_YSCALE_9
16648#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE__SHIFT 0x0
16649#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE_MASK 0xFFFFFFFFL
16650//PA_CL_VPORT_YOFFSET_9
16651#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET__SHIFT 0x0
16652#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16653//PA_CL_VPORT_ZSCALE_9
16654#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE__SHIFT 0x0
16655#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16656//PA_CL_VPORT_ZOFFSET_9
16657#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET__SHIFT 0x0
16658#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16659//PA_CL_VPORT_XSCALE_10
16660#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE__SHIFT 0x0
16661#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE_MASK 0xFFFFFFFFL
16662//PA_CL_VPORT_XOFFSET_10
16663#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET__SHIFT 0x0
16664#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16665//PA_CL_VPORT_YSCALE_10
16666#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE__SHIFT 0x0
16667#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE_MASK 0xFFFFFFFFL
16668//PA_CL_VPORT_YOFFSET_10
16669#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET__SHIFT 0x0
16670#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16671//PA_CL_VPORT_ZSCALE_10
16672#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE__SHIFT 0x0
16673#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16674//PA_CL_VPORT_ZOFFSET_10
16675#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET__SHIFT 0x0
16676#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16677//PA_CL_VPORT_XSCALE_11
16678#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE__SHIFT 0x0
16679#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE_MASK 0xFFFFFFFFL
16680//PA_CL_VPORT_XOFFSET_11
16681#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET__SHIFT 0x0
16682#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16683//PA_CL_VPORT_YSCALE_11
16684#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE__SHIFT 0x0
16685#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE_MASK 0xFFFFFFFFL
16686//PA_CL_VPORT_YOFFSET_11
16687#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET__SHIFT 0x0
16688#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16689//PA_CL_VPORT_ZSCALE_11
16690#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE__SHIFT 0x0
16691#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16692//PA_CL_VPORT_ZOFFSET_11
16693#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET__SHIFT 0x0
16694#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16695//PA_CL_VPORT_XSCALE_12
16696#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE__SHIFT 0x0
16697#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE_MASK 0xFFFFFFFFL
16698//PA_CL_VPORT_XOFFSET_12
16699#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET__SHIFT 0x0
16700#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16701//PA_CL_VPORT_YSCALE_12
16702#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE__SHIFT 0x0
16703#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE_MASK 0xFFFFFFFFL
16704//PA_CL_VPORT_YOFFSET_12
16705#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET__SHIFT 0x0
16706#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16707//PA_CL_VPORT_ZSCALE_12
16708#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE__SHIFT 0x0
16709#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16710//PA_CL_VPORT_ZOFFSET_12
16711#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET__SHIFT 0x0
16712#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16713//PA_CL_VPORT_XSCALE_13
16714#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE__SHIFT 0x0
16715#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE_MASK 0xFFFFFFFFL
16716//PA_CL_VPORT_XOFFSET_13
16717#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET__SHIFT 0x0
16718#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16719//PA_CL_VPORT_YSCALE_13
16720#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE__SHIFT 0x0
16721#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE_MASK 0xFFFFFFFFL
16722//PA_CL_VPORT_YOFFSET_13
16723#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET__SHIFT 0x0
16724#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16725//PA_CL_VPORT_ZSCALE_13
16726#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE__SHIFT 0x0
16727#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16728//PA_CL_VPORT_ZOFFSET_13
16729#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET__SHIFT 0x0
16730#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16731//PA_CL_VPORT_XSCALE_14
16732#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE__SHIFT 0x0
16733#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE_MASK 0xFFFFFFFFL
16734//PA_CL_VPORT_XOFFSET_14
16735#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET__SHIFT 0x0
16736#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16737//PA_CL_VPORT_YSCALE_14
16738#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE__SHIFT 0x0
16739#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE_MASK 0xFFFFFFFFL
16740//PA_CL_VPORT_YOFFSET_14
16741#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET__SHIFT 0x0
16742#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16743//PA_CL_VPORT_ZSCALE_14
16744#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE__SHIFT 0x0
16745#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16746//PA_CL_VPORT_ZOFFSET_14
16747#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET__SHIFT 0x0
16748#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16749//PA_CL_VPORT_XSCALE_15
16750#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE__SHIFT 0x0
16751#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE_MASK 0xFFFFFFFFL
16752//PA_CL_VPORT_XOFFSET_15
16753#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET__SHIFT 0x0
16754#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET_MASK 0xFFFFFFFFL
16755//PA_CL_VPORT_YSCALE_15
16756#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE__SHIFT 0x0
16757#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE_MASK 0xFFFFFFFFL
16758//PA_CL_VPORT_YOFFSET_15
16759#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET__SHIFT 0x0
16760#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET_MASK 0xFFFFFFFFL
16761//PA_CL_VPORT_ZSCALE_15
16762#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE__SHIFT 0x0
16763#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE_MASK 0xFFFFFFFFL
16764//PA_CL_VPORT_ZOFFSET_15
16765#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET__SHIFT 0x0
16766#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
16767//PA_CL_UCP_0_X
16768#define PA_CL_UCP_0_X__DATA_REGISTER__SHIFT 0x0
16769#define PA_CL_UCP_0_X__DATA_REGISTER_MASK 0xFFFFFFFFL
16770//PA_CL_UCP_0_Y
16771#define PA_CL_UCP_0_Y__DATA_REGISTER__SHIFT 0x0
16772#define PA_CL_UCP_0_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
16773//PA_CL_UCP_0_Z
16774#define PA_CL_UCP_0_Z__DATA_REGISTER__SHIFT 0x0
16775#define PA_CL_UCP_0_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
16776//PA_CL_UCP_0_W
16777#define PA_CL_UCP_0_W__DATA_REGISTER__SHIFT 0x0
16778#define PA_CL_UCP_0_W__DATA_REGISTER_MASK 0xFFFFFFFFL
16779//PA_CL_UCP_1_X
16780#define PA_CL_UCP_1_X__DATA_REGISTER__SHIFT 0x0
16781#define PA_CL_UCP_1_X__DATA_REGISTER_MASK 0xFFFFFFFFL
16782//PA_CL_UCP_1_Y
16783#define PA_CL_UCP_1_Y__DATA_REGISTER__SHIFT 0x0
16784#define PA_CL_UCP_1_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
16785//PA_CL_UCP_1_Z
16786#define PA_CL_UCP_1_Z__DATA_REGISTER__SHIFT 0x0
16787#define PA_CL_UCP_1_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
16788//PA_CL_UCP_1_W
16789#define PA_CL_UCP_1_W__DATA_REGISTER__SHIFT 0x0
16790#define PA_CL_UCP_1_W__DATA_REGISTER_MASK 0xFFFFFFFFL
16791//PA_CL_UCP_2_X
16792#define PA_CL_UCP_2_X__DATA_REGISTER__SHIFT 0x0
16793#define PA_CL_UCP_2_X__DATA_REGISTER_MASK 0xFFFFFFFFL
16794//PA_CL_UCP_2_Y
16795#define PA_CL_UCP_2_Y__DATA_REGISTER__SHIFT 0x0
16796#define PA_CL_UCP_2_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
16797//PA_CL_UCP_2_Z
16798#define PA_CL_UCP_2_Z__DATA_REGISTER__SHIFT 0x0
16799#define PA_CL_UCP_2_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
16800//PA_CL_UCP_2_W
16801#define PA_CL_UCP_2_W__DATA_REGISTER__SHIFT 0x0
16802#define PA_CL_UCP_2_W__DATA_REGISTER_MASK 0xFFFFFFFFL
16803//PA_CL_UCP_3_X
16804#define PA_CL_UCP_3_X__DATA_REGISTER__SHIFT 0x0
16805#define PA_CL_UCP_3_X__DATA_REGISTER_MASK 0xFFFFFFFFL
16806//PA_CL_UCP_3_Y
16807#define PA_CL_UCP_3_Y__DATA_REGISTER__SHIFT 0x0
16808#define PA_CL_UCP_3_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
16809//PA_CL_UCP_3_Z
16810#define PA_CL_UCP_3_Z__DATA_REGISTER__SHIFT 0x0
16811#define PA_CL_UCP_3_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
16812//PA_CL_UCP_3_W
16813#define PA_CL_UCP_3_W__DATA_REGISTER__SHIFT 0x0
16814#define PA_CL_UCP_3_W__DATA_REGISTER_MASK 0xFFFFFFFFL
16815//PA_CL_UCP_4_X
16816#define PA_CL_UCP_4_X__DATA_REGISTER__SHIFT 0x0
16817#define PA_CL_UCP_4_X__DATA_REGISTER_MASK 0xFFFFFFFFL
16818//PA_CL_UCP_4_Y
16819#define PA_CL_UCP_4_Y__DATA_REGISTER__SHIFT 0x0
16820#define PA_CL_UCP_4_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
16821//PA_CL_UCP_4_Z
16822#define PA_CL_UCP_4_Z__DATA_REGISTER__SHIFT 0x0
16823#define PA_CL_UCP_4_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
16824//PA_CL_UCP_4_W
16825#define PA_CL_UCP_4_W__DATA_REGISTER__SHIFT 0x0
16826#define PA_CL_UCP_4_W__DATA_REGISTER_MASK 0xFFFFFFFFL
16827//PA_CL_UCP_5_X
16828#define PA_CL_UCP_5_X__DATA_REGISTER__SHIFT 0x0
16829#define PA_CL_UCP_5_X__DATA_REGISTER_MASK 0xFFFFFFFFL
16830//PA_CL_UCP_5_Y
16831#define PA_CL_UCP_5_Y__DATA_REGISTER__SHIFT 0x0
16832#define PA_CL_UCP_5_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
16833//PA_CL_UCP_5_Z
16834#define PA_CL_UCP_5_Z__DATA_REGISTER__SHIFT 0x0
16835#define PA_CL_UCP_5_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
16836//PA_CL_UCP_5_W
16837#define PA_CL_UCP_5_W__DATA_REGISTER__SHIFT 0x0
16838#define PA_CL_UCP_5_W__DATA_REGISTER_MASK 0xFFFFFFFFL
16839//SPI_PS_INPUT_CNTL_0
16840#define SPI_PS_INPUT_CNTL_0__OFFSET__SHIFT 0x0
16841#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL__SHIFT 0x8
16842#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE__SHIFT 0xa
16843#define SPI_PS_INPUT_CNTL_0__CYL_WRAP__SHIFT 0xd
16844#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX__SHIFT 0x11
16845#define SPI_PS_INPUT_CNTL_0__DUP__SHIFT 0x12
16846#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE__SHIFT 0x13
16847#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1__SHIFT 0x14
16848#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1__SHIFT 0x15
16849#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
16850#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID__SHIFT 0x18
16851#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID__SHIFT 0x19
16852#define SPI_PS_INPUT_CNTL_0__OFFSET_MASK 0x0000003FL
16853#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_MASK 0x00000300L
16854#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE_MASK 0x00000400L
16855#define SPI_PS_INPUT_CNTL_0__CYL_WRAP_MASK 0x0001E000L
16856#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_MASK 0x00020000L
16857#define SPI_PS_INPUT_CNTL_0__DUP_MASK 0x00040000L
16858#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE_MASK 0x00080000L
16859#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1_MASK 0x00100000L
16860#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1_MASK 0x00600000L
16861#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
16862#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID_MASK 0x01000000L
16863#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID_MASK 0x02000000L
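/*
 * Editor's note, illustrative only: the SPI_PS_INPUT_CNTL_n registers that
 * follow repeat exactly the same field layout as SPI_PS_INPUT_CNTL_0, one
 * register per pixel-shader input.  Per-index fields can therefore be
 * addressed generically by token pasting, as in the hypothetical EXAMPLE_
 * macro below, which mirrors the shift-and-mask pattern used throughout
 * this file.
 */
#define EXAMPLE_SPI_PS_INPUT_CNTL_FIELD(idx, field, val)		\
	(((val) << SPI_PS_INPUT_CNTL_##idx##__##field##__SHIFT) &	\
	 SPI_PS_INPUT_CNTL_##idx##__##field##_MASK)

/* e.g. EXAMPLE_SPI_PS_INPUT_CNTL_FIELD(1, FLAT_SHADE, 1) selects bit 10 */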
16864//SPI_PS_INPUT_CNTL_1
16865#define SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT 0x0
16866#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL__SHIFT 0x8
16867#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT 0xa
16868#define SPI_PS_INPUT_CNTL_1__CYL_WRAP__SHIFT 0xd
16869#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX__SHIFT 0x11
16870#define SPI_PS_INPUT_CNTL_1__DUP__SHIFT 0x12
16871#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE__SHIFT 0x13
16872#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1__SHIFT 0x14
16873#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1__SHIFT 0x15
16874#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
16875#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID__SHIFT 0x18
16876#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID__SHIFT 0x19
16877#define SPI_PS_INPUT_CNTL_1__OFFSET_MASK 0x0000003FL
16878#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_MASK 0x00000300L
16879#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK 0x00000400L
16880#define SPI_PS_INPUT_CNTL_1__CYL_WRAP_MASK 0x0001E000L
16881#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_MASK 0x00020000L
16882#define SPI_PS_INPUT_CNTL_1__DUP_MASK 0x00040000L
16883#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE_MASK 0x00080000L
16884#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1_MASK 0x00100000L
16885#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1_MASK 0x00600000L
16886#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
16887#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID_MASK 0x01000000L
16888#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID_MASK 0x02000000L
16889//SPI_PS_INPUT_CNTL_2
16890#define SPI_PS_INPUT_CNTL_2__OFFSET__SHIFT 0x0
16891#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL__SHIFT 0x8
16892#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE__SHIFT 0xa
16893#define SPI_PS_INPUT_CNTL_2__CYL_WRAP__SHIFT 0xd
16894#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX__SHIFT 0x11
16895#define SPI_PS_INPUT_CNTL_2__DUP__SHIFT 0x12
16896#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE__SHIFT 0x13
16897#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1__SHIFT 0x14
16898#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1__SHIFT 0x15
16899#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
16900#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID__SHIFT 0x18
16901#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID__SHIFT 0x19
16902#define SPI_PS_INPUT_CNTL_2__OFFSET_MASK 0x0000003FL
16903#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_MASK 0x00000300L
16904#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE_MASK 0x00000400L
16905#define SPI_PS_INPUT_CNTL_2__CYL_WRAP_MASK 0x0001E000L
16906#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_MASK 0x00020000L
16907#define SPI_PS_INPUT_CNTL_2__DUP_MASK 0x00040000L
16908#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE_MASK 0x00080000L
16909#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1_MASK 0x00100000L
16910#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1_MASK 0x00600000L
16911#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
16912#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID_MASK 0x01000000L
16913#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID_MASK 0x02000000L
16914//SPI_PS_INPUT_CNTL_3
16915#define SPI_PS_INPUT_CNTL_3__OFFSET__SHIFT 0x0
16916#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL__SHIFT 0x8
16917#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE__SHIFT 0xa
16918#define SPI_PS_INPUT_CNTL_3__CYL_WRAP__SHIFT 0xd
16919#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX__SHIFT 0x11
16920#define SPI_PS_INPUT_CNTL_3__DUP__SHIFT 0x12
16921#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE__SHIFT 0x13
16922#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1__SHIFT 0x14
16923#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1__SHIFT 0x15
16924#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
16925#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID__SHIFT 0x18
16926#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID__SHIFT 0x19
16927#define SPI_PS_INPUT_CNTL_3__OFFSET_MASK 0x0000003FL
16928#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_MASK 0x00000300L
16929#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE_MASK 0x00000400L
16930#define SPI_PS_INPUT_CNTL_3__CYL_WRAP_MASK 0x0001E000L
16931#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_MASK 0x00020000L
16932#define SPI_PS_INPUT_CNTL_3__DUP_MASK 0x00040000L
16933#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE_MASK 0x00080000L
16934#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1_MASK 0x00100000L
16935#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1_MASK 0x00600000L
16936#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
16937#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID_MASK 0x01000000L
16938#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID_MASK 0x02000000L
16939//SPI_PS_INPUT_CNTL_4
16940#define SPI_PS_INPUT_CNTL_4__OFFSET__SHIFT 0x0
16941#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL__SHIFT 0x8
16942#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE__SHIFT 0xa
16943#define SPI_PS_INPUT_CNTL_4__CYL_WRAP__SHIFT 0xd
16944#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX__SHIFT 0x11
16945#define SPI_PS_INPUT_CNTL_4__DUP__SHIFT 0x12
16946#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE__SHIFT 0x13
16947#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1__SHIFT 0x14
16948#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1__SHIFT 0x15
16949#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
16950#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID__SHIFT 0x18
16951#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID__SHIFT 0x19
16952#define SPI_PS_INPUT_CNTL_4__OFFSET_MASK 0x0000003FL
16953#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_MASK 0x00000300L
16954#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE_MASK 0x00000400L
16955#define SPI_PS_INPUT_CNTL_4__CYL_WRAP_MASK 0x0001E000L
16956#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_MASK 0x00020000L
16957#define SPI_PS_INPUT_CNTL_4__DUP_MASK 0x00040000L
16958#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE_MASK 0x00080000L
16959#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1_MASK 0x00100000L
16960#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1_MASK 0x00600000L
16961#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
16962#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID_MASK 0x01000000L
16963#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID_MASK 0x02000000L
16964//SPI_PS_INPUT_CNTL_5
16965#define SPI_PS_INPUT_CNTL_5__OFFSET__SHIFT 0x0
16966#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL__SHIFT 0x8
16967#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE__SHIFT 0xa
16968#define SPI_PS_INPUT_CNTL_5__CYL_WRAP__SHIFT 0xd
16969#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX__SHIFT 0x11
16970#define SPI_PS_INPUT_CNTL_5__DUP__SHIFT 0x12
16971#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE__SHIFT 0x13
16972#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1__SHIFT 0x14
16973#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1__SHIFT 0x15
16974#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
16975#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID__SHIFT 0x18
16976#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID__SHIFT 0x19
16977#define SPI_PS_INPUT_CNTL_5__OFFSET_MASK 0x0000003FL
16978#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_MASK 0x00000300L
16979#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE_MASK 0x00000400L
16980#define SPI_PS_INPUT_CNTL_5__CYL_WRAP_MASK 0x0001E000L
16981#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_MASK 0x00020000L
16982#define SPI_PS_INPUT_CNTL_5__DUP_MASK 0x00040000L
16983#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE_MASK 0x00080000L
16984#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1_MASK 0x00100000L
16985#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1_MASK 0x00600000L
16986#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
16987#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID_MASK 0x01000000L
16988#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID_MASK 0x02000000L
16989//SPI_PS_INPUT_CNTL_6
16990#define SPI_PS_INPUT_CNTL_6__OFFSET__SHIFT 0x0
16991#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL__SHIFT 0x8
16992#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE__SHIFT 0xa
16993#define SPI_PS_INPUT_CNTL_6__CYL_WRAP__SHIFT 0xd
16994#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX__SHIFT 0x11
16995#define SPI_PS_INPUT_CNTL_6__DUP__SHIFT 0x12
16996#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE__SHIFT 0x13
16997#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1__SHIFT 0x14
16998#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1__SHIFT 0x15
16999#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17000#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID__SHIFT 0x18
17001#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID__SHIFT 0x19
17002#define SPI_PS_INPUT_CNTL_6__OFFSET_MASK 0x0000003FL
17003#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_MASK 0x00000300L
17004#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE_MASK 0x00000400L
17005#define SPI_PS_INPUT_CNTL_6__CYL_WRAP_MASK 0x0001E000L
17006#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_MASK 0x00020000L
17007#define SPI_PS_INPUT_CNTL_6__DUP_MASK 0x00040000L
17008#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE_MASK 0x00080000L
17009#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1_MASK 0x00100000L
17010#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17011#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17012#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID_MASK 0x01000000L
17013#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID_MASK 0x02000000L
17014//SPI_PS_INPUT_CNTL_7
17015#define SPI_PS_INPUT_CNTL_7__OFFSET__SHIFT 0x0
17016#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL__SHIFT 0x8
17017#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE__SHIFT 0xa
17018#define SPI_PS_INPUT_CNTL_7__CYL_WRAP__SHIFT 0xd
17019#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX__SHIFT 0x11
17020#define SPI_PS_INPUT_CNTL_7__DUP__SHIFT 0x12
17021#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE__SHIFT 0x13
17022#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1__SHIFT 0x14
17023#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1__SHIFT 0x15
17024#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17025#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID__SHIFT 0x18
17026#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID__SHIFT 0x19
17027#define SPI_PS_INPUT_CNTL_7__OFFSET_MASK 0x0000003FL
17028#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_MASK 0x00000300L
17029#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE_MASK 0x00000400L
17030#define SPI_PS_INPUT_CNTL_7__CYL_WRAP_MASK 0x0001E000L
17031#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_MASK 0x00020000L
17032#define SPI_PS_INPUT_CNTL_7__DUP_MASK 0x00040000L
17033#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE_MASK 0x00080000L
17034#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1_MASK 0x00100000L
17035#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17036#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17037#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID_MASK 0x01000000L
17038#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID_MASK 0x02000000L
17039//SPI_PS_INPUT_CNTL_8
17040#define SPI_PS_INPUT_CNTL_8__OFFSET__SHIFT 0x0
17041#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL__SHIFT 0x8
17042#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE__SHIFT 0xa
17043#define SPI_PS_INPUT_CNTL_8__CYL_WRAP__SHIFT 0xd
17044#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX__SHIFT 0x11
17045#define SPI_PS_INPUT_CNTL_8__DUP__SHIFT 0x12
17046#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE__SHIFT 0x13
17047#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1__SHIFT 0x14
17048#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1__SHIFT 0x15
17049#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17050#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID__SHIFT 0x18
17051#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID__SHIFT 0x19
17052#define SPI_PS_INPUT_CNTL_8__OFFSET_MASK 0x0000003FL
17053#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_MASK 0x00000300L
17054#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE_MASK 0x00000400L
17055#define SPI_PS_INPUT_CNTL_8__CYL_WRAP_MASK 0x0001E000L
17056#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_MASK 0x00020000L
17057#define SPI_PS_INPUT_CNTL_8__DUP_MASK 0x00040000L
17058#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE_MASK 0x00080000L
17059#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1_MASK 0x00100000L
17060#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17061#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17062#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID_MASK 0x01000000L
17063#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID_MASK 0x02000000L
17064//SPI_PS_INPUT_CNTL_9
17065#define SPI_PS_INPUT_CNTL_9__OFFSET__SHIFT 0x0
17066#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL__SHIFT 0x8
17067#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE__SHIFT 0xa
17068#define SPI_PS_INPUT_CNTL_9__CYL_WRAP__SHIFT 0xd
17069#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX__SHIFT 0x11
17070#define SPI_PS_INPUT_CNTL_9__DUP__SHIFT 0x12
17071#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE__SHIFT 0x13
17072#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1__SHIFT 0x14
17073#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1__SHIFT 0x15
17074#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17075#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID__SHIFT 0x18
17076#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID__SHIFT 0x19
17077#define SPI_PS_INPUT_CNTL_9__OFFSET_MASK 0x0000003FL
17078#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_MASK 0x00000300L
17079#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE_MASK 0x00000400L
17080#define SPI_PS_INPUT_CNTL_9__CYL_WRAP_MASK 0x0001E000L
17081#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_MASK 0x00020000L
17082#define SPI_PS_INPUT_CNTL_9__DUP_MASK 0x00040000L
17083#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE_MASK 0x00080000L
17084#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1_MASK 0x00100000L
17085#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17086#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17087#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID_MASK 0x01000000L
17088#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID_MASK 0x02000000L
17089//SPI_PS_INPUT_CNTL_10
17090#define SPI_PS_INPUT_CNTL_10__OFFSET__SHIFT 0x0
17091#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL__SHIFT 0x8
17092#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE__SHIFT 0xa
17093#define SPI_PS_INPUT_CNTL_10__CYL_WRAP__SHIFT 0xd
17094#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX__SHIFT 0x11
17095#define SPI_PS_INPUT_CNTL_10__DUP__SHIFT 0x12
17096#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE__SHIFT 0x13
17097#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1__SHIFT 0x14
17098#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1__SHIFT 0x15
17099#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17100#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID__SHIFT 0x18
17101#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID__SHIFT 0x19
17102#define SPI_PS_INPUT_CNTL_10__OFFSET_MASK 0x0000003FL
17103#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_MASK 0x00000300L
17104#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE_MASK 0x00000400L
17105#define SPI_PS_INPUT_CNTL_10__CYL_WRAP_MASK 0x0001E000L
17106#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_MASK 0x00020000L
17107#define SPI_PS_INPUT_CNTL_10__DUP_MASK 0x00040000L
17108#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE_MASK 0x00080000L
17109#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1_MASK 0x00100000L
17110#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17111#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17112#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID_MASK 0x01000000L
17113#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID_MASK 0x02000000L
17114//SPI_PS_INPUT_CNTL_11
17115#define SPI_PS_INPUT_CNTL_11__OFFSET__SHIFT 0x0
17116#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL__SHIFT 0x8
17117#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE__SHIFT 0xa
17118#define SPI_PS_INPUT_CNTL_11__CYL_WRAP__SHIFT 0xd
17119#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX__SHIFT 0x11
17120#define SPI_PS_INPUT_CNTL_11__DUP__SHIFT 0x12
17121#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE__SHIFT 0x13
17122#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1__SHIFT 0x14
17123#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1__SHIFT 0x15
17124#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17125#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID__SHIFT 0x18
17126#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID__SHIFT 0x19
17127#define SPI_PS_INPUT_CNTL_11__OFFSET_MASK 0x0000003FL
17128#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_MASK 0x00000300L
17129#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE_MASK 0x00000400L
17130#define SPI_PS_INPUT_CNTL_11__CYL_WRAP_MASK 0x0001E000L
17131#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_MASK 0x00020000L
17132#define SPI_PS_INPUT_CNTL_11__DUP_MASK 0x00040000L
17133#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE_MASK 0x00080000L
17134#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1_MASK 0x00100000L
17135#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17136#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17137#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID_MASK 0x01000000L
17138#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID_MASK 0x02000000L
17139//SPI_PS_INPUT_CNTL_12
17140#define SPI_PS_INPUT_CNTL_12__OFFSET__SHIFT 0x0
17141#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL__SHIFT 0x8
17142#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE__SHIFT 0xa
17143#define SPI_PS_INPUT_CNTL_12__CYL_WRAP__SHIFT 0xd
17144#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX__SHIFT 0x11
17145#define SPI_PS_INPUT_CNTL_12__DUP__SHIFT 0x12
17146#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE__SHIFT 0x13
17147#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1__SHIFT 0x14
17148#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1__SHIFT 0x15
17149#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17150#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID__SHIFT 0x18
17151#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID__SHIFT 0x19
17152#define SPI_PS_INPUT_CNTL_12__OFFSET_MASK 0x0000003FL
17153#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_MASK 0x00000300L
17154#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE_MASK 0x00000400L
17155#define SPI_PS_INPUT_CNTL_12__CYL_WRAP_MASK 0x0001E000L
17156#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_MASK 0x00020000L
17157#define SPI_PS_INPUT_CNTL_12__DUP_MASK 0x00040000L
17158#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE_MASK 0x00080000L
17159#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1_MASK 0x00100000L
17160#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17161#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17162#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID_MASK 0x01000000L
17163#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID_MASK 0x02000000L
17164//SPI_PS_INPUT_CNTL_13
17165#define SPI_PS_INPUT_CNTL_13__OFFSET__SHIFT 0x0
17166#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL__SHIFT 0x8
17167#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE__SHIFT 0xa
17168#define SPI_PS_INPUT_CNTL_13__CYL_WRAP__SHIFT 0xd
17169#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX__SHIFT 0x11
17170#define SPI_PS_INPUT_CNTL_13__DUP__SHIFT 0x12
17171#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE__SHIFT 0x13
17172#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1__SHIFT 0x14
17173#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1__SHIFT 0x15
17174#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17175#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID__SHIFT 0x18
17176#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID__SHIFT 0x19
17177#define SPI_PS_INPUT_CNTL_13__OFFSET_MASK 0x0000003FL
17178#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_MASK 0x00000300L
17179#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE_MASK 0x00000400L
17180#define SPI_PS_INPUT_CNTL_13__CYL_WRAP_MASK 0x0001E000L
17181#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_MASK 0x00020000L
17182#define SPI_PS_INPUT_CNTL_13__DUP_MASK 0x00040000L
17183#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE_MASK 0x00080000L
17184#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1_MASK 0x00100000L
17185#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17186#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17187#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID_MASK 0x01000000L
17188#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID_MASK 0x02000000L
17189//SPI_PS_INPUT_CNTL_14
17190#define SPI_PS_INPUT_CNTL_14__OFFSET__SHIFT 0x0
17191#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL__SHIFT 0x8
17192#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE__SHIFT 0xa
17193#define SPI_PS_INPUT_CNTL_14__CYL_WRAP__SHIFT 0xd
17194#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX__SHIFT 0x11
17195#define SPI_PS_INPUT_CNTL_14__DUP__SHIFT 0x12
17196#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE__SHIFT 0x13
17197#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1__SHIFT 0x14
17198#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1__SHIFT 0x15
17199#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17200#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID__SHIFT 0x18
17201#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID__SHIFT 0x19
17202#define SPI_PS_INPUT_CNTL_14__OFFSET_MASK 0x0000003FL
17203#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_MASK 0x00000300L
17204#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE_MASK 0x00000400L
17205#define SPI_PS_INPUT_CNTL_14__CYL_WRAP_MASK 0x0001E000L
17206#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_MASK 0x00020000L
17207#define SPI_PS_INPUT_CNTL_14__DUP_MASK 0x00040000L
17208#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE_MASK 0x00080000L
17209#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1_MASK 0x00100000L
17210#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17211#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17212#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID_MASK 0x01000000L
17213#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID_MASK 0x02000000L
17214//SPI_PS_INPUT_CNTL_15
17215#define SPI_PS_INPUT_CNTL_15__OFFSET__SHIFT 0x0
17216#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL__SHIFT 0x8
17217#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE__SHIFT 0xa
17218#define SPI_PS_INPUT_CNTL_15__CYL_WRAP__SHIFT 0xd
17219#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX__SHIFT 0x11
17220#define SPI_PS_INPUT_CNTL_15__DUP__SHIFT 0x12
17221#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE__SHIFT 0x13
17222#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1__SHIFT 0x14
17223#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1__SHIFT 0x15
17224#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17225#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID__SHIFT 0x18
17226#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID__SHIFT 0x19
17227#define SPI_PS_INPUT_CNTL_15__OFFSET_MASK 0x0000003FL
17228#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_MASK 0x00000300L
17229#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE_MASK 0x00000400L
17230#define SPI_PS_INPUT_CNTL_15__CYL_WRAP_MASK 0x0001E000L
17231#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_MASK 0x00020000L
17232#define SPI_PS_INPUT_CNTL_15__DUP_MASK 0x00040000L
17233#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE_MASK 0x00080000L
17234#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1_MASK 0x00100000L
17235#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17236#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17237#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID_MASK 0x01000000L
17238#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID_MASK 0x02000000L
17239//SPI_PS_INPUT_CNTL_16
17240#define SPI_PS_INPUT_CNTL_16__OFFSET__SHIFT 0x0
17241#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL__SHIFT 0x8
17242#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE__SHIFT 0xa
17243#define SPI_PS_INPUT_CNTL_16__CYL_WRAP__SHIFT 0xd
17244#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX__SHIFT 0x11
17245#define SPI_PS_INPUT_CNTL_16__DUP__SHIFT 0x12
17246#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE__SHIFT 0x13
17247#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1__SHIFT 0x14
17248#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1__SHIFT 0x15
17249#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17250#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID__SHIFT 0x18
17251#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID__SHIFT 0x19
17252#define SPI_PS_INPUT_CNTL_16__OFFSET_MASK 0x0000003FL
17253#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_MASK 0x00000300L
17254#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE_MASK 0x00000400L
17255#define SPI_PS_INPUT_CNTL_16__CYL_WRAP_MASK 0x0001E000L
17256#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_MASK 0x00020000L
17257#define SPI_PS_INPUT_CNTL_16__DUP_MASK 0x00040000L
17258#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE_MASK 0x00080000L
17259#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1_MASK 0x00100000L
17260#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17261#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17262#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID_MASK 0x01000000L
17263#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID_MASK 0x02000000L
17264//SPI_PS_INPUT_CNTL_17
17265#define SPI_PS_INPUT_CNTL_17__OFFSET__SHIFT 0x0
17266#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL__SHIFT 0x8
17267#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE__SHIFT 0xa
17268#define SPI_PS_INPUT_CNTL_17__CYL_WRAP__SHIFT 0xd
17269#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX__SHIFT 0x11
17270#define SPI_PS_INPUT_CNTL_17__DUP__SHIFT 0x12
17271#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE__SHIFT 0x13
17272#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1__SHIFT 0x14
17273#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1__SHIFT 0x15
17274#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17275#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID__SHIFT 0x18
17276#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID__SHIFT 0x19
17277#define SPI_PS_INPUT_CNTL_17__OFFSET_MASK 0x0000003FL
17278#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_MASK 0x00000300L
17279#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE_MASK 0x00000400L
17280#define SPI_PS_INPUT_CNTL_17__CYL_WRAP_MASK 0x0001E000L
17281#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_MASK 0x00020000L
17282#define SPI_PS_INPUT_CNTL_17__DUP_MASK 0x00040000L
17283#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE_MASK 0x00080000L
17284#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1_MASK 0x00100000L
17285#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17286#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17287#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID_MASK 0x01000000L
17288#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID_MASK 0x02000000L
17289//SPI_PS_INPUT_CNTL_18
17290#define SPI_PS_INPUT_CNTL_18__OFFSET__SHIFT 0x0
17291#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL__SHIFT 0x8
17292#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE__SHIFT 0xa
17293#define SPI_PS_INPUT_CNTL_18__CYL_WRAP__SHIFT 0xd
17294#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX__SHIFT 0x11
17295#define SPI_PS_INPUT_CNTL_18__DUP__SHIFT 0x12
17296#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE__SHIFT 0x13
17297#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1__SHIFT 0x14
17298#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1__SHIFT 0x15
17299#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17300#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID__SHIFT 0x18
17301#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID__SHIFT 0x19
17302#define SPI_PS_INPUT_CNTL_18__OFFSET_MASK 0x0000003FL
17303#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_MASK 0x00000300L
17304#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE_MASK 0x00000400L
17305#define SPI_PS_INPUT_CNTL_18__CYL_WRAP_MASK 0x0001E000L
17306#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_MASK 0x00020000L
17307#define SPI_PS_INPUT_CNTL_18__DUP_MASK 0x00040000L
17308#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE_MASK 0x00080000L
17309#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1_MASK 0x00100000L
17310#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17311#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17312#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID_MASK 0x01000000L
17313#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID_MASK 0x02000000L
17314//SPI_PS_INPUT_CNTL_19
17315#define SPI_PS_INPUT_CNTL_19__OFFSET__SHIFT 0x0
17316#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL__SHIFT 0x8
17317#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE__SHIFT 0xa
17318#define SPI_PS_INPUT_CNTL_19__CYL_WRAP__SHIFT 0xd
17319#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX__SHIFT 0x11
17320#define SPI_PS_INPUT_CNTL_19__DUP__SHIFT 0x12
17321#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE__SHIFT 0x13
17322#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1__SHIFT 0x14
17323#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1__SHIFT 0x15
17324#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
17325#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID__SHIFT 0x18
17326#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID__SHIFT 0x19
17327#define SPI_PS_INPUT_CNTL_19__OFFSET_MASK 0x0000003FL
17328#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_MASK 0x00000300L
17329#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE_MASK 0x00000400L
17330#define SPI_PS_INPUT_CNTL_19__CYL_WRAP_MASK 0x0001E000L
17331#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_MASK 0x00020000L
17332#define SPI_PS_INPUT_CNTL_19__DUP_MASK 0x00040000L
17333#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE_MASK 0x00080000L
17334#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1_MASK 0x00100000L
17335#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17336#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
17337#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID_MASK 0x01000000L
17338#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID_MASK 0x02000000L
17339//SPI_PS_INPUT_CNTL_20
17340#define SPI_PS_INPUT_CNTL_20__OFFSET__SHIFT 0x0
17341#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL__SHIFT 0x8
17342#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE__SHIFT 0xa
17343#define SPI_PS_INPUT_CNTL_20__DUP__SHIFT 0x12
17344#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE__SHIFT 0x13
17345#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1__SHIFT 0x14
17346#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1__SHIFT 0x15
17347#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID__SHIFT 0x18
17348#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID__SHIFT 0x19
17349#define SPI_PS_INPUT_CNTL_20__OFFSET_MASK 0x0000003FL
17350#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_MASK 0x00000300L
17351#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE_MASK 0x00000400L
17352#define SPI_PS_INPUT_CNTL_20__DUP_MASK 0x00040000L
17353#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE_MASK 0x00080000L
17354#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1_MASK 0x00100000L
17355#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17356#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID_MASK 0x01000000L
17357#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID_MASK 0x02000000L
17358//SPI_PS_INPUT_CNTL_21
17359#define SPI_PS_INPUT_CNTL_21__OFFSET__SHIFT 0x0
17360#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL__SHIFT 0x8
17361#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE__SHIFT 0xa
17362#define SPI_PS_INPUT_CNTL_21__DUP__SHIFT 0x12
17363#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE__SHIFT 0x13
17364#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1__SHIFT 0x14
17365#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1__SHIFT 0x15
17366#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID__SHIFT 0x18
17367#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID__SHIFT 0x19
17368#define SPI_PS_INPUT_CNTL_21__OFFSET_MASK 0x0000003FL
17369#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_MASK 0x00000300L
17370#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE_MASK 0x00000400L
17371#define SPI_PS_INPUT_CNTL_21__DUP_MASK 0x00040000L
17372#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE_MASK 0x00080000L
17373#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1_MASK 0x00100000L
17374#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17375#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID_MASK 0x01000000L
17376#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID_MASK 0x02000000L
17377//SPI_PS_INPUT_CNTL_22
17378#define SPI_PS_INPUT_CNTL_22__OFFSET__SHIFT 0x0
17379#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL__SHIFT 0x8
17380#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE__SHIFT 0xa
17381#define SPI_PS_INPUT_CNTL_22__DUP__SHIFT 0x12
17382#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE__SHIFT 0x13
17383#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1__SHIFT 0x14
17384#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1__SHIFT 0x15
17385#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID__SHIFT 0x18
17386#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID__SHIFT 0x19
17387#define SPI_PS_INPUT_CNTL_22__OFFSET_MASK 0x0000003FL
17388#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_MASK 0x00000300L
17389#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE_MASK 0x00000400L
17390#define SPI_PS_INPUT_CNTL_22__DUP_MASK 0x00040000L
17391#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE_MASK 0x00080000L
17392#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1_MASK 0x00100000L
17393#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17394#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID_MASK 0x01000000L
17395#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID_MASK 0x02000000L
17396//SPI_PS_INPUT_CNTL_23
17397#define SPI_PS_INPUT_CNTL_23__OFFSET__SHIFT 0x0
17398#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL__SHIFT 0x8
17399#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE__SHIFT 0xa
17400#define SPI_PS_INPUT_CNTL_23__DUP__SHIFT 0x12
17401#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE__SHIFT 0x13
17402#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1__SHIFT 0x14
17403#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1__SHIFT 0x15
17404#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID__SHIFT 0x18
17405#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID__SHIFT 0x19
17406#define SPI_PS_INPUT_CNTL_23__OFFSET_MASK 0x0000003FL
17407#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_MASK 0x00000300L
17408#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE_MASK 0x00000400L
17409#define SPI_PS_INPUT_CNTL_23__DUP_MASK 0x00040000L
17410#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE_MASK 0x00080000L
17411#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1_MASK 0x00100000L
17412#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17413#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID_MASK 0x01000000L
17414#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID_MASK 0x02000000L
17415//SPI_PS_INPUT_CNTL_24
17416#define SPI_PS_INPUT_CNTL_24__OFFSET__SHIFT 0x0
17417#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL__SHIFT 0x8
17418#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE__SHIFT 0xa
17419#define SPI_PS_INPUT_CNTL_24__DUP__SHIFT 0x12
17420#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE__SHIFT 0x13
17421#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1__SHIFT 0x14
17422#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1__SHIFT 0x15
17423#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID__SHIFT 0x18
17424#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID__SHIFT 0x19
17425#define SPI_PS_INPUT_CNTL_24__OFFSET_MASK 0x0000003FL
17426#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_MASK 0x00000300L
17427#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE_MASK 0x00000400L
17428#define SPI_PS_INPUT_CNTL_24__DUP_MASK 0x00040000L
17429#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE_MASK 0x00080000L
17430#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1_MASK 0x00100000L
17431#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17432#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID_MASK 0x01000000L
17433#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID_MASK 0x02000000L
17434//SPI_PS_INPUT_CNTL_25
17435#define SPI_PS_INPUT_CNTL_25__OFFSET__SHIFT 0x0
17436#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL__SHIFT 0x8
17437#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE__SHIFT 0xa
17438#define SPI_PS_INPUT_CNTL_25__DUP__SHIFT 0x12
17439#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE__SHIFT 0x13
17440#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1__SHIFT 0x14
17441#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1__SHIFT 0x15
17442#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID__SHIFT 0x18
17443#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID__SHIFT 0x19
17444#define SPI_PS_INPUT_CNTL_25__OFFSET_MASK 0x0000003FL
17445#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_MASK 0x00000300L
17446#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE_MASK 0x00000400L
17447#define SPI_PS_INPUT_CNTL_25__DUP_MASK 0x00040000L
17448#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE_MASK 0x00080000L
17449#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1_MASK 0x00100000L
17450#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17451#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID_MASK 0x01000000L
17452#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID_MASK 0x02000000L
17453//SPI_PS_INPUT_CNTL_26
17454#define SPI_PS_INPUT_CNTL_26__OFFSET__SHIFT 0x0
17455#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL__SHIFT 0x8
17456#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE__SHIFT 0xa
17457#define SPI_PS_INPUT_CNTL_26__DUP__SHIFT 0x12
17458#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE__SHIFT 0x13
17459#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1__SHIFT 0x14
17460#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1__SHIFT 0x15
17461#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID__SHIFT 0x18
17462#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID__SHIFT 0x19
17463#define SPI_PS_INPUT_CNTL_26__OFFSET_MASK 0x0000003FL
17464#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_MASK 0x00000300L
17465#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE_MASK 0x00000400L
17466#define SPI_PS_INPUT_CNTL_26__DUP_MASK 0x00040000L
17467#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE_MASK 0x00080000L
17468#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1_MASK 0x00100000L
17469#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17470#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID_MASK 0x01000000L
17471#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID_MASK 0x02000000L
17472//SPI_PS_INPUT_CNTL_27
17473#define SPI_PS_INPUT_CNTL_27__OFFSET__SHIFT 0x0
17474#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL__SHIFT 0x8
17475#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE__SHIFT 0xa
17476#define SPI_PS_INPUT_CNTL_27__DUP__SHIFT 0x12
17477#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE__SHIFT 0x13
17478#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1__SHIFT 0x14
17479#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1__SHIFT 0x15
17480#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID__SHIFT 0x18
17481#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID__SHIFT 0x19
17482#define SPI_PS_INPUT_CNTL_27__OFFSET_MASK 0x0000003FL
17483#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_MASK 0x00000300L
17484#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE_MASK 0x00000400L
17485#define SPI_PS_INPUT_CNTL_27__DUP_MASK 0x00040000L
17486#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE_MASK 0x00080000L
17487#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1_MASK 0x00100000L
17488#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17489#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID_MASK 0x01000000L
17490#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID_MASK 0x02000000L
17491//SPI_PS_INPUT_CNTL_28
17492#define SPI_PS_INPUT_CNTL_28__OFFSET__SHIFT 0x0
17493#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL__SHIFT 0x8
17494#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE__SHIFT 0xa
17495#define SPI_PS_INPUT_CNTL_28__DUP__SHIFT 0x12
17496#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE__SHIFT 0x13
17497#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1__SHIFT 0x14
17498#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1__SHIFT 0x15
17499#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID__SHIFT 0x18
17500#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID__SHIFT 0x19
17501#define SPI_PS_INPUT_CNTL_28__OFFSET_MASK 0x0000003FL
17502#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_MASK 0x00000300L
17503#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE_MASK 0x00000400L
17504#define SPI_PS_INPUT_CNTL_28__DUP_MASK 0x00040000L
17505#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE_MASK 0x00080000L
17506#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1_MASK 0x00100000L
17507#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17508#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID_MASK 0x01000000L
17509#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID_MASK 0x02000000L
17510//SPI_PS_INPUT_CNTL_29
17511#define SPI_PS_INPUT_CNTL_29__OFFSET__SHIFT 0x0
17512#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL__SHIFT 0x8
17513#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE__SHIFT 0xa
17514#define SPI_PS_INPUT_CNTL_29__DUP__SHIFT 0x12
17515#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE__SHIFT 0x13
17516#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1__SHIFT 0x14
17517#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1__SHIFT 0x15
17518#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID__SHIFT 0x18
17519#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID__SHIFT 0x19
17520#define SPI_PS_INPUT_CNTL_29__OFFSET_MASK 0x0000003FL
17521#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_MASK 0x00000300L
17522#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE_MASK 0x00000400L
17523#define SPI_PS_INPUT_CNTL_29__DUP_MASK 0x00040000L
17524#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE_MASK 0x00080000L
17525#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1_MASK 0x00100000L
17526#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17527#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID_MASK 0x01000000L
17528#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID_MASK 0x02000000L
17529//SPI_PS_INPUT_CNTL_30
17530#define SPI_PS_INPUT_CNTL_30__OFFSET__SHIFT 0x0
17531#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL__SHIFT 0x8
17532#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE__SHIFT 0xa
17533#define SPI_PS_INPUT_CNTL_30__DUP__SHIFT 0x12
17534#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE__SHIFT 0x13
17535#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1__SHIFT 0x14
17536#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1__SHIFT 0x15
17537#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID__SHIFT 0x18
17538#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID__SHIFT 0x19
17539#define SPI_PS_INPUT_CNTL_30__OFFSET_MASK 0x0000003FL
17540#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_MASK 0x00000300L
17541#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE_MASK 0x00000400L
17542#define SPI_PS_INPUT_CNTL_30__DUP_MASK 0x00040000L
17543#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE_MASK 0x00080000L
17544#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1_MASK 0x00100000L
17545#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17546#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID_MASK 0x01000000L
17547#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID_MASK 0x02000000L
17548//SPI_PS_INPUT_CNTL_31
17549#define SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT 0x0
17550#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL__SHIFT 0x8
17551#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT 0xa
17552#define SPI_PS_INPUT_CNTL_31__DUP__SHIFT 0x12
17553#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE__SHIFT 0x13
17554#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1__SHIFT 0x14
17555#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1__SHIFT 0x15
17556#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID__SHIFT 0x18
17557#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID__SHIFT 0x19
17558#define SPI_PS_INPUT_CNTL_31__OFFSET_MASK 0x0000003FL
17559#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_MASK 0x00000300L
17560#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK 0x00000400L
17561#define SPI_PS_INPUT_CNTL_31__DUP_MASK 0x00040000L
17562#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE_MASK 0x00080000L
17563#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1_MASK 0x00100000L
17564#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1_MASK 0x00600000L
17565#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID_MASK 0x01000000L
17566#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID_MASK 0x02000000L
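/*
 * Illustrative sketch only (not part of the generated register header):
 * each __SHIFT/_MASK pair above is meant to be used together when packing
 * a field into a register word. The hypothetical helper below shows the
 * shift-then-mask composition for two SPI_PS_INPUT_CNTL_31 fields; real
 * driver code typically goes through its own field-access wrappers.
 */
static inline unsigned int
spi_ps_input_cntl_31_pack(unsigned int offset, unsigned int flat_shade)
{
	unsigned int v = 0;

	/* Shift each field into place, then clamp it with the matching mask. */
	v |= (offset << SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT) &
	     SPI_PS_INPUT_CNTL_31__OFFSET_MASK;
	v |= (flat_shade << SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT) &
	     SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK;
	return v;
}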
17567//SPI_VS_OUT_CONFIG
17568#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT__SHIFT 0x1
17569#define SPI_VS_OUT_CONFIG__VS_HALF_PACK__SHIFT 0x6
17570#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT_MASK 0x0000003EL
17571#define SPI_VS_OUT_CONFIG__VS_HALF_PACK_MASK 0x00000040L
17572//SPI_PS_INPUT_ENA
17573#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA__SHIFT 0x0
17574#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT 0x1
17575#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA__SHIFT 0x2
17576#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA__SHIFT 0x3
17577#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA__SHIFT 0x4
17578#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA__SHIFT 0x5
17579#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA__SHIFT 0x6
17580#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
17581#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA__SHIFT 0x8
17582#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA__SHIFT 0x9
17583#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA__SHIFT 0xa
17584#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA__SHIFT 0xb
17585#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT 0xc
17586#define SPI_PS_INPUT_ENA__ANCILLARY_ENA__SHIFT 0xd
17587#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA__SHIFT 0xe
17588#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA__SHIFT 0xf
17589#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA_MASK 0x00000001L
17590#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK 0x00000002L
17591#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA_MASK 0x00000004L
17592#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
17593#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA_MASK 0x00000010L
17594#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA_MASK 0x00000020L
17595#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA_MASK 0x00000040L
17596#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
17597#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA_MASK 0x00000100L
17598#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA_MASK 0x00000200L
17599#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA_MASK 0x00000400L
17600#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA_MASK 0x00000800L
17601#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK 0x00001000L
17602#define SPI_PS_INPUT_ENA__ANCILLARY_ENA_MASK 0x00002000L
17603#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
17604#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA_MASK 0x00008000L
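/*
 * Illustrative sketch only: decoding a single-bit field back out of an
 * SPI_PS_INPUT_ENA value. The helper name is hypothetical; it simply
 * demonstrates the mask-then-shift read that the definitions above imply.
 */
static inline unsigned int
spi_ps_input_ena_persp_center(unsigned int reg_val)
{
	/* Isolate the bit with the mask, then move it down to bit 0. */
	return (reg_val & SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK) >>
	       SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT;
}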
17605//SPI_PS_INPUT_ADDR
17606#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA__SHIFT 0x0
17607#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA__SHIFT 0x1
17608#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA__SHIFT 0x2
17609#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA__SHIFT 0x3
17610#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA__SHIFT 0x4
17611#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA__SHIFT 0x5
17612#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA__SHIFT 0x6
17613#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
17614#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA__SHIFT 0x8
17615#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA__SHIFT 0x9
17616#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA__SHIFT 0xa
17617#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA__SHIFT 0xb
17618#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA__SHIFT 0xc
17619#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA__SHIFT 0xd
17620#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA__SHIFT 0xe
17621#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA__SHIFT 0xf
17622#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA_MASK 0x00000001L
17623#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA_MASK 0x00000002L
17624#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA_MASK 0x00000004L
17625#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
17626#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA_MASK 0x00000010L
17627#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA_MASK 0x00000020L
17628#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA_MASK 0x00000040L
17629#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
17630#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA_MASK 0x00000100L
17631#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA_MASK 0x00000200L
17632#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA_MASK 0x00000400L
17633#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA_MASK 0x00000800L
17634#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA_MASK 0x00001000L
17635#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA_MASK 0x00002000L
17636#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
17637#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA_MASK 0x00008000L
17638//SPI_INTERP_CONTROL_0
17639#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA__SHIFT 0x0
17640#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA__SHIFT 0x1
17641#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X__SHIFT 0x2
17642#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y__SHIFT 0x5
17643#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z__SHIFT 0x8
17644#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W__SHIFT 0xb
17645#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1__SHIFT 0xe
17646#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA_MASK 0x00000001L
17647#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA_MASK 0x00000002L
17648#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X_MASK 0x0000001CL
17649#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y_MASK 0x000000E0L
17650#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z_MASK 0x00000700L
17651#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W_MASK 0x00003800L
17652#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1_MASK 0x00004000L
17653//SPI_PS_IN_CONTROL
17654#define SPI_PS_IN_CONTROL__NUM_INTERP__SHIFT 0x0
17655#define SPI_PS_IN_CONTROL__PARAM_GEN__SHIFT 0x6
17656#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN__SHIFT 0x7
17657#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC__SHIFT 0x8
17658#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE__SHIFT 0xe
17659#define SPI_PS_IN_CONTROL__NUM_INTERP_MASK 0x0000003FL
17660#define SPI_PS_IN_CONTROL__PARAM_GEN_MASK 0x00000040L
17661#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN_MASK 0x00000080L
17662#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC_MASK 0x00000100L
17663#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE_MASK 0x00004000L
17664//SPI_BARYC_CNTL
17665#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL__SHIFT 0x0
17666#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL__SHIFT 0x4
17667#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL__SHIFT 0x8
17668#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL__SHIFT 0xc
17669#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION__SHIFT 0x10
17670#define SPI_BARYC_CNTL__POS_FLOAT_ULC__SHIFT 0x14
17671#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS__SHIFT 0x18
17672#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL_MASK 0x00000001L
17673#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL_MASK 0x00000010L
17674#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL_MASK 0x00000100L
17675#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL_MASK 0x00001000L
17676#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION_MASK 0x00030000L
17677#define SPI_BARYC_CNTL__POS_FLOAT_ULC_MASK 0x00100000L
17678#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS_MASK 0x01000000L
17679//SPI_TMPRING_SIZE
17680#define SPI_TMPRING_SIZE__WAVES__SHIFT 0x0
17681#define SPI_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
17682#define SPI_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
17683#define SPI_TMPRING_SIZE__WAVESIZE_MASK 0x01FFF000L
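/*
 * Illustrative sketch, not from the original header: SPI_TMPRING_SIZE packs
 * a wave count and a per-wave scratch size into one word. This hypothetical
 * helper only shows how the two fields combine under the masks above.
 */
static inline unsigned int
spi_tmpring_size_pack(unsigned int waves, unsigned int wavesize)
{
	return ((waves << SPI_TMPRING_SIZE__WAVES__SHIFT) &
		SPI_TMPRING_SIZE__WAVES_MASK) |
	       ((wavesize << SPI_TMPRING_SIZE__WAVESIZE__SHIFT) &
		SPI_TMPRING_SIZE__WAVESIZE_MASK);
}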
17684//SPI_SHADER_POS_FORMAT
17685#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT__SHIFT 0x0
17686#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT__SHIFT 0x4
17687#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT__SHIFT 0x8
17688#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT__SHIFT 0xc
17689#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT_MASK 0x0000000FL
17690#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT_MASK 0x000000F0L
17691#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT_MASK 0x00000F00L
17692#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT_MASK 0x0000F000L
17693//SPI_SHADER_Z_FORMAT
17694#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT__SHIFT 0x0
17695#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT_MASK 0x0000000FL
17696//SPI_SHADER_COL_FORMAT
17697#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT 0x0
17698#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT 0x4
17699#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT__SHIFT 0x8
17700#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT__SHIFT 0xc
17701#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT__SHIFT 0x10
17702#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT__SHIFT 0x14
17703#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT__SHIFT 0x18
17704#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT__SHIFT 0x1c
17705#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK 0x0000000FL
17706#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK 0x000000F0L
17707#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT_MASK 0x00000F00L
17708#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT_MASK 0x0000F000L
17709#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT_MASK 0x000F0000L
17710#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT_MASK 0x00F00000L
17711#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT_MASK 0x0F000000L
17712#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT_MASK 0xF0000000L
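/*
 * Illustrative sketch (hypothetical helper): SPI_SHADER_COL_FORMAT carves
 * the word into one 4-bit export format per MRT, so writing MRT0 and MRT1
 * only touches their own nibbles thanks to the masks above.
 */
static inline unsigned int
spi_shader_col_format_pack01(unsigned int fmt0, unsigned int fmt1)
{
	return ((fmt0 << SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT) &
		SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK) |
	       ((fmt1 << SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT) &
		SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK);
}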
17713//SX_PS_DOWNCONVERT
17714#define SX_PS_DOWNCONVERT__MRT0__SHIFT 0x0
17715#define SX_PS_DOWNCONVERT__MRT1__SHIFT 0x4
17716#define SX_PS_DOWNCONVERT__MRT2__SHIFT 0x8
17717#define SX_PS_DOWNCONVERT__MRT3__SHIFT 0xc
17718#define SX_PS_DOWNCONVERT__MRT4__SHIFT 0x10
17719#define SX_PS_DOWNCONVERT__MRT5__SHIFT 0x14
17720#define SX_PS_DOWNCONVERT__MRT6__SHIFT 0x18
17721#define SX_PS_DOWNCONVERT__MRT7__SHIFT 0x1c
17722#define SX_PS_DOWNCONVERT__MRT0_MASK 0x0000000FL
17723#define SX_PS_DOWNCONVERT__MRT1_MASK 0x000000F0L
17724#define SX_PS_DOWNCONVERT__MRT2_MASK 0x00000F00L
17725#define SX_PS_DOWNCONVERT__MRT3_MASK 0x0000F000L
17726#define SX_PS_DOWNCONVERT__MRT4_MASK 0x000F0000L
17727#define SX_PS_DOWNCONVERT__MRT5_MASK 0x00F00000L
17728#define SX_PS_DOWNCONVERT__MRT6_MASK 0x0F000000L
17729#define SX_PS_DOWNCONVERT__MRT7_MASK 0xF0000000L
17730//SX_BLEND_OPT_EPSILON
17731#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON__SHIFT 0x0
17732#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON__SHIFT 0x4
17733#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON__SHIFT 0x8
17734#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON__SHIFT 0xc
17735#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON__SHIFT 0x10
17736#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON__SHIFT 0x14
17737#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON__SHIFT 0x18
17738#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON__SHIFT 0x1c
17739#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON_MASK 0x0000000FL
17740#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON_MASK 0x000000F0L
17741#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON_MASK 0x00000F00L
17742#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON_MASK 0x0000F000L
17743#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON_MASK 0x000F0000L
17744#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON_MASK 0x00F00000L
17745#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON_MASK 0x0F000000L
17746#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON_MASK 0xF0000000L
17747//SX_BLEND_OPT_CONTROL
17748#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE__SHIFT 0x0
17749#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE__SHIFT 0x1
17750#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE__SHIFT 0x4
17751#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE__SHIFT 0x5
17752#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE__SHIFT 0x8
17753#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE__SHIFT 0x9
17754#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE__SHIFT 0xc
17755#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE__SHIFT 0xd
17756#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE__SHIFT 0x10
17757#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE__SHIFT 0x11
17758#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE__SHIFT 0x14
17759#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE__SHIFT 0x15
17760#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE__SHIFT 0x18
17761#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE__SHIFT 0x19
17762#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE__SHIFT 0x1c
17763#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE__SHIFT 0x1d
17764#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE__SHIFT 0x1f
17765#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE_MASK 0x00000001L
17766#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE_MASK 0x00000002L
17767#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE_MASK 0x00000010L
17768#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE_MASK 0x00000020L
17769#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE_MASK 0x00000100L
17770#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE_MASK 0x00000200L
17771#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE_MASK 0x00001000L
17772#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE_MASK 0x00002000L
17773#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE_MASK 0x00010000L
17774#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE_MASK 0x00020000L
17775#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE_MASK 0x00100000L
17776#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE_MASK 0x00200000L
17777#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE_MASK 0x01000000L
17778#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE_MASK 0x02000000L
17779#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE_MASK 0x10000000L
17780#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE_MASK 0x20000000L
17781#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE_MASK 0x80000000L
17782//SX_MRT0_BLEND_OPT
17783#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
17784#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
17785#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
17786#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
17787#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
17788#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
17789#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
17790#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
17791#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
17792#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
17793#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
17794#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
17795//SX_MRT1_BLEND_OPT
17796#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
17797#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
17798#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
17799#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
17800#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
17801#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
17802#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
17803#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
17804#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
17805#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
17806#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
17807#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
17808//SX_MRT2_BLEND_OPT
17809#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
17810#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
17811#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
17812#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
17813#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
17814#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
17815#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
17816#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
17817#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
17818#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
17819#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
17820#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
17821//SX_MRT3_BLEND_OPT
17822#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
17823#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
17824#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
17825#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
17826#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
17827#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
17828#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
17829#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
17830#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
17831#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
17832#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
17833#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
17834//SX_MRT4_BLEND_OPT
17835#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
17836#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
17837#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
17838#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
17839#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
17840#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
17841#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
17842#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
17843#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
17844#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
17845#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
17846#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
17847//SX_MRT5_BLEND_OPT
17848#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
17849#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
17850#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
17851#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
17852#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
17853#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
17854#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
17855#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
17856#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
17857#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
17858#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
17859#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
17860//SX_MRT6_BLEND_OPT
17861#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
17862#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
17863#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
17864#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
17865#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
17866#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
17867#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
17868#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
17869#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
17870#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
17871#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
17872#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
17873//SX_MRT7_BLEND_OPT
17874#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
17875#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
17876#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
17877#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
17878#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
17879#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
17880#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
17881#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
17882#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
17883#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
17884#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
17885#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
17886//CB_BLEND0_CONTROL
17887#define CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
17888#define CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
17889#define CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
17890#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
17891#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
17892#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
17893#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
17894#define CB_BLEND0_CONTROL__ENABLE__SHIFT 0x1e
17895#define CB_BLEND0_CONTROL__DISABLE_ROP3__SHIFT 0x1f
17896#define CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
17897#define CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
17898#define CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
17899#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
17900#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
17901#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
17902#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
17903#define CB_BLEND0_CONTROL__ENABLE_MASK 0x40000000L
17904#define CB_BLEND0_CONTROL__DISABLE_ROP3_MASK 0x80000000L
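/*
 * Illustrative sketch (hypothetical helper, not part of the header): the
 * CB_BLENDn_CONTROL fields above combine the source/destination blend
 * factors, the combine function and the per-target enable bit into one
 * 32-bit word.
 */
static inline unsigned int
cb_blend0_control_pack(unsigned int src, unsigned int comb_fcn,
		       unsigned int dst, int enable)
{
	unsigned int v = 0;

	v |= (src << CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT) &
	     CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK;
	v |= (comb_fcn << CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT) &
	     CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK;
	v |= (dst << CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT) &
	     CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK;
	if (enable)
		v |= CB_BLEND0_CONTROL__ENABLE_MASK;
	return v;
}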
17905//CB_BLEND1_CONTROL
17906#define CB_BLEND1_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
17907#define CB_BLEND1_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
17908#define CB_BLEND1_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
17909#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
17910#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
17911#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
17912#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
17913#define CB_BLEND1_CONTROL__ENABLE__SHIFT 0x1e
17914#define CB_BLEND1_CONTROL__DISABLE_ROP3__SHIFT 0x1f
17915#define CB_BLEND1_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
17916#define CB_BLEND1_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
17917#define CB_BLEND1_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
17918#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
17919#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
17920#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
17921#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
17922#define CB_BLEND1_CONTROL__ENABLE_MASK 0x40000000L
17923#define CB_BLEND1_CONTROL__DISABLE_ROP3_MASK 0x80000000L
17924//CB_BLEND2_CONTROL
17925#define CB_BLEND2_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
17926#define CB_BLEND2_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
17927#define CB_BLEND2_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
17928#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
17929#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
17930#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
17931#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
17932#define CB_BLEND2_CONTROL__ENABLE__SHIFT 0x1e
17933#define CB_BLEND2_CONTROL__DISABLE_ROP3__SHIFT 0x1f
17934#define CB_BLEND2_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
17935#define CB_BLEND2_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
17936#define CB_BLEND2_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
17937#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
17938#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
17939#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
17940#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
17941#define CB_BLEND2_CONTROL__ENABLE_MASK 0x40000000L
17942#define CB_BLEND2_CONTROL__DISABLE_ROP3_MASK 0x80000000L
17943//CB_BLEND3_CONTROL
17944#define CB_BLEND3_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
17945#define CB_BLEND3_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
17946#define CB_BLEND3_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
17947#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
17948#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
17949#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
17950#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
17951#define CB_BLEND3_CONTROL__ENABLE__SHIFT 0x1e
17952#define CB_BLEND3_CONTROL__DISABLE_ROP3__SHIFT 0x1f
17953#define CB_BLEND3_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
17954#define CB_BLEND3_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
17955#define CB_BLEND3_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
17956#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
17957#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
17958#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
17959#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
17960#define CB_BLEND3_CONTROL__ENABLE_MASK 0x40000000L
17961#define CB_BLEND3_CONTROL__DISABLE_ROP3_MASK 0x80000000L
17962//CB_BLEND4_CONTROL
17963#define CB_BLEND4_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
17964#define CB_BLEND4_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
17965#define CB_BLEND4_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
17966#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
17967#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
17968#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
17969#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
17970#define CB_BLEND4_CONTROL__ENABLE__SHIFT 0x1e
17971#define CB_BLEND4_CONTROL__DISABLE_ROP3__SHIFT 0x1f
17972#define CB_BLEND4_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
17973#define CB_BLEND4_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
17974#define CB_BLEND4_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
17975#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
17976#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
17977#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
17978#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
17979#define CB_BLEND4_CONTROL__ENABLE_MASK 0x40000000L
17980#define CB_BLEND4_CONTROL__DISABLE_ROP3_MASK 0x80000000L
17981//CB_BLEND5_CONTROL
17982#define CB_BLEND5_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
17983#define CB_BLEND5_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
17984#define CB_BLEND5_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
17985#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
17986#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
17987#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
17988#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
17989#define CB_BLEND5_CONTROL__ENABLE__SHIFT 0x1e
17990#define CB_BLEND5_CONTROL__DISABLE_ROP3__SHIFT 0x1f
17991#define CB_BLEND5_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
17992#define CB_BLEND5_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
17993#define CB_BLEND5_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
17994#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
17995#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
17996#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
17997#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
17998#define CB_BLEND5_CONTROL__ENABLE_MASK 0x40000000L
17999#define CB_BLEND5_CONTROL__DISABLE_ROP3_MASK 0x80000000L
18000//CB_BLEND6_CONTROL
18001#define CB_BLEND6_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
18002#define CB_BLEND6_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
18003#define CB_BLEND6_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
18004#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
18005#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
18006#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
18007#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
18008#define CB_BLEND6_CONTROL__ENABLE__SHIFT 0x1e
18009#define CB_BLEND6_CONTROL__DISABLE_ROP3__SHIFT 0x1f
18010#define CB_BLEND6_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
18011#define CB_BLEND6_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
18012#define CB_BLEND6_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
18013#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
18014#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
18015#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
18016#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
18017#define CB_BLEND6_CONTROL__ENABLE_MASK 0x40000000L
18018#define CB_BLEND6_CONTROL__DISABLE_ROP3_MASK 0x80000000L
18019//CB_BLEND7_CONTROL
18020#define CB_BLEND7_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
18021#define CB_BLEND7_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
18022#define CB_BLEND7_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
18023#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
18024#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
18025#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
18026#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
18027#define CB_BLEND7_CONTROL__ENABLE__SHIFT 0x1e
18028#define CB_BLEND7_CONTROL__DISABLE_ROP3__SHIFT 0x1f
18029#define CB_BLEND7_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
18030#define CB_BLEND7_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
18031#define CB_BLEND7_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
18032#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
18033#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
18034#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
18035#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
18036#define CB_BLEND7_CONTROL__ENABLE_MASK 0x40000000L
18037#define CB_BLEND7_CONTROL__DISABLE_ROP3_MASK 0x80000000L
18038//CB_MRT0_EPITCH
18039#define CB_MRT0_EPITCH__EPITCH__SHIFT 0x0
18040#define CB_MRT0_EPITCH__EPITCH_MASK 0x0000FFFFL
18041//CB_MRT1_EPITCH
18042#define CB_MRT1_EPITCH__EPITCH__SHIFT 0x0
18043#define CB_MRT1_EPITCH__EPITCH_MASK 0x0000FFFFL
18044//CB_MRT2_EPITCH
18045#define CB_MRT2_EPITCH__EPITCH__SHIFT 0x0
18046#define CB_MRT2_EPITCH__EPITCH_MASK 0x0000FFFFL
18047//CB_MRT3_EPITCH
18048#define CB_MRT3_EPITCH__EPITCH__SHIFT 0x0
18049#define CB_MRT3_EPITCH__EPITCH_MASK 0x0000FFFFL
18050//CB_MRT4_EPITCH
18051#define CB_MRT4_EPITCH__EPITCH__SHIFT 0x0
18052#define CB_MRT4_EPITCH__EPITCH_MASK 0x0000FFFFL
18053//CB_MRT5_EPITCH
18054#define CB_MRT5_EPITCH__EPITCH__SHIFT 0x0
18055#define CB_MRT5_EPITCH__EPITCH_MASK 0x0000FFFFL
18056//CB_MRT6_EPITCH
18057#define CB_MRT6_EPITCH__EPITCH__SHIFT 0x0
18058#define CB_MRT6_EPITCH__EPITCH_MASK 0x0000FFFFL
18059//CB_MRT7_EPITCH
18060#define CB_MRT7_EPITCH__EPITCH__SHIFT 0x0
18061#define CB_MRT7_EPITCH__EPITCH_MASK 0x0000FFFFL
18062//CS_COPY_STATE
18063#define CS_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
18064#define CS_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
18065//GFX_COPY_STATE
18066#define GFX_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
18067#define GFX_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
18068//PA_CL_POINT_X_RAD
18069#define PA_CL_POINT_X_RAD__DATA_REGISTER__SHIFT 0x0
18070#define PA_CL_POINT_X_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
18071//PA_CL_POINT_Y_RAD
18072#define PA_CL_POINT_Y_RAD__DATA_REGISTER__SHIFT 0x0
18073#define PA_CL_POINT_Y_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
18074//PA_CL_POINT_SIZE
18075#define PA_CL_POINT_SIZE__DATA_REGISTER__SHIFT 0x0
18076#define PA_CL_POINT_SIZE__DATA_REGISTER_MASK 0xFFFFFFFFL
18077//PA_CL_POINT_CULL_RAD
18078#define PA_CL_POINT_CULL_RAD__DATA_REGISTER__SHIFT 0x0
18079#define PA_CL_POINT_CULL_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
18080//VGT_DMA_BASE_HI
18081#define VGT_DMA_BASE_HI__BASE_ADDR__SHIFT 0x0
18082#define VGT_DMA_BASE_HI__BASE_ADDR_MASK 0x0000FFFFL
18083//VGT_DMA_BASE
18084#define VGT_DMA_BASE__BASE_ADDR__SHIFT 0x0
18085#define VGT_DMA_BASE__BASE_ADDR_MASK 0xFFFFFFFFL
18086//VGT_DRAW_INITIATOR
18087#define VGT_DRAW_INITIATOR__SOURCE_SELECT__SHIFT 0x0
18088#define VGT_DRAW_INITIATOR__MAJOR_MODE__SHIFT 0x2
18089#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX__SHIFT 0x4
18090#define VGT_DRAW_INITIATOR__NOT_EOP__SHIFT 0x5
18091#define VGT_DRAW_INITIATOR__USE_OPAQUE__SHIFT 0x6
18092#define VGT_DRAW_INITIATOR__UNROLLED_INST__SHIFT 0x7
18093#define VGT_DRAW_INITIATOR__GRBM_SKEW_NO_DEC__SHIFT 0x8
18094#define VGT_DRAW_INITIATOR__REG_RT_INDEX__SHIFT 0x1d
18095#define VGT_DRAW_INITIATOR__SOURCE_SELECT_MASK 0x00000003L
18096#define VGT_DRAW_INITIATOR__MAJOR_MODE_MASK 0x0000000CL
18097#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX_MASK 0x00000010L
18098#define VGT_DRAW_INITIATOR__NOT_EOP_MASK 0x00000020L
18099#define VGT_DRAW_INITIATOR__USE_OPAQUE_MASK 0x00000040L
18100#define VGT_DRAW_INITIATOR__UNROLLED_INST_MASK 0x00000080L
18101#define VGT_DRAW_INITIATOR__GRBM_SKEW_NO_DEC_MASK 0x00000100L
18102#define VGT_DRAW_INITIATOR__REG_RT_INDEX_MASK 0xE0000000L
18103//VGT_IMMED_DATA
18104#define VGT_IMMED_DATA__DATA__SHIFT 0x0
18105#define VGT_IMMED_DATA__DATA_MASK 0xFFFFFFFFL
18106//VGT_EVENT_ADDRESS_REG
18107#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW__SHIFT 0x0
18108#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW_MASK 0x0FFFFFFFL
18109//DB_DEPTH_CONTROL
18110#define DB_DEPTH_CONTROL__STENCIL_ENABLE__SHIFT 0x0
18111#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT 0x1
18112#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT 0x2
18113#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE__SHIFT 0x3
18114#define DB_DEPTH_CONTROL__ZFUNC__SHIFT 0x4
18115#define DB_DEPTH_CONTROL__BACKFACE_ENABLE__SHIFT 0x7
18116#define DB_DEPTH_CONTROL__STENCILFUNC__SHIFT 0x8
18117#define DB_DEPTH_CONTROL__STENCILFUNC_BF__SHIFT 0x14
18118#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL__SHIFT 0x1e
18119#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS__SHIFT 0x1f
18120#define DB_DEPTH_CONTROL__STENCIL_ENABLE_MASK 0x00000001L
18121#define DB_DEPTH_CONTROL__Z_ENABLE_MASK 0x00000002L
18122#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK 0x00000004L
18123#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE_MASK 0x00000008L
18124#define DB_DEPTH_CONTROL__ZFUNC_MASK 0x00000070L
18125#define DB_DEPTH_CONTROL__BACKFACE_ENABLE_MASK 0x00000080L
18126#define DB_DEPTH_CONTROL__STENCILFUNC_MASK 0x00000700L
18127#define DB_DEPTH_CONTROL__STENCILFUNC_BF_MASK 0x00700000L
18128#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL_MASK 0x40000000L
18129#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS_MASK 0x80000000L
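Every field in this header is described by a <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pair, so register words are built and decoded with plain shift-and-mask arithmetic; the amdgpu driver wraps the same naming convention in its REG_SET_FIELD()/REG_GET_FIELD() helpers. A minimal, self-contained sketch using the DB_DEPTH_CONTROL fields above (the local SET_FIELD/GET_FIELD macros and the ZFUNC value are illustrative, not taken from this header):

#include <linux/types.h>

/* Illustrative helpers mirroring the <REG>__<FIELD>__SHIFT / _MASK convention;
 * the driver's own REG_SET_FIELD()/REG_GET_FIELD() macros expand the same way. */
#define SET_FIELD(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

static u32 build_db_depth_control(void)
{
	u32 val = 0;

	val = SET_FIELD(val, DB_DEPTH_CONTROL, Z_ENABLE, 1);
	val = SET_FIELD(val, DB_DEPTH_CONTROL, Z_WRITE_ENABLE, 1);
	/* ZFUNC is a 3-bit compare op; 3 is used here purely as an example. */
	val = SET_FIELD(val, DB_DEPTH_CONTROL, ZFUNC, 3);

	return val;
}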
18130//DB_EQAA
18131#define DB_EQAA__MAX_ANCHOR_SAMPLES__SHIFT 0x0
18132#define DB_EQAA__PS_ITER_SAMPLES__SHIFT 0x4
18133#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES__SHIFT 0x8
18134#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES__SHIFT 0xc
18135#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS__SHIFT 0x10
18136#define DB_EQAA__INCOHERENT_EQAA_READS__SHIFT 0x11
18137#define DB_EQAA__INTERPOLATE_COMP_Z__SHIFT 0x12
18138#define DB_EQAA__INTERPOLATE_SRC_Z__SHIFT 0x13
18139#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS__SHIFT 0x14
18140#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE__SHIFT 0x15
18141#define DB_EQAA__OVERRASTERIZATION_AMOUNT__SHIFT 0x18
18142#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION__SHIFT 0x1b
18143#define DB_EQAA__MAX_ANCHOR_SAMPLES_MASK 0x00000007L
18144#define DB_EQAA__PS_ITER_SAMPLES_MASK 0x00000070L
18145#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES_MASK 0x00000700L
18146#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES_MASK 0x00007000L
18147#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS_MASK 0x00010000L
18148#define DB_EQAA__INCOHERENT_EQAA_READS_MASK 0x00020000L
18149#define DB_EQAA__INTERPOLATE_COMP_Z_MASK 0x00040000L
18150#define DB_EQAA__INTERPOLATE_SRC_Z_MASK 0x00080000L
18151#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS_MASK 0x00100000L
18152#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE_MASK 0x00200000L
18153#define DB_EQAA__OVERRASTERIZATION_AMOUNT_MASK 0x07000000L
18154#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION_MASK 0x08000000L
18155//CB_COLOR_CONTROL
18156#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD__SHIFT 0x0
18157#define CB_COLOR_CONTROL__DEGAMMA_ENABLE__SHIFT 0x3
18158#define CB_COLOR_CONTROL__MODE__SHIFT 0x4
18159#define CB_COLOR_CONTROL__ROP3__SHIFT 0x10
18160#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD_MASK 0x00000001L
18161#define CB_COLOR_CONTROL__DEGAMMA_ENABLE_MASK 0x00000008L
18162#define CB_COLOR_CONTROL__MODE_MASK 0x00000070L
18163#define CB_COLOR_CONTROL__ROP3_MASK 0x00FF0000L
18164//DB_SHADER_CONTROL
18165#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE__SHIFT 0x0
18166#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE__SHIFT 0x1
18167#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE__SHIFT 0x2
18168#define DB_SHADER_CONTROL__Z_ORDER__SHIFT 0x4
18169#define DB_SHADER_CONTROL__KILL_ENABLE__SHIFT 0x6
18170#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE__SHIFT 0x7
18171#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE__SHIFT 0x8
18172#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL__SHIFT 0x9
18173#define DB_SHADER_CONTROL__EXEC_ON_NOOP__SHIFT 0xa
18174#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE__SHIFT 0xb
18175#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER__SHIFT 0xc
18176#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT__SHIFT 0xd
18177#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE__SHIFT 0xf
18178#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER__SHIFT 0x10
18179#define DB_SHADER_CONTROL__EXEC_IF_OVERLAPPED__SHIFT 0x11
18180#define DB_SHADER_CONTROL__POPS_OVERLAP_NUM_SAMPLES__SHIFT 0x14
18181#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE_MASK 0x00000001L
18182#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE_MASK 0x00000002L
18183#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE_MASK 0x00000004L
18184#define DB_SHADER_CONTROL__Z_ORDER_MASK 0x00000030L
18185#define DB_SHADER_CONTROL__KILL_ENABLE_MASK 0x00000040L
18186#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE_MASK 0x00000080L
18187#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE_MASK 0x00000100L
18188#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL_MASK 0x00000200L
18189#define DB_SHADER_CONTROL__EXEC_ON_NOOP_MASK 0x00000400L
18190#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE_MASK 0x00000800L
18191#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER_MASK 0x00001000L
18192#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT_MASK 0x00006000L
18193#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE_MASK 0x00008000L
18194#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER_MASK 0x00010000L
18195#define DB_SHADER_CONTROL__EXEC_IF_OVERLAPPED_MASK 0x00020000L
18196#define DB_SHADER_CONTROL__POPS_OVERLAP_NUM_SAMPLES_MASK 0x00700000L
18197//PA_CL_CLIP_CNTL
18198#define PA_CL_CLIP_CNTL__UCP_ENA_0__SHIFT 0x0
18199#define PA_CL_CLIP_CNTL__UCP_ENA_1__SHIFT 0x1
18200#define PA_CL_CLIP_CNTL__UCP_ENA_2__SHIFT 0x2
18201#define PA_CL_CLIP_CNTL__UCP_ENA_3__SHIFT 0x3
18202#define PA_CL_CLIP_CNTL__UCP_ENA_4__SHIFT 0x4
18203#define PA_CL_CLIP_CNTL__UCP_ENA_5__SHIFT 0x5
18204#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG__SHIFT 0xd
18205#define PA_CL_CLIP_CNTL__PS_UCP_MODE__SHIFT 0xe
18206#define PA_CL_CLIP_CNTL__CLIP_DISABLE__SHIFT 0x10
18207#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA__SHIFT 0x11
18208#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA__SHIFT 0x12
18209#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF__SHIFT 0x13
18210#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT__SHIFT 0x14
18211#define PA_CL_CLIP_CNTL__VTX_KILL_OR__SHIFT 0x15
18212#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL__SHIFT 0x16
18213#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA__SHIFT 0x18
18214#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE__SHIFT 0x19
18215#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE__SHIFT 0x1a
18216#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE__SHIFT 0x1b
18217#define PA_CL_CLIP_CNTL__UCP_ENA_0_MASK 0x00000001L
18218#define PA_CL_CLIP_CNTL__UCP_ENA_1_MASK 0x00000002L
18219#define PA_CL_CLIP_CNTL__UCP_ENA_2_MASK 0x00000004L
18220#define PA_CL_CLIP_CNTL__UCP_ENA_3_MASK 0x00000008L
18221#define PA_CL_CLIP_CNTL__UCP_ENA_4_MASK 0x00000010L
18222#define PA_CL_CLIP_CNTL__UCP_ENA_5_MASK 0x00000020L
18223#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG_MASK 0x00002000L
18224#define PA_CL_CLIP_CNTL__PS_UCP_MODE_MASK 0x0000C000L
18225#define PA_CL_CLIP_CNTL__CLIP_DISABLE_MASK 0x00010000L
18226#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA_MASK 0x00020000L
18227#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA_MASK 0x00040000L
18228#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF_MASK 0x00080000L
18229#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT_MASK 0x00100000L
18230#define PA_CL_CLIP_CNTL__VTX_KILL_OR_MASK 0x00200000L
18231#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL_MASK 0x00400000L
18232#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA_MASK 0x01000000L
18233#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE_MASK 0x02000000L
18234#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE_MASK 0x04000000L
18235#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE_MASK 0x08000000L
18236//PA_SU_SC_MODE_CNTL
18237#define PA_SU_SC_MODE_CNTL__CULL_FRONT__SHIFT 0x0
18238#define PA_SU_SC_MODE_CNTL__CULL_BACK__SHIFT 0x1
18239#define PA_SU_SC_MODE_CNTL__FACE__SHIFT 0x2
18240#define PA_SU_SC_MODE_CNTL__POLY_MODE__SHIFT 0x3
18241#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE__SHIFT 0x5
18242#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE__SHIFT 0x8
18243#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE__SHIFT 0xb
18244#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE__SHIFT 0xc
18245#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE__SHIFT 0xd
18246#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE__SHIFT 0x10
18247#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST__SHIFT 0x13
18248#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS__SHIFT 0x14
18249#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA__SHIFT 0x15
18250#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF__SHIFT 0x16
18251#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION__SHIFT 0x17
18252#define PA_SU_SC_MODE_CNTL__CULL_FRONT_MASK 0x00000001L
18253#define PA_SU_SC_MODE_CNTL__CULL_BACK_MASK 0x00000002L
18254#define PA_SU_SC_MODE_CNTL__FACE_MASK 0x00000004L
18255#define PA_SU_SC_MODE_CNTL__POLY_MODE_MASK 0x00000018L
18256#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE_MASK 0x000000E0L
18257#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE_MASK 0x00000700L
18258#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE_MASK 0x00000800L
18259#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE_MASK 0x00001000L
18260#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE_MASK 0x00002000L
18261#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE_MASK 0x00010000L
18262#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST_MASK 0x00080000L
18263#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS_MASK 0x00100000L
18264#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA_MASK 0x00200000L
18265#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF_MASK 0x00400000L
18266#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION_MASK 0x00800000L
18267//PA_CL_VTE_CNTL
18268#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA__SHIFT 0x0
18269#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA__SHIFT 0x1
18270#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA__SHIFT 0x2
18271#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA__SHIFT 0x3
18272#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA__SHIFT 0x4
18273#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA__SHIFT 0x5
18274#define PA_CL_VTE_CNTL__VTX_XY_FMT__SHIFT 0x8
18275#define PA_CL_VTE_CNTL__VTX_Z_FMT__SHIFT 0x9
18276#define PA_CL_VTE_CNTL__VTX_W0_FMT__SHIFT 0xa
18277#define PA_CL_VTE_CNTL__PERFCOUNTER_REF__SHIFT 0xb
18278#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA_MASK 0x00000001L
18279#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA_MASK 0x00000002L
18280#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA_MASK 0x00000004L
18281#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA_MASK 0x00000008L
18282#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA_MASK 0x00000010L
18283#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA_MASK 0x00000020L
18284#define PA_CL_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100L
18285#define PA_CL_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200L
18286#define PA_CL_VTE_CNTL__VTX_W0_FMT_MASK 0x00000400L
18287#define PA_CL_VTE_CNTL__PERFCOUNTER_REF_MASK 0x00000800L
18288//PA_CL_VS_OUT_CNTL
18289#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0__SHIFT 0x0
18290#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1__SHIFT 0x1
18291#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2__SHIFT 0x2
18292#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3__SHIFT 0x3
18293#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4__SHIFT 0x4
18294#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5__SHIFT 0x5
18295#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6__SHIFT 0x6
18296#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7__SHIFT 0x7
18297#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0__SHIFT 0x8
18298#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1__SHIFT 0x9
18299#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2__SHIFT 0xa
18300#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3__SHIFT 0xb
18301#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4__SHIFT 0xc
18302#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5__SHIFT 0xd
18303#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6__SHIFT 0xe
18304#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7__SHIFT 0xf
18305#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE__SHIFT 0x10
18306#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG__SHIFT 0x11
18307#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX__SHIFT 0x12
18308#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX__SHIFT 0x13
18309#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG__SHIFT 0x14
18310#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA__SHIFT 0x15
18311#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA__SHIFT 0x16
18312#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA__SHIFT 0x17
18313#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18
18314#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT 0x19
18315#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1a
18316#define PA_CL_VS_OUT_CNTL__USE_VTX_SHD_OBJPRIM_ID__SHIFT 0x1b
18317#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
18318#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1_MASK 0x00000002L
18319#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2_MASK 0x00000004L
18320#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3_MASK 0x00000008L
18321#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4_MASK 0x00000010L
18322#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5_MASK 0x00000020L
18323#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6_MASK 0x00000040L
18324#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7_MASK 0x00000080L
18325#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0_MASK 0x00000100L
18326#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1_MASK 0x00000200L
18327#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2_MASK 0x00000400L
18328#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3_MASK 0x00000800L
18329#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4_MASK 0x00001000L
18330#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5_MASK 0x00002000L
18331#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6_MASK 0x00004000L
18332#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7_MASK 0x00008000L
18333#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE_MASK 0x00010000L
18334#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG_MASK 0x00020000L
18335#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX_MASK 0x00040000L
18336#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX_MASK 0x00080000L
18337#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG_MASK 0x00100000L
18338#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA_MASK 0x00200000L
18339#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA_MASK 0x00400000L
18340#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA_MASK 0x00800000L
18341#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
18342#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK 0x02000000L
18343#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x04000000L
18344#define PA_CL_VS_OUT_CNTL__USE_VTX_SHD_OBJPRIM_ID_MASK 0x08000000L
18345//PA_CL_NANINF_CNTL
18346#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD__SHIFT 0x0
18347#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD__SHIFT 0x1
18348#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD__SHIFT 0x2
18349#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0__SHIFT 0x3
18350#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN__SHIFT 0x4
18351#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN__SHIFT 0x5
18352#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN__SHIFT 0x6
18353#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0__SHIFT 0x7
18354#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF__SHIFT 0x8
18355#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN__SHIFT 0x9
18356#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF__SHIFT 0xa
18357#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN__SHIFT 0xb
18358#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF__SHIFT 0xc
18359#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN__SHIFT 0xd
18360#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD__SHIFT 0xe
18361#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0__SHIFT 0x14
18362#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD_MASK 0x00000001L
18363#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD_MASK 0x00000002L
18364#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD_MASK 0x00000004L
18365#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0_MASK 0x00000008L
18366#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN_MASK 0x00000010L
18367#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN_MASK 0x00000020L
18368#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN_MASK 0x00000040L
18369#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0_MASK 0x00000080L
18370#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF_MASK 0x00000100L
18371#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN_MASK 0x00000200L
18372#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF_MASK 0x00000400L
18373#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN_MASK 0x00000800L
18374#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF_MASK 0x00001000L
18375#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN_MASK 0x00002000L
18376#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD_MASK 0x00004000L
18377#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0_MASK 0x00100000L
18378//PA_SU_LINE_STIPPLE_CNTL
18379#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET__SHIFT 0x0
18380#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH__SHIFT 0x2
18381#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM__SHIFT 0x3
18382#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST__SHIFT 0x4
18383#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET_MASK 0x00000003L
18384#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH_MASK 0x00000004L
18385#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM_MASK 0x00000008L
18386#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST_MASK 0x00000010L
18387//PA_SU_LINE_STIPPLE_SCALE
18388#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE__SHIFT 0x0
18389#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE_MASK 0xFFFFFFFFL
18390//PA_SU_PRIM_FILTER_CNTL
18391#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x0
18392#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x1
18393#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x2
18394#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x3
18395#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA__SHIFT 0x4
18396#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA__SHIFT 0x5
18397#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA__SHIFT 0x6
18398#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA__SHIFT 0x7
18399#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT__SHIFT 0x8
18400#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION__SHIFT 0x1e
18401#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION__SHIFT 0x1f
18402#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000001L
18403#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000002L
18404#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000004L
18405#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000008L
18406#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA_MASK 0x00000010L
18407#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA_MASK 0x00000020L
18408#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA_MASK 0x00000040L
18409#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA_MASK 0x00000080L
18410#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT_MASK 0x0000FF00L
18411#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION_MASK 0x40000000L
18412#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION_MASK 0x80000000L
18413//PA_SU_SMALL_PRIM_FILTER_CNTL
18414#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE__SHIFT 0x0
18415#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x1
18416#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x2
18417#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x3
18418#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x4
18419#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE__SHIFT 0x5
18420#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE_MASK 0x00000001L
18421#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000002L
18422#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000004L
18423#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000008L
18424#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000010L
18425#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE_MASK 0x00000020L
18426//PA_CL_OBJPRIM_ID_CNTL
18427#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL__SHIFT 0x0
18428#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID__SHIFT 0x1
18429#define PA_CL_OBJPRIM_ID_CNTL__EN_32BIT_OBJPRIMID__SHIFT 0x2
18430#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL_MASK 0x00000001L
18431#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID_MASK 0x00000002L
18432#define PA_CL_OBJPRIM_ID_CNTL__EN_32BIT_OBJPRIMID_MASK 0x00000004L
18433//PA_CL_NGG_CNTL
18434#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF__SHIFT 0x0
18435#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA__SHIFT 0x1
18436#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF_MASK 0x00000001L
18437#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA_MASK 0x00000002L
18438//PA_SU_OVER_RASTERIZATION_CNTL
18439#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES__SHIFT 0x0
18440#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES__SHIFT 0x1
18441#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS__SHIFT 0x2
18442#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES__SHIFT 0x3
18443#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW__SHIFT 0x4
18444#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES_MASK 0x00000001L
18445#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES_MASK 0x00000002L
18446#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS_MASK 0x00000004L
18447#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES_MASK 0x00000008L
18448#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW_MASK 0x00000010L
18449//PA_SU_POINT_SIZE
18450#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0
18451#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10
18452#define PA_SU_POINT_SIZE__HEIGHT_MASK 0x0000FFFFL
18453#define PA_SU_POINT_SIZE__WIDTH_MASK 0xFFFF0000L
18454//PA_SU_POINT_MINMAX
18455#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x0
18456#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x10
18457#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0x0000FFFFL
18458#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xFFFF0000L
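Registers such as PA_SU_POINT_SIZE and PA_SU_POINT_MINMAX above simply pack two 16-bit halves into one word. A short sketch of filling them with the same shift/mask arithmetic; the hardware encodes point sizes in a fixed-point sub-pixel unit, so hw_size/hw_min/hw_max are assumed to be values already converted to that encoding rather than pixel counts:

#include <linux/types.h>

/* hw_size, hw_min and hw_max are assumed to already be in the hardware's
 * fixed-point point-size encoding. */
static void pack_point_size(u32 *pa_su_point_size, u32 *pa_su_point_minmax,
			    u32 hw_size, u32 hw_min, u32 hw_max)
{
	*pa_su_point_size =
		((hw_size << PA_SU_POINT_SIZE__HEIGHT__SHIFT) &
		 PA_SU_POINT_SIZE__HEIGHT_MASK) |
		((hw_size << PA_SU_POINT_SIZE__WIDTH__SHIFT) &
		 PA_SU_POINT_SIZE__WIDTH_MASK);

	*pa_su_point_minmax =
		((hw_min << PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT) &
		 PA_SU_POINT_MINMAX__MIN_SIZE_MASK) |
		((hw_max << PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT) &
		 PA_SU_POINT_MINMAX__MAX_SIZE_MASK);
}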
18459//PA_SU_LINE_CNTL
18460#define PA_SU_LINE_CNTL__WIDTH__SHIFT 0x0
18461#define PA_SU_LINE_CNTL__WIDTH_MASK 0x0000FFFFL
18462//PA_SC_LINE_STIPPLE
18463#define PA_SC_LINE_STIPPLE__LINE_PATTERN__SHIFT 0x0
18464#define PA_SC_LINE_STIPPLE__REPEAT_COUNT__SHIFT 0x10
18465#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER__SHIFT 0x1c
18466#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL__SHIFT 0x1d
18467#define PA_SC_LINE_STIPPLE__LINE_PATTERN_MASK 0x0000FFFFL
18468#define PA_SC_LINE_STIPPLE__REPEAT_COUNT_MASK 0x00FF0000L
18469#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER_MASK 0x10000000L
18470#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL_MASK 0x60000000L
18471//VGT_OUTPUT_PATH_CNTL
18472#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT__SHIFT 0x0
18473#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT_MASK 0x00000007L
18474//VGT_HOS_CNTL
18475#define VGT_HOS_CNTL__TESS_MODE__SHIFT 0x0
18476#define VGT_HOS_CNTL__TESS_MODE_MASK 0x00000003L
18477//VGT_HOS_MAX_TESS_LEVEL
18478#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS__SHIFT 0x0
18479#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS_MASK 0xFFFFFFFFL
18480//VGT_HOS_MIN_TESS_LEVEL
18481#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS__SHIFT 0x0
18482#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS_MASK 0xFFFFFFFFL
18483//VGT_HOS_REUSE_DEPTH
18484#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH__SHIFT 0x0
18485#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH_MASK 0x000000FFL
18486//VGT_GROUP_PRIM_TYPE
18487#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE__SHIFT 0x0
18488#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER__SHIFT 0xe
18489#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS__SHIFT 0xf
18490#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER__SHIFT 0x10
18491#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE_MASK 0x0000001FL
18492#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER_MASK 0x00004000L
18493#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS_MASK 0x00008000L
18494#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER_MASK 0x00070000L
18495//VGT_GROUP_FIRST_DECR
18496#define VGT_GROUP_FIRST_DECR__FIRST_DECR__SHIFT 0x0
18497#define VGT_GROUP_FIRST_DECR__FIRST_DECR_MASK 0x0000000FL
18498//VGT_GROUP_DECR
18499#define VGT_GROUP_DECR__DECR__SHIFT 0x0
18500#define VGT_GROUP_DECR__DECR_MASK 0x0000000FL
18501//VGT_GROUP_VECT_0_CNTL
18502#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN__SHIFT 0x0
18503#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN__SHIFT 0x1
18504#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN__SHIFT 0x2
18505#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN__SHIFT 0x3
18506#define VGT_GROUP_VECT_0_CNTL__STRIDE__SHIFT 0x8
18507#define VGT_GROUP_VECT_0_CNTL__SHIFT__SHIFT 0x10
18508#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN_MASK 0x00000001L
18509#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN_MASK 0x00000002L
18510#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN_MASK 0x00000004L
18511#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN_MASK 0x00000008L
18512#define VGT_GROUP_VECT_0_CNTL__STRIDE_MASK 0x0000FF00L
18513#define VGT_GROUP_VECT_0_CNTL__SHIFT_MASK 0x00FF0000L
18514//VGT_GROUP_VECT_1_CNTL
18515#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN__SHIFT 0x0
18516#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN__SHIFT 0x1
18517#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN__SHIFT 0x2
18518#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN__SHIFT 0x3
18519#define VGT_GROUP_VECT_1_CNTL__STRIDE__SHIFT 0x8
18520#define VGT_GROUP_VECT_1_CNTL__SHIFT__SHIFT 0x10
18521#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN_MASK 0x00000001L
18522#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN_MASK 0x00000002L
18523#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN_MASK 0x00000004L
18524#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN_MASK 0x00000008L
18525#define VGT_GROUP_VECT_1_CNTL__STRIDE_MASK 0x0000FF00L
18526#define VGT_GROUP_VECT_1_CNTL__SHIFT_MASK 0x00FF0000L
18527//VGT_GROUP_VECT_0_FMT_CNTL
18528#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV__SHIFT 0x0
18529#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET__SHIFT 0x4
18530#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV__SHIFT 0x8
18531#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET__SHIFT 0xc
18532#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV__SHIFT 0x10
18533#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET__SHIFT 0x14
18534#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV__SHIFT 0x18
18535#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET__SHIFT 0x1c
18536#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV_MASK 0x0000000FL
18537#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET_MASK 0x000000F0L
18538#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV_MASK 0x00000F00L
18539#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET_MASK 0x0000F000L
18540#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV_MASK 0x000F0000L
18541#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET_MASK 0x00F00000L
18542#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV_MASK 0x0F000000L
18543#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET_MASK 0xF0000000L
18544//VGT_GROUP_VECT_1_FMT_CNTL
18545#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV__SHIFT 0x0
18546#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET__SHIFT 0x4
18547#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV__SHIFT 0x8
18548#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET__SHIFT 0xc
18549#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV__SHIFT 0x10
18550#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET__SHIFT 0x14
18551#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV__SHIFT 0x18
18552#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET__SHIFT 0x1c
18553#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV_MASK 0x0000000FL
18554#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET_MASK 0x000000F0L
18555#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV_MASK 0x00000F00L
18556#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET_MASK 0x0000F000L
18557#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV_MASK 0x000F0000L
18558#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET_MASK 0x00F00000L
18559#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV_MASK 0x0F000000L
18560#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET_MASK 0xF0000000L
18561//VGT_GS_MODE
18562#define VGT_GS_MODE__MODE__SHIFT 0x0
18563#define VGT_GS_MODE__RESERVED_0__SHIFT 0x3
18564#define VGT_GS_MODE__CUT_MODE__SHIFT 0x4
18565#define VGT_GS_MODE__RESERVED_1__SHIFT 0x6
18566#define VGT_GS_MODE__GS_C_PACK_EN__SHIFT 0xb
18567#define VGT_GS_MODE__RESERVED_2__SHIFT 0xc
18568#define VGT_GS_MODE__ES_PASSTHRU__SHIFT 0xd
18569#define VGT_GS_MODE__RESERVED_3__SHIFT 0xe
18570#define VGT_GS_MODE__RESERVED_4__SHIFT 0xf
18571#define VGT_GS_MODE__RESERVED_5__SHIFT 0x10
18572#define VGT_GS_MODE__PARTIAL_THD_AT_EOI__SHIFT 0x11
18573#define VGT_GS_MODE__SUPPRESS_CUTS__SHIFT 0x12
18574#define VGT_GS_MODE__ES_WRITE_OPTIMIZE__SHIFT 0x13
18575#define VGT_GS_MODE__GS_WRITE_OPTIMIZE__SHIFT 0x14
18576#define VGT_GS_MODE__ONCHIP__SHIFT 0x15
18577#define VGT_GS_MODE__MODE_MASK 0x00000007L
18578#define VGT_GS_MODE__RESERVED_0_MASK 0x00000008L
18579#define VGT_GS_MODE__CUT_MODE_MASK 0x00000030L
18580#define VGT_GS_MODE__RESERVED_1_MASK 0x000007C0L
18581#define VGT_GS_MODE__GS_C_PACK_EN_MASK 0x00000800L
18582#define VGT_GS_MODE__RESERVED_2_MASK 0x00001000L
18583#define VGT_GS_MODE__ES_PASSTHRU_MASK 0x00002000L
18584#define VGT_GS_MODE__RESERVED_3_MASK 0x00004000L
18585#define VGT_GS_MODE__RESERVED_4_MASK 0x00008000L
18586#define VGT_GS_MODE__RESERVED_5_MASK 0x00010000L
18587#define VGT_GS_MODE__PARTIAL_THD_AT_EOI_MASK 0x00020000L
18588#define VGT_GS_MODE__SUPPRESS_CUTS_MASK 0x00040000L
18589#define VGT_GS_MODE__ES_WRITE_OPTIMIZE_MASK 0x00080000L
18590#define VGT_GS_MODE__GS_WRITE_OPTIMIZE_MASK 0x00100000L
18591#define VGT_GS_MODE__ONCHIP_MASK 0x00600000L
18592//VGT_GS_ONCHIP_CNTL
18593#define VGT_GS_ONCHIP_CNTL__ES_VERTS_PER_SUBGRP__SHIFT 0x0
18594#define VGT_GS_ONCHIP_CNTL__GS_PRIMS_PER_SUBGRP__SHIFT 0xb
18595#define VGT_GS_ONCHIP_CNTL__GS_INST_PRIMS_IN_SUBGRP__SHIFT 0x16
18596#define VGT_GS_ONCHIP_CNTL__ES_VERTS_PER_SUBGRP_MASK 0x000007FFL
18597#define VGT_GS_ONCHIP_CNTL__GS_PRIMS_PER_SUBGRP_MASK 0x003FF800L
18598#define VGT_GS_ONCHIP_CNTL__GS_INST_PRIMS_IN_SUBGRP_MASK 0xFFC00000L
18599//PA_SC_MODE_CNTL_0
18600#define PA_SC_MODE_CNTL_0__MSAA_ENABLE__SHIFT 0x0
18601#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE__SHIFT 0x1
18602#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE__SHIFT 0x2
18603#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR__SHIFT 0x3
18604#define PA_SC_MODE_CNTL_0__SCALE_LINE_WIDTH_PAD__SHIFT 0x4
18605#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE__SHIFT 0x5
18606#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB__SHIFT 0x6
18607#define PA_SC_MODE_CNTL_0__MSAA_ENABLE_MASK 0x00000001L
18608#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE_MASK 0x00000002L
18609#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE_MASK 0x00000004L
18610#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR_MASK 0x00000008L
18611#define PA_SC_MODE_CNTL_0__SCALE_LINE_WIDTH_PAD_MASK 0x00000010L
18612#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE_MASK 0x00000020L
18613#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB_MASK 0x00000040L
18614//PA_SC_MODE_CNTL_1
18615#define PA_SC_MODE_CNTL_1__WALK_SIZE__SHIFT 0x0
18616#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT__SHIFT 0x1
18617#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST__SHIFT 0x2
18618#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE__SHIFT 0x3
18619#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE__SHIFT 0x4
18620#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE__SHIFT 0x7
18621#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE__SHIFT 0x8
18622#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE__SHIFT 0x9
18623#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR__SHIFT 0xa
18624#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT__SHIFT 0xb
18625#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET__SHIFT 0xc
18626#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT__SHIFT 0xd
18627#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z__SHIFT 0xe
18628#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK__SHIFT 0xf
18629#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE__SHIFT 0x10
18630#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE__SHIFT 0x11
18631#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE__SHIFT 0x12
18632#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE__SHIFT 0x13
18633#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE__SHIFT 0x14
18634#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE__SHIFT 0x18
18635#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE__SHIFT 0x19
18636#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE__SHIFT 0x1a
18637#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE__SHIFT 0x1b
18638#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK__SHIFT 0x1c
18639#define PA_SC_MODE_CNTL_1__WALK_SIZE_MASK 0x00000001L
18640#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT_MASK 0x00000002L
18641#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST_MASK 0x00000004L
18642#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE_MASK 0x00000008L
18643#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE_MASK 0x00000070L
18644#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE_MASK 0x00000080L
18645#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE_MASK 0x00000100L
18646#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE_MASK 0x00000200L
18647#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR_MASK 0x00000400L
18648#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT_MASK 0x00000800L
18649#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET_MASK 0x00001000L
18650#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT_MASK 0x00002000L
18651#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z_MASK 0x00004000L
18652#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK_MASK 0x00008000L
18653#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE_MASK 0x00010000L
18654#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE_MASK 0x00020000L
18655#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE_MASK 0x00040000L
18656#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE_MASK 0x00080000L
18657#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_MASK 0x00F00000L
18658#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE_MASK 0x01000000L
18659#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE_MASK 0x02000000L
18660#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE_MASK 0x04000000L
18661#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE_MASK 0x08000000L
18662#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK_MASK 0x70000000L
18663//VGT_ENHANCE
18664#define VGT_ENHANCE__MISC__SHIFT 0x0
18665#define VGT_ENHANCE__MISC_MASK 0xFFFFFFFFL
18666//VGT_GS_PER_ES
18667#define VGT_GS_PER_ES__GS_PER_ES__SHIFT 0x0
18668#define VGT_GS_PER_ES__GS_PER_ES_MASK 0x000007FFL
18669//VGT_ES_PER_GS
18670#define VGT_ES_PER_GS__ES_PER_GS__SHIFT 0x0
18671#define VGT_ES_PER_GS__ES_PER_GS_MASK 0x000007FFL
18672//VGT_GS_PER_VS
18673#define VGT_GS_PER_VS__GS_PER_VS__SHIFT 0x0
18674#define VGT_GS_PER_VS__GS_PER_VS_MASK 0x0000000FL
18675//VGT_GSVS_RING_OFFSET_1
18676#define VGT_GSVS_RING_OFFSET_1__OFFSET__SHIFT 0x0
18677#define VGT_GSVS_RING_OFFSET_1__OFFSET_MASK 0x00007FFFL
18678//VGT_GSVS_RING_OFFSET_2
18679#define VGT_GSVS_RING_OFFSET_2__OFFSET__SHIFT 0x0
18680#define VGT_GSVS_RING_OFFSET_2__OFFSET_MASK 0x00007FFFL
18681//VGT_GSVS_RING_OFFSET_3
18682#define VGT_GSVS_RING_OFFSET_3__OFFSET__SHIFT 0x0
18683#define VGT_GSVS_RING_OFFSET_3__OFFSET_MASK 0x00007FFFL
18684//VGT_GS_OUT_PRIM_TYPE
18685#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE__SHIFT 0x0
18686#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1__SHIFT 0x8
18687#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2__SHIFT 0x10
18688#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3__SHIFT 0x16
18689#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM__SHIFT 0x1f
18690#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_MASK 0x0000003FL
18691#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1_MASK 0x00003F00L
18692#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2_MASK 0x003F0000L
18693#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3_MASK 0x0FC00000L
18694#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM_MASK 0x80000000L
18695//IA_ENHANCE
18696#define IA_ENHANCE__MISC__SHIFT 0x0
18697#define IA_ENHANCE__MISC_MASK 0xFFFFFFFFL
18698//VGT_DMA_SIZE
18699#define VGT_DMA_SIZE__NUM_INDICES__SHIFT 0x0
18700#define VGT_DMA_SIZE__NUM_INDICES_MASK 0xFFFFFFFFL
18701//VGT_DMA_MAX_SIZE
18702#define VGT_DMA_MAX_SIZE__MAX_SIZE__SHIFT 0x0
18703#define VGT_DMA_MAX_SIZE__MAX_SIZE_MASK 0xFFFFFFFFL
18704//VGT_DMA_INDEX_TYPE
18705#define VGT_DMA_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
18706#define VGT_DMA_INDEX_TYPE__SWAP_MODE__SHIFT 0x2
18707#define VGT_DMA_INDEX_TYPE__BUF_TYPE__SHIFT 0x4
18708#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY__SHIFT 0x6
18709#define VGT_DMA_INDEX_TYPE__PRIMGEN_EN__SHIFT 0x8
18710#define VGT_DMA_INDEX_TYPE__NOT_EOP__SHIFT 0x9
18711#define VGT_DMA_INDEX_TYPE__REQ_PATH__SHIFT 0xa
18712#define VGT_DMA_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
18713#define VGT_DMA_INDEX_TYPE__SWAP_MODE_MASK 0x0000000CL
18714#define VGT_DMA_INDEX_TYPE__BUF_TYPE_MASK 0x00000030L
18715#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY_MASK 0x00000040L
18716#define VGT_DMA_INDEX_TYPE__PRIMGEN_EN_MASK 0x00000100L
18717#define VGT_DMA_INDEX_TYPE__NOT_EOP_MASK 0x00000200L
18718#define VGT_DMA_INDEX_TYPE__REQ_PATH_MASK 0x00000400L
18719//WD_ENHANCE
18720#define WD_ENHANCE__MISC__SHIFT 0x0
18721#define WD_ENHANCE__MISC_MASK 0xFFFFFFFFL
18722//VGT_PRIMITIVEID_EN
18723#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN__SHIFT 0x0
18724#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI__SHIFT 0x1
18725#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE__SHIFT 0x2
18726#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN_MASK 0x00000001L
18727#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI_MASK 0x00000002L
18728#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE_MASK 0x00000004L
18729//VGT_DMA_NUM_INSTANCES
18730#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
18731#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
18732//VGT_PRIMITIVEID_RESET
18733#define VGT_PRIMITIVEID_RESET__VALUE__SHIFT 0x0
18734#define VGT_PRIMITIVEID_RESET__VALUE_MASK 0xFFFFFFFFL
18735//VGT_EVENT_INITIATOR
18736#define VGT_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
18737#define VGT_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
18738#define VGT_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
18739#define VGT_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
18740#define VGT_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
18741#define VGT_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
18742//VGT_GS_MAX_PRIMS_PER_SUBGROUP
18743#define VGT_GS_MAX_PRIMS_PER_SUBGROUP__MAX_PRIMS_PER_SUBGROUP__SHIFT 0x0
18744#define VGT_GS_MAX_PRIMS_PER_SUBGROUP__MAX_PRIMS_PER_SUBGROUP_MASK 0x0000FFFFL
18745//VGT_DRAW_PAYLOAD_CNTL
18746#define VGT_DRAW_PAYLOAD_CNTL__OBJPRIM_ID_EN__SHIFT 0x0
18747#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX__SHIFT 0x1
18748#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID__SHIFT 0x2
18749#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN__SHIFT 0x3
18750#define VGT_DRAW_PAYLOAD_CNTL__OBJPRIM_ID_EN_MASK 0x00000001L
18751#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
18752#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID_MASK 0x00000004L
18753#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN_MASK 0x00000008L
18754//VGT_INSTANCE_STEP_RATE_0
18755#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x0
18756#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xFFFFFFFFL
18757//VGT_INSTANCE_STEP_RATE_1
18758#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE__SHIFT 0x0
18759#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE_MASK 0xFFFFFFFFL
18760//VGT_ESGS_RING_ITEMSIZE
18761#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
18762#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
18763//VGT_GSVS_RING_ITEMSIZE
18764#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
18765#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
18766//VGT_REUSE_OFF
18767#define VGT_REUSE_OFF__REUSE_OFF__SHIFT 0x0
18768#define VGT_REUSE_OFF__REUSE_OFF_MASK 0x00000001L
18769//VGT_VTX_CNT_EN
18770#define VGT_VTX_CNT_EN__VTX_CNT_EN__SHIFT 0x0
18771#define VGT_VTX_CNT_EN__VTX_CNT_EN_MASK 0x00000001L
18772//DB_HTILE_SURFACE
18773#define DB_HTILE_SURFACE__FULL_CACHE__SHIFT 0x1
18774#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN__SHIFT 0x2
18775#define DB_HTILE_SURFACE__PRELOAD__SHIFT 0x3
18776#define DB_HTILE_SURFACE__PREFETCH_WIDTH__SHIFT 0x4
18777#define DB_HTILE_SURFACE__PREFETCH_HEIGHT__SHIFT 0xa
18778#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10
18779#define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT 0x12
18780#define DB_HTILE_SURFACE__RB_ALIGNED__SHIFT 0x13
18781#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
18782#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN_MASK 0x00000004L
18783#define DB_HTILE_SURFACE__PRELOAD_MASK 0x00000008L
18784#define DB_HTILE_SURFACE__PREFETCH_WIDTH_MASK 0x000003F0L
18785#define DB_HTILE_SURFACE__PREFETCH_HEIGHT_MASK 0x0000FC00L
18786#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
18787#define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK 0x00040000L
18788#define DB_HTILE_SURFACE__RB_ALIGNED_MASK 0x00080000L
18789//DB_SRESULTS_COMPARE_STATE0
18790#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0
18791#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4
18792#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0__SHIFT 0xc
18793#define DB_SRESULTS_COMPARE_STATE0__ENABLE0__SHIFT 0x18
18794#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0_MASK 0x00000007L
18795#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0_MASK 0x00000FF0L
18796#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0_MASK 0x000FF000L
18797#define DB_SRESULTS_COMPARE_STATE0__ENABLE0_MASK 0x01000000L
18798//DB_SRESULTS_COMPARE_STATE1
18799#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1__SHIFT 0x0
18800#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1__SHIFT 0x4
18801#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1__SHIFT 0xc
18802#define DB_SRESULTS_COMPARE_STATE1__ENABLE1__SHIFT 0x18
18803#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1_MASK 0x00000007L
18804#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1_MASK 0x00000FF0L
18805#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1_MASK 0x000FF000L
18806#define DB_SRESULTS_COMPARE_STATE1__ENABLE1_MASK 0x01000000L
18807//DB_PRELOAD_CONTROL
18808#define DB_PRELOAD_CONTROL__START_X__SHIFT 0x0
18809#define DB_PRELOAD_CONTROL__START_Y__SHIFT 0x8
18810#define DB_PRELOAD_CONTROL__MAX_X__SHIFT 0x10
18811#define DB_PRELOAD_CONTROL__MAX_Y__SHIFT 0x18
18812#define DB_PRELOAD_CONTROL__START_X_MASK 0x000000FFL
18813#define DB_PRELOAD_CONTROL__START_Y_MASK 0x0000FF00L
18814#define DB_PRELOAD_CONTROL__MAX_X_MASK 0x00FF0000L
18815#define DB_PRELOAD_CONTROL__MAX_Y_MASK 0xFF000000L
18816//VGT_STRMOUT_BUFFER_SIZE_0
18817#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE__SHIFT 0x0
18818#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE_MASK 0xFFFFFFFFL
18819//VGT_STRMOUT_VTX_STRIDE_0
18820#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE__SHIFT 0x0
18821#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE_MASK 0x000003FFL
18822//VGT_STRMOUT_BUFFER_OFFSET_0
18823#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET__SHIFT 0x0
18824#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET_MASK 0xFFFFFFFFL
18825//VGT_STRMOUT_BUFFER_SIZE_1
18826#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE__SHIFT 0x0
18827#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE_MASK 0xFFFFFFFFL
18828//VGT_STRMOUT_VTX_STRIDE_1
18829#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE__SHIFT 0x0
18830#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE_MASK 0x000003FFL
18831//VGT_STRMOUT_BUFFER_OFFSET_1
18832#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET__SHIFT 0x0
18833#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET_MASK 0xFFFFFFFFL
18834//VGT_STRMOUT_BUFFER_SIZE_2
18835#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE__SHIFT 0x0
18836#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE_MASK 0xFFFFFFFFL
18837//VGT_STRMOUT_VTX_STRIDE_2
18838#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE__SHIFT 0x0
18839#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE_MASK 0x000003FFL
18840//VGT_STRMOUT_BUFFER_OFFSET_2
18841#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET__SHIFT 0x0
18842#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET_MASK 0xFFFFFFFFL
18843//VGT_STRMOUT_BUFFER_SIZE_3
18844#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE__SHIFT 0x0
18845#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE_MASK 0xFFFFFFFFL
18846//VGT_STRMOUT_VTX_STRIDE_3
18847#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE__SHIFT 0x0
18848#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE_MASK 0x000003FFL
18849//VGT_STRMOUT_BUFFER_OFFSET_3
18850#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET__SHIFT 0x0
18851#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET_MASK 0xFFFFFFFFL
18852//VGT_STRMOUT_DRAW_OPAQUE_OFFSET
18853#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET__SHIFT 0x0
18854#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET_MASK 0xFFFFFFFFL
18855//VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
18856#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE__SHIFT 0x0
18857#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE_MASK 0xFFFFFFFFL
18858//VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
18859#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE__SHIFT 0x0
18860#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE_MASK 0x000001FFL
18861//VGT_GS_MAX_VERT_OUT
18862#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT__SHIFT 0x0
18863#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT_MASK 0x000007FFL
18864//VGT_TESS_DISTRIBUTION
18865#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE__SHIFT 0x0
18866#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
18867#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
18868#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
18869#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT__SHIFT 0x1d
18870#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE_MASK 0x000000FFL
18871#define VGT_TESS_DISTRIBUTION__ACCUM_TRI_MASK 0x0000FF00L
18872#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0x00FF0000L
18873#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0x1F000000L
18874#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT_MASK 0xE0000000L
18875//VGT_SHADER_STAGES_EN
18876#define VGT_SHADER_STAGES_EN__LS_EN__SHIFT 0x0
18877#define VGT_SHADER_STAGES_EN__HS_EN__SHIFT 0x2
18878#define VGT_SHADER_STAGES_EN__ES_EN__SHIFT 0x3
18879#define VGT_SHADER_STAGES_EN__GS_EN__SHIFT 0x5
18880#define VGT_SHADER_STAGES_EN__VS_EN__SHIFT 0x6
18881#define VGT_SHADER_STAGES_EN__DISPATCH_DRAW_EN__SHIFT 0x9
18882#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_0__SHIFT 0xa
18883#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_1__SHIFT 0xb
18884#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN__SHIFT 0xc
18885#define VGT_SHADER_STAGES_EN__PRIMGEN_EN__SHIFT 0xd
18886#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE__SHIFT 0xe
18887#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE__SHIFT 0xf
18888#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH__SHIFT 0x13
18889#define VGT_SHADER_STAGES_EN__LS_EN_MASK 0x00000003L
18890#define VGT_SHADER_STAGES_EN__HS_EN_MASK 0x00000004L
18891#define VGT_SHADER_STAGES_EN__ES_EN_MASK 0x00000018L
18892#define VGT_SHADER_STAGES_EN__GS_EN_MASK 0x00000020L
18893#define VGT_SHADER_STAGES_EN__VS_EN_MASK 0x000000C0L
18894#define VGT_SHADER_STAGES_EN__DISPATCH_DRAW_EN_MASK 0x00000200L
18895#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_0_MASK 0x00000400L
18896#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_1_MASK 0x00000800L
18897#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN_MASK 0x00001000L
18898#define VGT_SHADER_STAGES_EN__PRIMGEN_EN_MASK 0x00002000L
18899#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE_MASK 0x00004000L
18900#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE_MASK 0x00078000L
18901#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH_MASK 0x00080000L
18902//VGT_LS_HS_CONFIG
18903#define VGT_LS_HS_CONFIG__NUM_PATCHES__SHIFT 0x0
18904#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
18905#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP__SHIFT 0xe
18906#define VGT_LS_HS_CONFIG__NUM_PATCHES_MASK 0x000000FFL
18907#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
18908#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP_MASK 0x000FC000L
18909//VGT_GS_VERT_ITEMSIZE
18910#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE__SHIFT 0x0
18911#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
18912//VGT_GS_VERT_ITEMSIZE_1
18913#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE__SHIFT 0x0
18914#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE_MASK 0x00007FFFL
18915//VGT_GS_VERT_ITEMSIZE_2
18916#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE__SHIFT 0x0
18917#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE_MASK 0x00007FFFL
18918//VGT_GS_VERT_ITEMSIZE_3
18919#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE__SHIFT 0x0
18920#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE_MASK 0x00007FFFL
18921//VGT_TF_PARAM
18922#define VGT_TF_PARAM__TYPE__SHIFT 0x0
18923#define VGT_TF_PARAM__PARTITIONING__SHIFT 0x2
18924#define VGT_TF_PARAM__TOPOLOGY__SHIFT 0x5
18925#define VGT_TF_PARAM__RESERVED_REDUC_AXIS__SHIFT 0x8
18926#define VGT_TF_PARAM__DEPRECATED__SHIFT 0x9
18927#define VGT_TF_PARAM__DISABLE_DONUTS__SHIFT 0xe
18928#define VGT_TF_PARAM__RDREQ_POLICY__SHIFT 0xf
18929#define VGT_TF_PARAM__DISTRIBUTION_MODE__SHIFT 0x11
18930#define VGT_TF_PARAM__TYPE_MASK 0x00000003L
18931#define VGT_TF_PARAM__PARTITIONING_MASK 0x0000001CL
18932#define VGT_TF_PARAM__TOPOLOGY_MASK 0x000000E0L
18933#define VGT_TF_PARAM__RESERVED_REDUC_AXIS_MASK 0x00000100L
18934#define VGT_TF_PARAM__DEPRECATED_MASK 0x00000200L
18935#define VGT_TF_PARAM__DISABLE_DONUTS_MASK 0x00004000L
18936#define VGT_TF_PARAM__RDREQ_POLICY_MASK 0x00008000L
18937#define VGT_TF_PARAM__DISTRIBUTION_MODE_MASK 0x00060000L
18938//DB_ALPHA_TO_MASK
18939#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE__SHIFT 0x0
18940#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0__SHIFT 0x8
18941#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1__SHIFT 0xa
18942#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2__SHIFT 0xc
18943#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3__SHIFT 0xe
18944#define DB_ALPHA_TO_MASK__OFFSET_ROUND__SHIFT 0x10
18945#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE_MASK 0x00000001L
18946#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0_MASK 0x00000300L
18947#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1_MASK 0x00000C00L
18948#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2_MASK 0x00003000L
18949#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3_MASK 0x0000C000L
18950#define DB_ALPHA_TO_MASK__OFFSET_ROUND_MASK 0x00010000L
18951//VGT_DISPATCH_DRAW_INDEX
18952#define VGT_DISPATCH_DRAW_INDEX__MATCH_INDEX__SHIFT 0x0
18953#define VGT_DISPATCH_DRAW_INDEX__MATCH_INDEX_MASK 0xFFFFFFFFL
18954//PA_SU_POLY_OFFSET_DB_FMT_CNTL
18955#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS__SHIFT 0x0
18956#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT__SHIFT 0x8
18957#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS_MASK 0x000000FFL
18958#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT_MASK 0x00000100L
18959//PA_SU_POLY_OFFSET_CLAMP
18960#define PA_SU_POLY_OFFSET_CLAMP__CLAMP__SHIFT 0x0
18961#define PA_SU_POLY_OFFSET_CLAMP__CLAMP_MASK 0xFFFFFFFFL
18962//PA_SU_POLY_OFFSET_FRONT_SCALE
18963#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE__SHIFT 0x0
18964#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE_MASK 0xFFFFFFFFL
18965//PA_SU_POLY_OFFSET_FRONT_OFFSET
18966#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET__SHIFT 0x0
18967#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET_MASK 0xFFFFFFFFL
18968//PA_SU_POLY_OFFSET_BACK_SCALE
18969#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE__SHIFT 0x0
18970#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE_MASK 0xFFFFFFFFL
18971//PA_SU_POLY_OFFSET_BACK_OFFSET
18972#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET__SHIFT 0x0
18973#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET_MASK 0xFFFFFFFFL
18974//VGT_GS_INSTANCE_CNT
18975#define VGT_GS_INSTANCE_CNT__ENABLE__SHIFT 0x0
18976#define VGT_GS_INSTANCE_CNT__CNT__SHIFT 0x2
18977#define VGT_GS_INSTANCE_CNT__ENABLE_MASK 0x00000001L
18978#define VGT_GS_INSTANCE_CNT__CNT_MASK 0x000001FCL
18979//VGT_STRMOUT_CONFIG
18980#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN__SHIFT 0x0
18981#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN__SHIFT 0x1
18982#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN__SHIFT 0x2
18983#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN__SHIFT 0x3
18984#define VGT_STRMOUT_CONFIG__RAST_STREAM__SHIFT 0x4
18985#define VGT_STRMOUT_CONFIG__EN_PRIMS_NEEDED_CNT__SHIFT 0x7
18986#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK__SHIFT 0x8
18987#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK__SHIFT 0x1f
18988#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN_MASK 0x00000001L
18989#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN_MASK 0x00000002L
18990#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN_MASK 0x00000004L
18991#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN_MASK 0x00000008L
18992#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK 0x00000070L
18993#define VGT_STRMOUT_CONFIG__EN_PRIMS_NEEDED_CNT_MASK 0x00000080L
18994#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK_MASK 0x00000F00L
18995#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK_MASK 0x80000000L
18996//VGT_STRMOUT_BUFFER_CONFIG
18997#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN__SHIFT 0x0
18998#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN__SHIFT 0x4
18999#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN__SHIFT 0x8
19000#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN__SHIFT 0xc
19001#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN_MASK 0x0000000FL
19002#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN_MASK 0x000000F0L
19003#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN_MASK 0x00000F00L
19004#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN_MASK 0x0000F000L
19005//VGT_DMA_EVENT_INITIATOR
19006#define VGT_DMA_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
19007#define VGT_DMA_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
19008#define VGT_DMA_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
19009#define VGT_DMA_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
19010#define VGT_DMA_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
19011#define VGT_DMA_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
19012//PA_SC_CENTROID_PRIORITY_0
19013#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0__SHIFT 0x0
19014#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1__SHIFT 0x4
19015#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2__SHIFT 0x8
19016#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3__SHIFT 0xc
19017#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4__SHIFT 0x10
19018#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5__SHIFT 0x14
19019#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6__SHIFT 0x18
19020#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7__SHIFT 0x1c
19021#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0_MASK 0x0000000FL
19022#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1_MASK 0x000000F0L
19023#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2_MASK 0x00000F00L
19024#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3_MASK 0x0000F000L
19025#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4_MASK 0x000F0000L
19026#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5_MASK 0x00F00000L
19027#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6_MASK 0x0F000000L
19028#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7_MASK 0xF0000000L
19029//PA_SC_CENTROID_PRIORITY_1
19030#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8__SHIFT 0x0
19031#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9__SHIFT 0x4
19032#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10__SHIFT 0x8
19033#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11__SHIFT 0xc
19034#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12__SHIFT 0x10
19035#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13__SHIFT 0x14
19036#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14__SHIFT 0x18
19037#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15__SHIFT 0x1c
19038#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8_MASK 0x0000000FL
19039#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9_MASK 0x000000F0L
19040#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10_MASK 0x00000F00L
19041#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11_MASK 0x0000F000L
19042#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12_MASK 0x000F0000L
19043#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13_MASK 0x00F00000L
19044#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14_MASK 0x0F000000L
19045#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15_MASK 0xF0000000L
19046//PA_SC_LINE_CNTL
19047#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH__SHIFT 0x9
19048#define PA_SC_LINE_CNTL__LAST_PIXEL__SHIFT 0xa
19049#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA__SHIFT 0xb
19050#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA__SHIFT 0xc
19051#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH_MASK 0x00000200L
19052#define PA_SC_LINE_CNTL__LAST_PIXEL_MASK 0x00000400L
19053#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA_MASK 0x00000800L
19054#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA_MASK 0x00001000L
19055//PA_SC_AA_CONFIG
19056#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES__SHIFT 0x0
19057#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN__SHIFT 0x4
19058#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST__SHIFT 0xd
19059#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES__SHIFT 0x14
19060#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE__SHIFT 0x18
19061#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT__SHIFT 0x1a
19062#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES_MASK 0x00000007L
19063#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN_MASK 0x00000010L
19064#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST_MASK 0x0001E000L
19065#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES_MASK 0x00700000L
19066#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE_MASK 0x03000000L
19067#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT_MASK 0x0C000000L
19068//PA_SU_VTX_CNTL
19069#define PA_SU_VTX_CNTL__PIX_CENTER__SHIFT 0x0
19070#define PA_SU_VTX_CNTL__ROUND_MODE__SHIFT 0x1
19071#define PA_SU_VTX_CNTL__QUANT_MODE__SHIFT 0x3
19072#define PA_SU_VTX_CNTL__PIX_CENTER_MASK 0x00000001L
19073#define PA_SU_VTX_CNTL__ROUND_MODE_MASK 0x00000006L
19074#define PA_SU_VTX_CNTL__QUANT_MODE_MASK 0x00000038L
19075//PA_CL_GB_VERT_CLIP_ADJ
19076#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
19077#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
19078//PA_CL_GB_VERT_DISC_ADJ
19079#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
19080#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
19081//PA_CL_GB_HORZ_CLIP_ADJ
19082#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
19083#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
19084//PA_CL_GB_HORZ_DISC_ADJ
19085#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
19086#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
19087//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
19088#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT 0x0
19089#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT 0x4
19090#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT 0x8
19091#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT 0xc
19092#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X__SHIFT 0x10
19093#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y__SHIFT 0x14
19094#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X__SHIFT 0x18
19095#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y__SHIFT 0x1c
19096#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK 0x0000000FL
19097#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK 0x000000F0L
19098#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK 0x00000F00L
19099#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK 0x0000F000L
19100#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X_MASK 0x000F0000L
19101#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y_MASK 0x00F00000L
19102#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X_MASK 0x0F000000L
19103#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y_MASK 0xF0000000L
19104//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
19105#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X__SHIFT 0x0
19106#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y__SHIFT 0x4
19107#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X__SHIFT 0x8
19108#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y__SHIFT 0xc
19109#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X__SHIFT 0x10
19110#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y__SHIFT 0x14
19111#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X__SHIFT 0x18
19112#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y__SHIFT 0x1c
19113#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X_MASK 0x0000000FL
19114#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y_MASK 0x000000F0L
19115#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X_MASK 0x00000F00L
19116#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y_MASK 0x0000F000L
19117#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X_MASK 0x000F0000L
19118#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y_MASK 0x00F00000L
19119#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X_MASK 0x0F000000L
19120#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y_MASK 0xF0000000L
19121//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
19122#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X__SHIFT 0x0
19123#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y__SHIFT 0x4
19124#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X__SHIFT 0x8
19125#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y__SHIFT 0xc
19126#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X__SHIFT 0x10
19127#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y__SHIFT 0x14
19128#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X__SHIFT 0x18
19129#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y__SHIFT 0x1c
19130#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X_MASK 0x0000000FL
19131#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y_MASK 0x000000F0L
19132#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X_MASK 0x00000F00L
19133#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y_MASK 0x0000F000L
19134#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X_MASK 0x000F0000L
19135#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y_MASK 0x00F00000L
19136#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X_MASK 0x0F000000L
19137#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y_MASK 0xF0000000L
19138//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
19139#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X__SHIFT 0x0
19140#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y__SHIFT 0x4
19141#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X__SHIFT 0x8
19142#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y__SHIFT 0xc
19143#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X__SHIFT 0x10
19144#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y__SHIFT 0x14
19145#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X__SHIFT 0x18
19146#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y__SHIFT 0x1c
19147#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X_MASK 0x0000000FL
19148#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y_MASK 0x000000F0L
19149#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X_MASK 0x00000F00L
19150#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y_MASK 0x0000F000L
19151#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X_MASK 0x000F0000L
19152#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y_MASK 0x00F00000L
19153#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X_MASK 0x0F000000L
19154#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y_MASK 0xF0000000L
19155//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
19156#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X__SHIFT 0x0
19157#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y__SHIFT 0x4
19158#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X__SHIFT 0x8
19159#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y__SHIFT 0xc
19160#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X__SHIFT 0x10
19161#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y__SHIFT 0x14
19162#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X__SHIFT 0x18
19163#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y__SHIFT 0x1c
19164#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X_MASK 0x0000000FL
19165#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y_MASK 0x000000F0L
19166#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X_MASK 0x00000F00L
19167#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y_MASK 0x0000F000L
19168#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X_MASK 0x000F0000L
19169#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y_MASK 0x00F00000L
19170#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X_MASK 0x0F000000L
19171#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y_MASK 0xF0000000L
19172//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
19173#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X__SHIFT 0x0
19174#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y__SHIFT 0x4
19175#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X__SHIFT 0x8
19176#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y__SHIFT 0xc
19177#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X__SHIFT 0x10
19178#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y__SHIFT 0x14
19179#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X__SHIFT 0x18
19180#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y__SHIFT 0x1c
19181#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X_MASK 0x0000000FL
19182#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y_MASK 0x000000F0L
19183#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X_MASK 0x00000F00L
19184#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y_MASK 0x0000F000L
19185#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X_MASK 0x000F0000L
19186#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y_MASK 0x00F00000L
19187#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X_MASK 0x0F000000L
19188#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y_MASK 0xF0000000L
19189//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
19190#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X__SHIFT 0x0
19191#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y__SHIFT 0x4
19192#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X__SHIFT 0x8
19193#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y__SHIFT 0xc
19194#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X__SHIFT 0x10
19195#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y__SHIFT 0x14
19196#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X__SHIFT 0x18
19197#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y__SHIFT 0x1c
19198#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X_MASK 0x0000000FL
19199#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y_MASK 0x000000F0L
19200#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X_MASK 0x00000F00L
19201#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y_MASK 0x0000F000L
19202#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X_MASK 0x000F0000L
19203#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y_MASK 0x00F00000L
19204#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X_MASK 0x0F000000L
19205#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y_MASK 0xF0000000L
19206//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
19207#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X__SHIFT 0x0
19208#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y__SHIFT 0x4
19209#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X__SHIFT 0x8
19210#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y__SHIFT 0xc
19211#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X__SHIFT 0x10
19212#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y__SHIFT 0x14
19213#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X__SHIFT 0x18
19214#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y__SHIFT 0x1c
19215#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X_MASK 0x0000000FL
19216#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y_MASK 0x000000F0L
19217#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X_MASK 0x00000F00L
19218#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y_MASK 0x0000F000L
19219#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X_MASK 0x000F0000L
19220#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y_MASK 0x00F00000L
19221#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X_MASK 0x0F000000L
19222#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y_MASK 0xF0000000L
19223//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
19224#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X__SHIFT 0x0
19225#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y__SHIFT 0x4
19226#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X__SHIFT 0x8
19227#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y__SHIFT 0xc
19228#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X__SHIFT 0x10
19229#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y__SHIFT 0x14
19230#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X__SHIFT 0x18
19231#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y__SHIFT 0x1c
19232#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X_MASK 0x0000000FL
19233#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y_MASK 0x000000F0L
19234#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X_MASK 0x00000F00L
19235#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y_MASK 0x0000F000L
19236#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X_MASK 0x000F0000L
19237#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y_MASK 0x00F00000L
19238#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X_MASK 0x0F000000L
19239#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y_MASK 0xF0000000L
19240//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
19241#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X__SHIFT 0x0
19242#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y__SHIFT 0x4
19243#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X__SHIFT 0x8
19244#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y__SHIFT 0xc
19245#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X__SHIFT 0x10
19246#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y__SHIFT 0x14
19247#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X__SHIFT 0x18
19248#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y__SHIFT 0x1c
19249#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X_MASK 0x0000000FL
19250#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y_MASK 0x000000F0L
19251#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X_MASK 0x00000F00L
19252#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y_MASK 0x0000F000L
19253#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X_MASK 0x000F0000L
19254#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y_MASK 0x00F00000L
19255#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X_MASK 0x0F000000L
19256#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y_MASK 0xF0000000L
19257//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
19258#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X__SHIFT 0x0
19259#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y__SHIFT 0x4
19260#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X__SHIFT 0x8
19261#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y__SHIFT 0xc
19262#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X__SHIFT 0x10
19263#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y__SHIFT 0x14
19264#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X__SHIFT 0x18
19265#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y__SHIFT 0x1c
19266#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X_MASK 0x0000000FL
19267#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y_MASK 0x000000F0L
19268#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X_MASK 0x00000F00L
19269#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y_MASK 0x0000F000L
19270#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X_MASK 0x000F0000L
19271#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y_MASK 0x00F00000L
19272#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X_MASK 0x0F000000L
19273#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y_MASK 0xF0000000L
19274//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
19275#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X__SHIFT 0x0
19276#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y__SHIFT 0x4
19277#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X__SHIFT 0x8
19278#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y__SHIFT 0xc
19279#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X__SHIFT 0x10
19280#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y__SHIFT 0x14
19281#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X__SHIFT 0x18
19282#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y__SHIFT 0x1c
19283#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X_MASK 0x0000000FL
19284#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y_MASK 0x000000F0L
19285#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X_MASK 0x00000F00L
19286#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y_MASK 0x0000F000L
19287#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X_MASK 0x000F0000L
19288#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y_MASK 0x00F00000L
19289#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X_MASK 0x0F000000L
19290#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y_MASK 0xF0000000L
19291//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
19292#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X__SHIFT 0x0
19293#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y__SHIFT 0x4
19294#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X__SHIFT 0x8
19295#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y__SHIFT 0xc
19296#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X__SHIFT 0x10
19297#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y__SHIFT 0x14
19298#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X__SHIFT 0x18
19299#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y__SHIFT 0x1c
19300#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X_MASK 0x0000000FL
19301#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y_MASK 0x000000F0L
19302#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X_MASK 0x00000F00L
19303#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y_MASK 0x0000F000L
19304#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X_MASK 0x000F0000L
19305#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y_MASK 0x00F00000L
19306#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X_MASK 0x0F000000L
19307#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y_MASK 0xF0000000L
19308//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
19309#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X__SHIFT 0x0
19310#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y__SHIFT 0x4
19311#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X__SHIFT 0x8
19312#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y__SHIFT 0xc
19313#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X__SHIFT 0x10
19314#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y__SHIFT 0x14
19315#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X__SHIFT 0x18
19316#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y__SHIFT 0x1c
19317#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X_MASK 0x0000000FL
19318#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y_MASK 0x000000F0L
19319#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X_MASK 0x00000F00L
19320#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y_MASK 0x0000F000L
19321#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X_MASK 0x000F0000L
19322#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y_MASK 0x00F00000L
19323#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X_MASK 0x0F000000L
19324#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y_MASK 0xF0000000L
19325//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
19326#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X__SHIFT 0x0
19327#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y__SHIFT 0x4
19328#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X__SHIFT 0x8
19329#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y__SHIFT 0xc
19330#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X__SHIFT 0x10
19331#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y__SHIFT 0x14
19332#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X__SHIFT 0x18
19333#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y__SHIFT 0x1c
19334#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X_MASK 0x0000000FL
19335#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y_MASK 0x000000F0L
19336#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X_MASK 0x00000F00L
19337#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y_MASK 0x0000F000L
19338#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X_MASK 0x000F0000L
19339#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y_MASK 0x00F00000L
19340#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X_MASK 0x0F000000L
19341#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y_MASK 0xF0000000L
19342//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
19343#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X__SHIFT 0x0
19344#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y__SHIFT 0x4
19345#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X__SHIFT 0x8
19346#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y__SHIFT 0xc
19347#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X__SHIFT 0x10
19348#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y__SHIFT 0x14
19349#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X__SHIFT 0x18
19350#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y__SHIFT 0x1c
19351#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X_MASK 0x0000000FL
19352#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y_MASK 0x000000F0L
19353#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X_MASK 0x00000F00L
19354#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y_MASK 0x0000F000L
19355#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X_MASK 0x000F0000L
19356#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y_MASK 0x00F00000L
19357#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X_MASK 0x0F000000L
19358#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y_MASK 0xF0000000L
19359//PA_SC_AA_MASK_X0Y0_X1Y0
19360#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0__SHIFT 0x0
19361#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0__SHIFT 0x10
19362#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0_MASK 0x0000FFFFL
19363#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0_MASK 0xFFFF0000L
19364//PA_SC_AA_MASK_X0Y1_X1Y1
19365#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1__SHIFT 0x0
19366#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1__SHIFT 0x10
19367#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1_MASK 0x0000FFFFL
19368#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1_MASK 0xFFFF0000L
19369//PA_SC_SHADER_CONTROL
19370#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES__SHIFT 0x0
19371#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID__SHIFT 0x2
19372#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION__SHIFT 0x3
19373#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES_MASK 0x00000003L
19374#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID_MASK 0x00000004L
19375#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION_MASK 0x00000008L
19376//PA_SC_BINNER_CNTL_0
19377#define PA_SC_BINNER_CNTL_0__BINNING_MODE__SHIFT 0x0
19378#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X__SHIFT 0x2
19379#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y__SHIFT 0x3
19380#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND__SHIFT 0x4
19381#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND__SHIFT 0x7
19382#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN__SHIFT 0xa
19383#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN__SHIFT 0xd
19384#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM__SHIFT 0x12
19385#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH__SHIFT 0x13
19386#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION__SHIFT 0x1b
19387#define PA_SC_BINNER_CNTL_0__BINNING_MODE_MASK 0x00000003L
19388#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_MASK 0x00000004L
19389#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_MASK 0x00000008L
19390#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND_MASK 0x00000070L
19391#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND_MASK 0x00000380L
19392#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN_MASK 0x00001C00L
19393#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN_MASK 0x0003E000L
19394#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM_MASK 0x00040000L
19395#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH_MASK 0x07F80000L
19396#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION_MASK 0x08000000L
19397//PA_SC_BINNER_CNTL_1
19398#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT__SHIFT 0x0
19399#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH__SHIFT 0x10
19400#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT_MASK 0x0000FFFFL
19401#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH_MASK 0xFFFF0000L
19402//PA_SC_CONSERVATIVE_RASTERIZATION_CNTL
19403#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE__SHIFT 0x0
19404#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT__SHIFT 0x1
19405#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE__SHIFT 0x5
19406#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT__SHIFT 0x6
19407#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE__SHIFT 0xa
19408#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT__SHIFT 0xb
19409#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET__SHIFT 0xc
19410#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL__SHIFT 0xd
19411#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL__SHIFT 0xe
19412#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE__SHIFT 0xf
19413#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE__SHIFT 0x10
19414#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x12
19415#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x13
19416#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE__SHIFT 0x14
19417#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE__SHIFT 0x15
19418#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE__SHIFT 0x16
19419#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE__SHIFT 0x17
19420#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE__SHIFT 0x18
19421#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE_MASK 0x00000001L
19422#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT_MASK 0x0000001EL
19423#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE_MASK 0x00000020L
19424#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT_MASK 0x000003C0L
19425#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE_MASK 0x00000400L
19426#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT_MASK 0x00000800L
19427#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET_MASK 0x00001000L
19428#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL_MASK 0x00002000L
19429#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL_MASK 0x00004000L
19430#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE_MASK 0x00008000L
19431#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE_MASK 0x00030000L
19432#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00040000L
19433#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00080000L
19434#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE_MASK 0x00100000L
19435#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE_MASK 0x00200000L
19436#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE_MASK 0x00400000L
19437#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE_MASK 0x00800000L
19438#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE_MASK 0x01000000L
19439//PA_SC_NGG_MODE_CNTL
19440#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE__SHIFT 0x0
19441#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE_MASK 0x000007FFL
19442//VGT_VERTEX_REUSE_BLOCK_CNTL
19443#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH__SHIFT 0x0
19444#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH_MASK 0x000000FFL
19445//VGT_OUT_DEALLOC_CNTL
19446#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST__SHIFT 0x0
19447#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST_MASK 0x0000007FL
19448//CB_COLOR0_BASE
19449#define CB_COLOR0_BASE__BASE_256B__SHIFT 0x0
19450#define CB_COLOR0_BASE__BASE_256B_MASK 0xFFFFFFFFL
19451//CB_COLOR0_BASE_EXT
19452#define CB_COLOR0_BASE_EXT__BASE_256B__SHIFT 0x0
19453#define CB_COLOR0_BASE_EXT__BASE_256B_MASK 0x000000FFL
19454//CB_COLOR0_ATTRIB2
19455#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
19456#define CB_COLOR0_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
19457#define CB_COLOR0_ATTRIB2__MAX_MIP__SHIFT 0x1c
19458#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
19459#define CB_COLOR0_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
19460#define CB_COLOR0_ATTRIB2__MAX_MIP_MASK 0xF0000000L
19461//CB_COLOR0_VIEW
19462#define CB_COLOR0_VIEW__SLICE_START__SHIFT 0x0
19463#define CB_COLOR0_VIEW__SLICE_MAX__SHIFT 0xd
19464#define CB_COLOR0_VIEW__MIP_LEVEL__SHIFT 0x18
19465#define CB_COLOR0_VIEW__SLICE_START_MASK 0x000007FFL
19466#define CB_COLOR0_VIEW__SLICE_MAX_MASK 0x00FFE000L
19467#define CB_COLOR0_VIEW__MIP_LEVEL_MASK 0x0F000000L
19468//CB_COLOR0_INFO
19469#define CB_COLOR0_INFO__ENDIAN__SHIFT 0x0
19470#define CB_COLOR0_INFO__FORMAT__SHIFT 0x2
19471#define CB_COLOR0_INFO__NUMBER_TYPE__SHIFT 0x8
19472#define CB_COLOR0_INFO__COMP_SWAP__SHIFT 0xb
19473#define CB_COLOR0_INFO__FAST_CLEAR__SHIFT 0xd
19474#define CB_COLOR0_INFO__COMPRESSION__SHIFT 0xe
19475#define CB_COLOR0_INFO__BLEND_CLAMP__SHIFT 0xf
19476#define CB_COLOR0_INFO__BLEND_BYPASS__SHIFT 0x10
19477#define CB_COLOR0_INFO__SIMPLE_FLOAT__SHIFT 0x11
19478#define CB_COLOR0_INFO__ROUND_MODE__SHIFT 0x12
19479#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
19480#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
19481#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
19482#define CB_COLOR0_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
19483#define CB_COLOR0_INFO__DCC_ENABLE__SHIFT 0x1c
19484#define CB_COLOR0_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
19485#define CB_COLOR0_INFO__ENDIAN_MASK 0x00000003L
19486#define CB_COLOR0_INFO__FORMAT_MASK 0x0000007CL
19487#define CB_COLOR0_INFO__NUMBER_TYPE_MASK 0x00000700L
19488#define CB_COLOR0_INFO__COMP_SWAP_MASK 0x00001800L
19489#define CB_COLOR0_INFO__FAST_CLEAR_MASK 0x00002000L
19490#define CB_COLOR0_INFO__COMPRESSION_MASK 0x00004000L
19491#define CB_COLOR0_INFO__BLEND_CLAMP_MASK 0x00008000L
19492#define CB_COLOR0_INFO__BLEND_BYPASS_MASK 0x00010000L
19493#define CB_COLOR0_INFO__SIMPLE_FLOAT_MASK 0x00020000L
19494#define CB_COLOR0_INFO__ROUND_MODE_MASK 0x00040000L
19495#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
19496#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
19497#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
19498#define CB_COLOR0_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
19499#define CB_COLOR0_INFO__DCC_ENABLE_MASK 0x10000000L
19500#define CB_COLOR0_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
19501//CB_COLOR0_ATTRIB
19502#define CB_COLOR0_ATTRIB__MIP0_DEPTH__SHIFT 0x0
19503#define CB_COLOR0_ATTRIB__META_LINEAR__SHIFT 0xb
19504#define CB_COLOR0_ATTRIB__NUM_SAMPLES__SHIFT 0xc
19505#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
19506#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
19507#define CB_COLOR0_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
19508#define CB_COLOR0_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
19509#define CB_COLOR0_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
19510#define CB_COLOR0_ATTRIB__RB_ALIGNED__SHIFT 0x1e
19511#define CB_COLOR0_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
19512#define CB_COLOR0_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
19513#define CB_COLOR0_ATTRIB__META_LINEAR_MASK 0x00000800L
19514#define CB_COLOR0_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
19515#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
19516#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
19517#define CB_COLOR0_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
19518#define CB_COLOR0_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
19519#define CB_COLOR0_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
19520#define CB_COLOR0_ATTRIB__RB_ALIGNED_MASK 0x40000000L
19521#define CB_COLOR0_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
19522//CB_COLOR0_DCC_CONTROL
19523#define CB_COLOR0_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
19524#define CB_COLOR0_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
19525#define CB_COLOR0_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
19526#define CB_COLOR0_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
19527#define CB_COLOR0_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
19528#define CB_COLOR0_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
19529#define CB_COLOR0_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
19530#define CB_COLOR0_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
19531#define CB_COLOR0_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
19532#define CB_COLOR0_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
19533#define CB_COLOR0_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
19534#define CB_COLOR0_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
19535#define CB_COLOR0_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
19536#define CB_COLOR0_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
19537#define CB_COLOR0_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
19538#define CB_COLOR0_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
19539#define CB_COLOR0_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
19540#define CB_COLOR0_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
19541//CB_COLOR0_CMASK
19542#define CB_COLOR0_CMASK__BASE_256B__SHIFT 0x0
19543#define CB_COLOR0_CMASK__BASE_256B_MASK 0xFFFFFFFFL
19544//CB_COLOR0_CMASK_BASE_EXT
19545#define CB_COLOR0_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
19546#define CB_COLOR0_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
19547//CB_COLOR0_FMASK
19548#define CB_COLOR0_FMASK__BASE_256B__SHIFT 0x0
19549#define CB_COLOR0_FMASK__BASE_256B_MASK 0xFFFFFFFFL
19550//CB_COLOR0_FMASK_BASE_EXT
19551#define CB_COLOR0_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
19552#define CB_COLOR0_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
19553//CB_COLOR0_CLEAR_WORD0
19554#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
19555#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
19556//CB_COLOR0_CLEAR_WORD1
19557#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
19558#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
19559//CB_COLOR0_DCC_BASE
19560#define CB_COLOR0_DCC_BASE__BASE_256B__SHIFT 0x0
19561#define CB_COLOR0_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
19562//CB_COLOR0_DCC_BASE_EXT
19563#define CB_COLOR0_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
19564#define CB_COLOR0_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
19565//CB_COLOR1_BASE
19566#define CB_COLOR1_BASE__BASE_256B__SHIFT 0x0
19567#define CB_COLOR1_BASE__BASE_256B_MASK 0xFFFFFFFFL
19568//CB_COLOR1_BASE_EXT
19569#define CB_COLOR1_BASE_EXT__BASE_256B__SHIFT 0x0
19570#define CB_COLOR1_BASE_EXT__BASE_256B_MASK 0x000000FFL
19571//CB_COLOR1_ATTRIB2
19572#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
19573#define CB_COLOR1_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
19574#define CB_COLOR1_ATTRIB2__MAX_MIP__SHIFT 0x1c
19575#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
19576#define CB_COLOR1_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
19577#define CB_COLOR1_ATTRIB2__MAX_MIP_MASK 0xF0000000L
19578//CB_COLOR1_VIEW
19579#define CB_COLOR1_VIEW__SLICE_START__SHIFT 0x0
19580#define CB_COLOR1_VIEW__SLICE_MAX__SHIFT 0xd
19581#define CB_COLOR1_VIEW__MIP_LEVEL__SHIFT 0x18
19582#define CB_COLOR1_VIEW__SLICE_START_MASK 0x000007FFL
19583#define CB_COLOR1_VIEW__SLICE_MAX_MASK 0x00FFE000L
19584#define CB_COLOR1_VIEW__MIP_LEVEL_MASK 0x0F000000L
19585//CB_COLOR1_INFO
19586#define CB_COLOR1_INFO__ENDIAN__SHIFT 0x0
19587#define CB_COLOR1_INFO__FORMAT__SHIFT 0x2
19588#define CB_COLOR1_INFO__NUMBER_TYPE__SHIFT 0x8
19589#define CB_COLOR1_INFO__COMP_SWAP__SHIFT 0xb
19590#define CB_COLOR1_INFO__FAST_CLEAR__SHIFT 0xd
19591#define CB_COLOR1_INFO__COMPRESSION__SHIFT 0xe
19592#define CB_COLOR1_INFO__BLEND_CLAMP__SHIFT 0xf
19593#define CB_COLOR1_INFO__BLEND_BYPASS__SHIFT 0x10
19594#define CB_COLOR1_INFO__SIMPLE_FLOAT__SHIFT 0x11
19595#define CB_COLOR1_INFO__ROUND_MODE__SHIFT 0x12
19596#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
19597#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
19598#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
19599#define CB_COLOR1_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
19600#define CB_COLOR1_INFO__DCC_ENABLE__SHIFT 0x1c
19601#define CB_COLOR1_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
19602#define CB_COLOR1_INFO__ENDIAN_MASK 0x00000003L
19603#define CB_COLOR1_INFO__FORMAT_MASK 0x0000007CL
19604#define CB_COLOR1_INFO__NUMBER_TYPE_MASK 0x00000700L
19605#define CB_COLOR1_INFO__COMP_SWAP_MASK 0x00001800L
19606#define CB_COLOR1_INFO__FAST_CLEAR_MASK 0x00002000L
19607#define CB_COLOR1_INFO__COMPRESSION_MASK 0x00004000L
19608#define CB_COLOR1_INFO__BLEND_CLAMP_MASK 0x00008000L
19609#define CB_COLOR1_INFO__BLEND_BYPASS_MASK 0x00010000L
19610#define CB_COLOR1_INFO__SIMPLE_FLOAT_MASK 0x00020000L
19611#define CB_COLOR1_INFO__ROUND_MODE_MASK 0x00040000L
19612#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
19613#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
19614#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
19615#define CB_COLOR1_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
19616#define CB_COLOR1_INFO__DCC_ENABLE_MASK 0x10000000L
19617#define CB_COLOR1_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
19618//CB_COLOR1_ATTRIB
19619#define CB_COLOR1_ATTRIB__MIP0_DEPTH__SHIFT 0x0
19620#define CB_COLOR1_ATTRIB__META_LINEAR__SHIFT 0xb
19621#define CB_COLOR1_ATTRIB__NUM_SAMPLES__SHIFT 0xc
19622#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
19623#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
19624#define CB_COLOR1_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
19625#define CB_COLOR1_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
19626#define CB_COLOR1_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
19627#define CB_COLOR1_ATTRIB__RB_ALIGNED__SHIFT 0x1e
19628#define CB_COLOR1_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
19629#define CB_COLOR1_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
19630#define CB_COLOR1_ATTRIB__META_LINEAR_MASK 0x00000800L
19631#define CB_COLOR1_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
19632#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
19633#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
19634#define CB_COLOR1_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
19635#define CB_COLOR1_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
19636#define CB_COLOR1_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
19637#define CB_COLOR1_ATTRIB__RB_ALIGNED_MASK 0x40000000L
19638#define CB_COLOR1_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
19639//CB_COLOR1_DCC_CONTROL
19640#define CB_COLOR1_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
19641#define CB_COLOR1_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
19642#define CB_COLOR1_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
19643#define CB_COLOR1_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
19644#define CB_COLOR1_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
19645#define CB_COLOR1_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
19646#define CB_COLOR1_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
19647#define CB_COLOR1_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
19648#define CB_COLOR1_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
19649#define CB_COLOR1_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
19650#define CB_COLOR1_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
19651#define CB_COLOR1_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
19652#define CB_COLOR1_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
19653#define CB_COLOR1_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
19654#define CB_COLOR1_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
19655#define CB_COLOR1_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
19656#define CB_COLOR1_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
19657#define CB_COLOR1_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
19658//CB_COLOR1_CMASK
19659#define CB_COLOR1_CMASK__BASE_256B__SHIFT 0x0
19660#define CB_COLOR1_CMASK__BASE_256B_MASK 0xFFFFFFFFL
19661//CB_COLOR1_CMASK_BASE_EXT
19662#define CB_COLOR1_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
19663#define CB_COLOR1_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
19664//CB_COLOR1_FMASK
19665#define CB_COLOR1_FMASK__BASE_256B__SHIFT 0x0
19666#define CB_COLOR1_FMASK__BASE_256B_MASK 0xFFFFFFFFL
19667//CB_COLOR1_FMASK_BASE_EXT
19668#define CB_COLOR1_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
19669#define CB_COLOR1_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
19670//CB_COLOR1_CLEAR_WORD0
19671#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
19672#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
19673//CB_COLOR1_CLEAR_WORD1
19674#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
19675#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
19676//CB_COLOR1_DCC_BASE
19677#define CB_COLOR1_DCC_BASE__BASE_256B__SHIFT 0x0
19678#define CB_COLOR1_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
19679//CB_COLOR1_DCC_BASE_EXT
19680#define CB_COLOR1_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
19681#define CB_COLOR1_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
19682//CB_COLOR2_BASE
19683#define CB_COLOR2_BASE__BASE_256B__SHIFT 0x0
19684#define CB_COLOR2_BASE__BASE_256B_MASK 0xFFFFFFFFL
19685//CB_COLOR2_BASE_EXT
19686#define CB_COLOR2_BASE_EXT__BASE_256B__SHIFT 0x0
19687#define CB_COLOR2_BASE_EXT__BASE_256B_MASK 0x000000FFL
19688//CB_COLOR2_ATTRIB2
19689#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
19690#define CB_COLOR2_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
19691#define CB_COLOR2_ATTRIB2__MAX_MIP__SHIFT 0x1c
19692#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
19693#define CB_COLOR2_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
19694#define CB_COLOR2_ATTRIB2__MAX_MIP_MASK 0xF0000000L
19695//CB_COLOR2_VIEW
19696#define CB_COLOR2_VIEW__SLICE_START__SHIFT 0x0
19697#define CB_COLOR2_VIEW__SLICE_MAX__SHIFT 0xd
19698#define CB_COLOR2_VIEW__MIP_LEVEL__SHIFT 0x18
19699#define CB_COLOR2_VIEW__SLICE_START_MASK 0x000007FFL
19700#define CB_COLOR2_VIEW__SLICE_MAX_MASK 0x00FFE000L
19701#define CB_COLOR2_VIEW__MIP_LEVEL_MASK 0x0F000000L
19702//CB_COLOR2_INFO
19703#define CB_COLOR2_INFO__ENDIAN__SHIFT 0x0
19704#define CB_COLOR2_INFO__FORMAT__SHIFT 0x2
19705#define CB_COLOR2_INFO__NUMBER_TYPE__SHIFT 0x8
19706#define CB_COLOR2_INFO__COMP_SWAP__SHIFT 0xb
19707#define CB_COLOR2_INFO__FAST_CLEAR__SHIFT 0xd
19708#define CB_COLOR2_INFO__COMPRESSION__SHIFT 0xe
19709#define CB_COLOR2_INFO__BLEND_CLAMP__SHIFT 0xf
19710#define CB_COLOR2_INFO__BLEND_BYPASS__SHIFT 0x10
19711#define CB_COLOR2_INFO__SIMPLE_FLOAT__SHIFT 0x11
19712#define CB_COLOR2_INFO__ROUND_MODE__SHIFT 0x12
19713#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
19714#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
19715#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
19716#define CB_COLOR2_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
19717#define CB_COLOR2_INFO__DCC_ENABLE__SHIFT 0x1c
19718#define CB_COLOR2_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
19719#define CB_COLOR2_INFO__ENDIAN_MASK 0x00000003L
19720#define CB_COLOR2_INFO__FORMAT_MASK 0x0000007CL
19721#define CB_COLOR2_INFO__NUMBER_TYPE_MASK 0x00000700L
19722#define CB_COLOR2_INFO__COMP_SWAP_MASK 0x00001800L
19723#define CB_COLOR2_INFO__FAST_CLEAR_MASK 0x00002000L
19724#define CB_COLOR2_INFO__COMPRESSION_MASK 0x00004000L
19725#define CB_COLOR2_INFO__BLEND_CLAMP_MASK 0x00008000L
19726#define CB_COLOR2_INFO__BLEND_BYPASS_MASK 0x00010000L
19727#define CB_COLOR2_INFO__SIMPLE_FLOAT_MASK 0x00020000L
19728#define CB_COLOR2_INFO__ROUND_MODE_MASK 0x00040000L
19729#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
19730#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
19731#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
19732#define CB_COLOR2_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
19733#define CB_COLOR2_INFO__DCC_ENABLE_MASK 0x10000000L
19734#define CB_COLOR2_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
19735//CB_COLOR2_ATTRIB
19736#define CB_COLOR2_ATTRIB__MIP0_DEPTH__SHIFT 0x0
19737#define CB_COLOR2_ATTRIB__META_LINEAR__SHIFT 0xb
19738#define CB_COLOR2_ATTRIB__NUM_SAMPLES__SHIFT 0xc
19739#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
19740#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
19741#define CB_COLOR2_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
19742#define CB_COLOR2_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
19743#define CB_COLOR2_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
19744#define CB_COLOR2_ATTRIB__RB_ALIGNED__SHIFT 0x1e
19745#define CB_COLOR2_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
19746#define CB_COLOR2_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
19747#define CB_COLOR2_ATTRIB__META_LINEAR_MASK 0x00000800L
19748#define CB_COLOR2_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
19749#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
19750#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
19751#define CB_COLOR2_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
19752#define CB_COLOR2_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
19753#define CB_COLOR2_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
19754#define CB_COLOR2_ATTRIB__RB_ALIGNED_MASK 0x40000000L
19755#define CB_COLOR2_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
19756//CB_COLOR2_DCC_CONTROL
19757#define CB_COLOR2_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
19758#define CB_COLOR2_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
19759#define CB_COLOR2_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
19760#define CB_COLOR2_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
19761#define CB_COLOR2_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
19762#define CB_COLOR2_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
19763#define CB_COLOR2_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
19764#define CB_COLOR2_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
19765#define CB_COLOR2_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
19766#define CB_COLOR2_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
19767#define CB_COLOR2_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
19768#define CB_COLOR2_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
19769#define CB_COLOR2_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
19770#define CB_COLOR2_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
19771#define CB_COLOR2_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
19772#define CB_COLOR2_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
19773#define CB_COLOR2_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
19774#define CB_COLOR2_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
19775//CB_COLOR2_CMASK
19776#define CB_COLOR2_CMASK__BASE_256B__SHIFT 0x0
19777#define CB_COLOR2_CMASK__BASE_256B_MASK 0xFFFFFFFFL
19778//CB_COLOR2_CMASK_BASE_EXT
19779#define CB_COLOR2_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
19780#define CB_COLOR2_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
19781//CB_COLOR2_FMASK
19782#define CB_COLOR2_FMASK__BASE_256B__SHIFT 0x0
19783#define CB_COLOR2_FMASK__BASE_256B_MASK 0xFFFFFFFFL
19784//CB_COLOR2_FMASK_BASE_EXT
19785#define CB_COLOR2_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
19786#define CB_COLOR2_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
19787//CB_COLOR2_CLEAR_WORD0
19788#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
19789#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
19790//CB_COLOR2_CLEAR_WORD1
19791#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
19792#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
19793//CB_COLOR2_DCC_BASE
19794#define CB_COLOR2_DCC_BASE__BASE_256B__SHIFT 0x0
19795#define CB_COLOR2_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
19796//CB_COLOR2_DCC_BASE_EXT
19797#define CB_COLOR2_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
19798#define CB_COLOR2_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
19799//CB_COLOR3_BASE
19800#define CB_COLOR3_BASE__BASE_256B__SHIFT 0x0
19801#define CB_COLOR3_BASE__BASE_256B_MASK 0xFFFFFFFFL
19802//CB_COLOR3_BASE_EXT
19803#define CB_COLOR3_BASE_EXT__BASE_256B__SHIFT 0x0
19804#define CB_COLOR3_BASE_EXT__BASE_256B_MASK 0x000000FFL
19805//CB_COLOR3_ATTRIB2
19806#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
19807#define CB_COLOR3_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
19808#define CB_COLOR3_ATTRIB2__MAX_MIP__SHIFT 0x1c
19809#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
19810#define CB_COLOR3_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
19811#define CB_COLOR3_ATTRIB2__MAX_MIP_MASK 0xF0000000L
19812//CB_COLOR3_VIEW
19813#define CB_COLOR3_VIEW__SLICE_START__SHIFT 0x0
19814#define CB_COLOR3_VIEW__SLICE_MAX__SHIFT 0xd
19815#define CB_COLOR3_VIEW__MIP_LEVEL__SHIFT 0x18
19816#define CB_COLOR3_VIEW__SLICE_START_MASK 0x000007FFL
19817#define CB_COLOR3_VIEW__SLICE_MAX_MASK 0x00FFE000L
19818#define CB_COLOR3_VIEW__MIP_LEVEL_MASK 0x0F000000L
19819//CB_COLOR3_INFO
19820#define CB_COLOR3_INFO__ENDIAN__SHIFT 0x0
19821#define CB_COLOR3_INFO__FORMAT__SHIFT 0x2
19822#define CB_COLOR3_INFO__NUMBER_TYPE__SHIFT 0x8
19823#define CB_COLOR3_INFO__COMP_SWAP__SHIFT 0xb
19824#define CB_COLOR3_INFO__FAST_CLEAR__SHIFT 0xd
19825#define CB_COLOR3_INFO__COMPRESSION__SHIFT 0xe
19826#define CB_COLOR3_INFO__BLEND_CLAMP__SHIFT 0xf
19827#define CB_COLOR3_INFO__BLEND_BYPASS__SHIFT 0x10
19828#define CB_COLOR3_INFO__SIMPLE_FLOAT__SHIFT 0x11
19829#define CB_COLOR3_INFO__ROUND_MODE__SHIFT 0x12
19830#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
19831#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
19832#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
19833#define CB_COLOR3_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
19834#define CB_COLOR3_INFO__DCC_ENABLE__SHIFT 0x1c
19835#define CB_COLOR3_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
19836#define CB_COLOR3_INFO__ENDIAN_MASK 0x00000003L
19837#define CB_COLOR3_INFO__FORMAT_MASK 0x0000007CL
19838#define CB_COLOR3_INFO__NUMBER_TYPE_MASK 0x00000700L
19839#define CB_COLOR3_INFO__COMP_SWAP_MASK 0x00001800L
19840#define CB_COLOR3_INFO__FAST_CLEAR_MASK 0x00002000L
19841#define CB_COLOR3_INFO__COMPRESSION_MASK 0x00004000L
19842#define CB_COLOR3_INFO__BLEND_CLAMP_MASK 0x00008000L
19843#define CB_COLOR3_INFO__BLEND_BYPASS_MASK 0x00010000L
19844#define CB_COLOR3_INFO__SIMPLE_FLOAT_MASK 0x00020000L
19845#define CB_COLOR3_INFO__ROUND_MODE_MASK 0x00040000L
19846#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
19847#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
19848#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
19849#define CB_COLOR3_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
19850#define CB_COLOR3_INFO__DCC_ENABLE_MASK 0x10000000L
19851#define CB_COLOR3_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
19852//CB_COLOR3_ATTRIB
19853#define CB_COLOR3_ATTRIB__MIP0_DEPTH__SHIFT 0x0
19854#define CB_COLOR3_ATTRIB__META_LINEAR__SHIFT 0xb
19855#define CB_COLOR3_ATTRIB__NUM_SAMPLES__SHIFT 0xc
19856#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
19857#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
19858#define CB_COLOR3_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
19859#define CB_COLOR3_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
19860#define CB_COLOR3_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
19861#define CB_COLOR3_ATTRIB__RB_ALIGNED__SHIFT 0x1e
19862#define CB_COLOR3_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
19863#define CB_COLOR3_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
19864#define CB_COLOR3_ATTRIB__META_LINEAR_MASK 0x00000800L
19865#define CB_COLOR3_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
19866#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
19867#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
19868#define CB_COLOR3_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
19869#define CB_COLOR3_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
19870#define CB_COLOR3_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
19871#define CB_COLOR3_ATTRIB__RB_ALIGNED_MASK 0x40000000L
19872#define CB_COLOR3_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
19873//CB_COLOR3_DCC_CONTROL
19874#define CB_COLOR3_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
19875#define CB_COLOR3_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
19876#define CB_COLOR3_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
19877#define CB_COLOR3_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
19878#define CB_COLOR3_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
19879#define CB_COLOR3_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
19880#define CB_COLOR3_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
19881#define CB_COLOR3_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
19882#define CB_COLOR3_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
19883#define CB_COLOR3_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
19884#define CB_COLOR3_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
19885#define CB_COLOR3_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
19886#define CB_COLOR3_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
19887#define CB_COLOR3_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
19888#define CB_COLOR3_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
19889#define CB_COLOR3_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
19890#define CB_COLOR3_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
19891#define CB_COLOR3_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
19892//CB_COLOR3_CMASK
19893#define CB_COLOR3_CMASK__BASE_256B__SHIFT 0x0
19894#define CB_COLOR3_CMASK__BASE_256B_MASK 0xFFFFFFFFL
19895//CB_COLOR3_CMASK_BASE_EXT
19896#define CB_COLOR3_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
19897#define CB_COLOR3_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
19898//CB_COLOR3_FMASK
19899#define CB_COLOR3_FMASK__BASE_256B__SHIFT 0x0
19900#define CB_COLOR3_FMASK__BASE_256B_MASK 0xFFFFFFFFL
19901//CB_COLOR3_FMASK_BASE_EXT
19902#define CB_COLOR3_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
19903#define CB_COLOR3_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
19904//CB_COLOR3_CLEAR_WORD0
19905#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
19906#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
19907//CB_COLOR3_CLEAR_WORD1
19908#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
19909#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
19910//CB_COLOR3_DCC_BASE
19911#define CB_COLOR3_DCC_BASE__BASE_256B__SHIFT 0x0
19912#define CB_COLOR3_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
19913//CB_COLOR3_DCC_BASE_EXT
19914#define CB_COLOR3_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
19915#define CB_COLOR3_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
19916//CB_COLOR4_BASE
19917#define CB_COLOR4_BASE__BASE_256B__SHIFT 0x0
19918#define CB_COLOR4_BASE__BASE_256B_MASK 0xFFFFFFFFL
19919//CB_COLOR4_BASE_EXT
19920#define CB_COLOR4_BASE_EXT__BASE_256B__SHIFT 0x0
19921#define CB_COLOR4_BASE_EXT__BASE_256B_MASK 0x000000FFL
19922//CB_COLOR4_ATTRIB2
19923#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
19924#define CB_COLOR4_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
19925#define CB_COLOR4_ATTRIB2__MAX_MIP__SHIFT 0x1c
19926#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
19927#define CB_COLOR4_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
19928#define CB_COLOR4_ATTRIB2__MAX_MIP_MASK 0xF0000000L
19929//CB_COLOR4_VIEW
19930#define CB_COLOR4_VIEW__SLICE_START__SHIFT 0x0
19931#define CB_COLOR4_VIEW__SLICE_MAX__SHIFT 0xd
19932#define CB_COLOR4_VIEW__MIP_LEVEL__SHIFT 0x18
19933#define CB_COLOR4_VIEW__SLICE_START_MASK 0x000007FFL
19934#define CB_COLOR4_VIEW__SLICE_MAX_MASK 0x00FFE000L
19935#define CB_COLOR4_VIEW__MIP_LEVEL_MASK 0x0F000000L
19936//CB_COLOR4_INFO
19937#define CB_COLOR4_INFO__ENDIAN__SHIFT 0x0
19938#define CB_COLOR4_INFO__FORMAT__SHIFT 0x2
19939#define CB_COLOR4_INFO__NUMBER_TYPE__SHIFT 0x8
19940#define CB_COLOR4_INFO__COMP_SWAP__SHIFT 0xb
19941#define CB_COLOR4_INFO__FAST_CLEAR__SHIFT 0xd
19942#define CB_COLOR4_INFO__COMPRESSION__SHIFT 0xe
19943#define CB_COLOR4_INFO__BLEND_CLAMP__SHIFT 0xf
19944#define CB_COLOR4_INFO__BLEND_BYPASS__SHIFT 0x10
19945#define CB_COLOR4_INFO__SIMPLE_FLOAT__SHIFT 0x11
19946#define CB_COLOR4_INFO__ROUND_MODE__SHIFT 0x12
19947#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
19948#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
19949#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
19950#define CB_COLOR4_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
19951#define CB_COLOR4_INFO__DCC_ENABLE__SHIFT 0x1c
19952#define CB_COLOR4_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
19953#define CB_COLOR4_INFO__ENDIAN_MASK 0x00000003L
19954#define CB_COLOR4_INFO__FORMAT_MASK 0x0000007CL
19955#define CB_COLOR4_INFO__NUMBER_TYPE_MASK 0x00000700L
19956#define CB_COLOR4_INFO__COMP_SWAP_MASK 0x00001800L
19957#define CB_COLOR4_INFO__FAST_CLEAR_MASK 0x00002000L
19958#define CB_COLOR4_INFO__COMPRESSION_MASK 0x00004000L
19959#define CB_COLOR4_INFO__BLEND_CLAMP_MASK 0x00008000L
19960#define CB_COLOR4_INFO__BLEND_BYPASS_MASK 0x00010000L
19961#define CB_COLOR4_INFO__SIMPLE_FLOAT_MASK 0x00020000L
19962#define CB_COLOR4_INFO__ROUND_MODE_MASK 0x00040000L
19963#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
19964#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
19965#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
19966#define CB_COLOR4_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
19967#define CB_COLOR4_INFO__DCC_ENABLE_MASK 0x10000000L
19968#define CB_COLOR4_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
19969//CB_COLOR4_ATTRIB
19970#define CB_COLOR4_ATTRIB__MIP0_DEPTH__SHIFT 0x0
19971#define CB_COLOR4_ATTRIB__META_LINEAR__SHIFT 0xb
19972#define CB_COLOR4_ATTRIB__NUM_SAMPLES__SHIFT 0xc
19973#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
19974#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
19975#define CB_COLOR4_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
19976#define CB_COLOR4_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
19977#define CB_COLOR4_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
19978#define CB_COLOR4_ATTRIB__RB_ALIGNED__SHIFT 0x1e
19979#define CB_COLOR4_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
19980#define CB_COLOR4_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
19981#define CB_COLOR4_ATTRIB__META_LINEAR_MASK 0x00000800L
19982#define CB_COLOR4_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
19983#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
19984#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
19985#define CB_COLOR4_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
19986#define CB_COLOR4_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
19987#define CB_COLOR4_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
19988#define CB_COLOR4_ATTRIB__RB_ALIGNED_MASK 0x40000000L
19989#define CB_COLOR4_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
19990//CB_COLOR4_DCC_CONTROL
19991#define CB_COLOR4_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
19992#define CB_COLOR4_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
19993#define CB_COLOR4_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
19994#define CB_COLOR4_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
19995#define CB_COLOR4_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
19996#define CB_COLOR4_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
19997#define CB_COLOR4_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
19998#define CB_COLOR4_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
19999#define CB_COLOR4_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
20000#define CB_COLOR4_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
20001#define CB_COLOR4_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
20002#define CB_COLOR4_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
20003#define CB_COLOR4_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
20004#define CB_COLOR4_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
20005#define CB_COLOR4_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
20006#define CB_COLOR4_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
20007#define CB_COLOR4_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
20008#define CB_COLOR4_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
20009//CB_COLOR4_CMASK
20010#define CB_COLOR4_CMASK__BASE_256B__SHIFT 0x0
20011#define CB_COLOR4_CMASK__BASE_256B_MASK 0xFFFFFFFFL
20012//CB_COLOR4_CMASK_BASE_EXT
20013#define CB_COLOR4_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
20014#define CB_COLOR4_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
20015//CB_COLOR4_FMASK
20016#define CB_COLOR4_FMASK__BASE_256B__SHIFT 0x0
20017#define CB_COLOR4_FMASK__BASE_256B_MASK 0xFFFFFFFFL
20018//CB_COLOR4_FMASK_BASE_EXT
20019#define CB_COLOR4_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
20020#define CB_COLOR4_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
20021//CB_COLOR4_CLEAR_WORD0
20022#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
20023#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
20024//CB_COLOR4_CLEAR_WORD1
20025#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
20026#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
20027//CB_COLOR4_DCC_BASE
20028#define CB_COLOR4_DCC_BASE__BASE_256B__SHIFT 0x0
20029#define CB_COLOR4_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
20030//CB_COLOR4_DCC_BASE_EXT
20031#define CB_COLOR4_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
20032#define CB_COLOR4_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
20033//CB_COLOR5_BASE
20034#define CB_COLOR5_BASE__BASE_256B__SHIFT 0x0
20035#define CB_COLOR5_BASE__BASE_256B_MASK 0xFFFFFFFFL
20036//CB_COLOR5_BASE_EXT
20037#define CB_COLOR5_BASE_EXT__BASE_256B__SHIFT 0x0
20038#define CB_COLOR5_BASE_EXT__BASE_256B_MASK 0x000000FFL
20039//CB_COLOR5_ATTRIB2
20040#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
20041#define CB_COLOR5_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
20042#define CB_COLOR5_ATTRIB2__MAX_MIP__SHIFT 0x1c
20043#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
20044#define CB_COLOR5_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
20045#define CB_COLOR5_ATTRIB2__MAX_MIP_MASK 0xF0000000L
20046//CB_COLOR5_VIEW
20047#define CB_COLOR5_VIEW__SLICE_START__SHIFT 0x0
20048#define CB_COLOR5_VIEW__SLICE_MAX__SHIFT 0xd
20049#define CB_COLOR5_VIEW__MIP_LEVEL__SHIFT 0x18
20050#define CB_COLOR5_VIEW__SLICE_START_MASK 0x000007FFL
20051#define CB_COLOR5_VIEW__SLICE_MAX_MASK 0x00FFE000L
20052#define CB_COLOR5_VIEW__MIP_LEVEL_MASK 0x0F000000L
20053//CB_COLOR5_INFO
20054#define CB_COLOR5_INFO__ENDIAN__SHIFT 0x0
20055#define CB_COLOR5_INFO__FORMAT__SHIFT 0x2
20056#define CB_COLOR5_INFO__NUMBER_TYPE__SHIFT 0x8
20057#define CB_COLOR5_INFO__COMP_SWAP__SHIFT 0xb
20058#define CB_COLOR5_INFO__FAST_CLEAR__SHIFT 0xd
20059#define CB_COLOR5_INFO__COMPRESSION__SHIFT 0xe
20060#define CB_COLOR5_INFO__BLEND_CLAMP__SHIFT 0xf
20061#define CB_COLOR5_INFO__BLEND_BYPASS__SHIFT 0x10
20062#define CB_COLOR5_INFO__SIMPLE_FLOAT__SHIFT 0x11
20063#define CB_COLOR5_INFO__ROUND_MODE__SHIFT 0x12
20064#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
20065#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
20066#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
20067#define CB_COLOR5_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
20068#define CB_COLOR5_INFO__DCC_ENABLE__SHIFT 0x1c
20069#define CB_COLOR5_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
20070#define CB_COLOR5_INFO__ENDIAN_MASK 0x00000003L
20071#define CB_COLOR5_INFO__FORMAT_MASK 0x0000007CL
20072#define CB_COLOR5_INFO__NUMBER_TYPE_MASK 0x00000700L
20073#define CB_COLOR5_INFO__COMP_SWAP_MASK 0x00001800L
20074#define CB_COLOR5_INFO__FAST_CLEAR_MASK 0x00002000L
20075#define CB_COLOR5_INFO__COMPRESSION_MASK 0x00004000L
20076#define CB_COLOR5_INFO__BLEND_CLAMP_MASK 0x00008000L
20077#define CB_COLOR5_INFO__BLEND_BYPASS_MASK 0x00010000L
20078#define CB_COLOR5_INFO__SIMPLE_FLOAT_MASK 0x00020000L
20079#define CB_COLOR5_INFO__ROUND_MODE_MASK 0x00040000L
20080#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
20081#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
20082#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
20083#define CB_COLOR5_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
20084#define CB_COLOR5_INFO__DCC_ENABLE_MASK 0x10000000L
20085#define CB_COLOR5_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
20086//CB_COLOR5_ATTRIB
20087#define CB_COLOR5_ATTRIB__MIP0_DEPTH__SHIFT 0x0
20088#define CB_COLOR5_ATTRIB__META_LINEAR__SHIFT 0xb
20089#define CB_COLOR5_ATTRIB__NUM_SAMPLES__SHIFT 0xc
20090#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
20091#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
20092#define CB_COLOR5_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
20093#define CB_COLOR5_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
20094#define CB_COLOR5_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
20095#define CB_COLOR5_ATTRIB__RB_ALIGNED__SHIFT 0x1e
20096#define CB_COLOR5_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
20097#define CB_COLOR5_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
20098#define CB_COLOR5_ATTRIB__META_LINEAR_MASK 0x00000800L
20099#define CB_COLOR5_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
20100#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
20101#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
20102#define CB_COLOR5_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
20103#define CB_COLOR5_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
20104#define CB_COLOR5_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
20105#define CB_COLOR5_ATTRIB__RB_ALIGNED_MASK 0x40000000L
20106#define CB_COLOR5_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
20107//CB_COLOR5_DCC_CONTROL
20108#define CB_COLOR5_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
20109#define CB_COLOR5_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
20110#define CB_COLOR5_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
20111#define CB_COLOR5_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
20112#define CB_COLOR5_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
20113#define CB_COLOR5_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
20114#define CB_COLOR5_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
20115#define CB_COLOR5_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
20116#define CB_COLOR5_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
20117#define CB_COLOR5_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
20118#define CB_COLOR5_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
20119#define CB_COLOR5_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
20120#define CB_COLOR5_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
20121#define CB_COLOR5_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
20122#define CB_COLOR5_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
20123#define CB_COLOR5_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
20124#define CB_COLOR5_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
20125#define CB_COLOR5_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
20126//CB_COLOR5_CMASK
20127#define CB_COLOR5_CMASK__BASE_256B__SHIFT 0x0
20128#define CB_COLOR5_CMASK__BASE_256B_MASK 0xFFFFFFFFL
20129//CB_COLOR5_CMASK_BASE_EXT
20130#define CB_COLOR5_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
20131#define CB_COLOR5_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
20132//CB_COLOR5_FMASK
20133#define CB_COLOR5_FMASK__BASE_256B__SHIFT 0x0
20134#define CB_COLOR5_FMASK__BASE_256B_MASK 0xFFFFFFFFL
20135//CB_COLOR5_FMASK_BASE_EXT
20136#define CB_COLOR5_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
20137#define CB_COLOR5_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
20138//CB_COLOR5_CLEAR_WORD0
20139#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
20140#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
20141//CB_COLOR5_CLEAR_WORD1
20142#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
20143#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
20144//CB_COLOR5_DCC_BASE
20145#define CB_COLOR5_DCC_BASE__BASE_256B__SHIFT 0x0
20146#define CB_COLOR5_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
20147//CB_COLOR5_DCC_BASE_EXT
20148#define CB_COLOR5_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
20149#define CB_COLOR5_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
20150//CB_COLOR6_BASE
20151#define CB_COLOR6_BASE__BASE_256B__SHIFT 0x0
20152#define CB_COLOR6_BASE__BASE_256B_MASK 0xFFFFFFFFL
20153//CB_COLOR6_BASE_EXT
20154#define CB_COLOR6_BASE_EXT__BASE_256B__SHIFT 0x0
20155#define CB_COLOR6_BASE_EXT__BASE_256B_MASK 0x000000FFL
20156//CB_COLOR6_ATTRIB2
20157#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
20158#define CB_COLOR6_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
20159#define CB_COLOR6_ATTRIB2__MAX_MIP__SHIFT 0x1c
20160#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
20161#define CB_COLOR6_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
20162#define CB_COLOR6_ATTRIB2__MAX_MIP_MASK 0xF0000000L
20163//CB_COLOR6_VIEW
20164#define CB_COLOR6_VIEW__SLICE_START__SHIFT 0x0
20165#define CB_COLOR6_VIEW__SLICE_MAX__SHIFT 0xd
20166#define CB_COLOR6_VIEW__MIP_LEVEL__SHIFT 0x18
20167#define CB_COLOR6_VIEW__SLICE_START_MASK 0x000007FFL
20168#define CB_COLOR6_VIEW__SLICE_MAX_MASK 0x00FFE000L
20169#define CB_COLOR6_VIEW__MIP_LEVEL_MASK 0x0F000000L
20170//CB_COLOR6_INFO
20171#define CB_COLOR6_INFO__ENDIAN__SHIFT 0x0
20172#define CB_COLOR6_INFO__FORMAT__SHIFT 0x2
20173#define CB_COLOR6_INFO__NUMBER_TYPE__SHIFT 0x8
20174#define CB_COLOR6_INFO__COMP_SWAP__SHIFT 0xb
20175#define CB_COLOR6_INFO__FAST_CLEAR__SHIFT 0xd
20176#define CB_COLOR6_INFO__COMPRESSION__SHIFT 0xe
20177#define CB_COLOR6_INFO__BLEND_CLAMP__SHIFT 0xf
20178#define CB_COLOR6_INFO__BLEND_BYPASS__SHIFT 0x10
20179#define CB_COLOR6_INFO__SIMPLE_FLOAT__SHIFT 0x11
20180#define CB_COLOR6_INFO__ROUND_MODE__SHIFT 0x12
20181#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
20182#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
20183#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
20184#define CB_COLOR6_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
20185#define CB_COLOR6_INFO__DCC_ENABLE__SHIFT 0x1c
20186#define CB_COLOR6_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
20187#define CB_COLOR6_INFO__ENDIAN_MASK 0x00000003L
20188#define CB_COLOR6_INFO__FORMAT_MASK 0x0000007CL
20189#define CB_COLOR6_INFO__NUMBER_TYPE_MASK 0x00000700L
20190#define CB_COLOR6_INFO__COMP_SWAP_MASK 0x00001800L
20191#define CB_COLOR6_INFO__FAST_CLEAR_MASK 0x00002000L
20192#define CB_COLOR6_INFO__COMPRESSION_MASK 0x00004000L
20193#define CB_COLOR6_INFO__BLEND_CLAMP_MASK 0x00008000L
20194#define CB_COLOR6_INFO__BLEND_BYPASS_MASK 0x00010000L
20195#define CB_COLOR6_INFO__SIMPLE_FLOAT_MASK 0x00020000L
20196#define CB_COLOR6_INFO__ROUND_MODE_MASK 0x00040000L
20197#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
20198#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
20199#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
20200#define CB_COLOR6_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
20201#define CB_COLOR6_INFO__DCC_ENABLE_MASK 0x10000000L
20202#define CB_COLOR6_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
20203//CB_COLOR6_ATTRIB
20204#define CB_COLOR6_ATTRIB__MIP0_DEPTH__SHIFT 0x0
20205#define CB_COLOR6_ATTRIB__META_LINEAR__SHIFT 0xb
20206#define CB_COLOR6_ATTRIB__NUM_SAMPLES__SHIFT 0xc
20207#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
20208#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
20209#define CB_COLOR6_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
20210#define CB_COLOR6_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
20211#define CB_COLOR6_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
20212#define CB_COLOR6_ATTRIB__RB_ALIGNED__SHIFT 0x1e
20213#define CB_COLOR6_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
20214#define CB_COLOR6_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
20215#define CB_COLOR6_ATTRIB__META_LINEAR_MASK 0x00000800L
20216#define CB_COLOR6_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
20217#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
20218#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
20219#define CB_COLOR6_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
20220#define CB_COLOR6_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
20221#define CB_COLOR6_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
20222#define CB_COLOR6_ATTRIB__RB_ALIGNED_MASK 0x40000000L
20223#define CB_COLOR6_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
20224//CB_COLOR6_DCC_CONTROL
20225#define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
20226#define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
20227#define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
20228#define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
20229#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
20230#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
20231#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
20232#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
20233#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
20234#define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
20235#define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
20236#define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
20237#define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
20238#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
20239#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
20240#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
20241#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
20242#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
20243//CB_COLOR6_CMASK
20244#define CB_COLOR6_CMASK__BASE_256B__SHIFT 0x0
20245#define CB_COLOR6_CMASK__BASE_256B_MASK 0xFFFFFFFFL
20246//CB_COLOR6_CMASK_BASE_EXT
20247#define CB_COLOR6_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
20248#define CB_COLOR6_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
20249//CB_COLOR6_FMASK
20250#define CB_COLOR6_FMASK__BASE_256B__SHIFT 0x0
20251#define CB_COLOR6_FMASK__BASE_256B_MASK 0xFFFFFFFFL
20252//CB_COLOR6_FMASK_BASE_EXT
20253#define CB_COLOR6_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
20254#define CB_COLOR6_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
20255//CB_COLOR6_CLEAR_WORD0
20256#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
20257#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
20258//CB_COLOR6_CLEAR_WORD1
20259#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
20260#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
20261//CB_COLOR6_DCC_BASE
20262#define CB_COLOR6_DCC_BASE__BASE_256B__SHIFT 0x0
20263#define CB_COLOR6_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
20264//CB_COLOR6_DCC_BASE_EXT
20265#define CB_COLOR6_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
20266#define CB_COLOR6_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
20267//CB_COLOR7_BASE
20268#define CB_COLOR7_BASE__BASE_256B__SHIFT 0x0
20269#define CB_COLOR7_BASE__BASE_256B_MASK 0xFFFFFFFFL
20270//CB_COLOR7_BASE_EXT
20271#define CB_COLOR7_BASE_EXT__BASE_256B__SHIFT 0x0
20272#define CB_COLOR7_BASE_EXT__BASE_256B_MASK 0x000000FFL
20273//CB_COLOR7_ATTRIB2
20274#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
20275#define CB_COLOR7_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
20276#define CB_COLOR7_ATTRIB2__MAX_MIP__SHIFT 0x1c
20277#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
20278#define CB_COLOR7_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
20279#define CB_COLOR7_ATTRIB2__MAX_MIP_MASK 0xF0000000L
20280//CB_COLOR7_VIEW
20281#define CB_COLOR7_VIEW__SLICE_START__SHIFT 0x0
20282#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT 0xd
20283#define CB_COLOR7_VIEW__MIP_LEVEL__SHIFT 0x18
20284#define CB_COLOR7_VIEW__SLICE_START_MASK 0x000007FFL
20285#define CB_COLOR7_VIEW__SLICE_MAX_MASK 0x00FFE000L
20286#define CB_COLOR7_VIEW__MIP_LEVEL_MASK 0x0F000000L
20287//CB_COLOR7_INFO
20288#define CB_COLOR7_INFO__ENDIAN__SHIFT 0x0
20289#define CB_COLOR7_INFO__FORMAT__SHIFT 0x2
20290#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT 0x8
20291#define CB_COLOR7_INFO__COMP_SWAP__SHIFT 0xb
20292#define CB_COLOR7_INFO__FAST_CLEAR__SHIFT 0xd
20293#define CB_COLOR7_INFO__COMPRESSION__SHIFT 0xe
20294#define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT 0xf
20295#define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT 0x10
20296#define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT 0x11
20297#define CB_COLOR7_INFO__ROUND_MODE__SHIFT 0x12
20298#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
20299#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
20300#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
20301#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
20302#define CB_COLOR7_INFO__DCC_ENABLE__SHIFT 0x1c
20303#define CB_COLOR7_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
20304#define CB_COLOR7_INFO__ENDIAN_MASK 0x00000003L
20305#define CB_COLOR7_INFO__FORMAT_MASK 0x0000007CL
20306#define CB_COLOR7_INFO__NUMBER_TYPE_MASK 0x00000700L
20307#define CB_COLOR7_INFO__COMP_SWAP_MASK 0x00001800L
20308#define CB_COLOR7_INFO__FAST_CLEAR_MASK 0x00002000L
20309#define CB_COLOR7_INFO__COMPRESSION_MASK 0x00004000L
20310#define CB_COLOR7_INFO__BLEND_CLAMP_MASK 0x00008000L
20311#define CB_COLOR7_INFO__BLEND_BYPASS_MASK 0x00010000L
20312#define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK 0x00020000L
20313#define CB_COLOR7_INFO__ROUND_MODE_MASK 0x00040000L
20314#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
20315#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
20316#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
20317#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
20318#define CB_COLOR7_INFO__DCC_ENABLE_MASK 0x10000000L
20319#define CB_COLOR7_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
20320//CB_COLOR7_ATTRIB
20321#define CB_COLOR7_ATTRIB__MIP0_DEPTH__SHIFT 0x0
20322#define CB_COLOR7_ATTRIB__META_LINEAR__SHIFT 0xb
20323#define CB_COLOR7_ATTRIB__NUM_SAMPLES__SHIFT 0xc
20324#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
20325#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
20326#define CB_COLOR7_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
20327#define CB_COLOR7_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
20328#define CB_COLOR7_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
20329#define CB_COLOR7_ATTRIB__RB_ALIGNED__SHIFT 0x1e
20330#define CB_COLOR7_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
20331#define CB_COLOR7_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
20332#define CB_COLOR7_ATTRIB__META_LINEAR_MASK 0x00000800L
20333#define CB_COLOR7_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
20334#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
20335#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
20336#define CB_COLOR7_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
20337#define CB_COLOR7_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
20338#define CB_COLOR7_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
20339#define CB_COLOR7_ATTRIB__RB_ALIGNED_MASK 0x40000000L
20340#define CB_COLOR7_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
20341//CB_COLOR7_DCC_CONTROL
20342#define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
20343#define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
20344#define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
20345#define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
20346#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
20347#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
20348#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
20349#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
20350#define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
20351#define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
20352#define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
20353#define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
20354#define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
20355#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
20356#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
20357#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
20358#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
20359#define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
20360//CB_COLOR7_CMASK
20361#define CB_COLOR7_CMASK__BASE_256B__SHIFT 0x0
20362#define CB_COLOR7_CMASK__BASE_256B_MASK 0xFFFFFFFFL
20363//CB_COLOR7_CMASK_BASE_EXT
20364#define CB_COLOR7_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
20365#define CB_COLOR7_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
20366//CB_COLOR7_FMASK
20367#define CB_COLOR7_FMASK__BASE_256B__SHIFT 0x0
20368#define CB_COLOR7_FMASK__BASE_256B_MASK 0xFFFFFFFFL
20369//CB_COLOR7_FMASK_BASE_EXT
20370#define CB_COLOR7_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
20371#define CB_COLOR7_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
20372//CB_COLOR7_CLEAR_WORD0
20373#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
20374#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
20375//CB_COLOR7_CLEAR_WORD1
20376#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
20377#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
20378//CB_COLOR7_DCC_BASE
20379#define CB_COLOR7_DCC_BASE__BASE_256B__SHIFT 0x0
20380#define CB_COLOR7_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
20381//CB_COLOR7_DCC_BASE_EXT
20382#define CB_COLOR7_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
20383#define CB_COLOR7_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
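/*
 * Illustrative sketch, not part of the generated register list: the paired
 * *__SHIFT and *_MASK definitions above are normally combined as shown below
 * to insert or extract a register field. The helper names
 * (cb_info_set_format / cb_info_get_format) are hypothetical examples, not
 * existing amdgpu API.
 */
static inline unsigned int cb_info_set_format(unsigned int reg, unsigned int fmt)
{
	/* clear the FORMAT field, then place the new value at its bit offset */
	reg &= ~CB_COLOR5_INFO__FORMAT_MASK;
	reg |= (fmt << CB_COLOR5_INFO__FORMAT__SHIFT) & CB_COLOR5_INFO__FORMAT_MASK;
	return reg;
}

static inline unsigned int cb_info_get_format(unsigned int reg)
{
	/* shift the masked FORMAT bits back down to a plain value */
	return (reg & CB_COLOR5_INFO__FORMAT_MASK) >> CB_COLOR5_INFO__FORMAT__SHIFT;
}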
20384
20385
20386// addressBlock: gc_gfxudec
20387//CP_EOP_DONE_ADDR_LO
20388#define CP_EOP_DONE_ADDR_LO__ADDR_LO__SHIFT 0x2
20389#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
20390//CP_EOP_DONE_ADDR_HI
20391#define CP_EOP_DONE_ADDR_HI__ADDR_HI__SHIFT 0x0
20392#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
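/*
 * Illustrative sketch, not part of the generated register list: ADDR_LO
 * carries bits [31:2] of a dword-aligned address and ADDR_HI the upper
 * address bits, so a 64-bit GPU address would typically be split as below.
 * The helper name is hypothetical.
 */
static inline void cp_eop_done_addr_split(unsigned long long gpu_addr,
					  unsigned int *lo, unsigned int *hi)
{
	/* low register: keep bits [31:2]; the address must be 4-byte aligned */
	*lo = (unsigned int)gpu_addr & CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK;
	/* high register: upper 16 address bits */
	*hi = (unsigned int)(gpu_addr >> 32) & CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK;
}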
20393//CP_EOP_DONE_DATA_LO
20394#define CP_EOP_DONE_DATA_LO__DATA_LO__SHIFT 0x0
20395#define CP_EOP_DONE_DATA_LO__DATA_LO_MASK 0xFFFFFFFFL
20396//CP_EOP_DONE_DATA_HI
20397#define CP_EOP_DONE_DATA_HI__DATA_HI__SHIFT 0x0
20398#define CP_EOP_DONE_DATA_HI__DATA_HI_MASK 0xFFFFFFFFL
20399//CP_EOP_LAST_FENCE_LO
20400#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO__SHIFT 0x0
20401#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO_MASK 0xFFFFFFFFL
20402//CP_EOP_LAST_FENCE_HI
20403#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI__SHIFT 0x0
20404#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI_MASK 0xFFFFFFFFL
20405//CP_STREAM_OUT_ADDR_LO
20406#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO__SHIFT 0x2
20407#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO_MASK 0xFFFFFFFCL
20408//CP_STREAM_OUT_ADDR_HI
20409#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI__SHIFT 0x0
20410#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI_MASK 0x0000FFFFL
20411//CP_NUM_PRIM_WRITTEN_COUNT0_LO
20412#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO__SHIFT 0x0
20413#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO_MASK 0xFFFFFFFFL
20414//CP_NUM_PRIM_WRITTEN_COUNT0_HI
20415#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI__SHIFT 0x0
20416#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI_MASK 0xFFFFFFFFL
20417//CP_NUM_PRIM_NEEDED_COUNT0_LO
20418#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO__SHIFT 0x0
20419#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO_MASK 0xFFFFFFFFL
20420//CP_NUM_PRIM_NEEDED_COUNT0_HI
20421#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI__SHIFT 0x0
20422#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI_MASK 0xFFFFFFFFL
20423//CP_NUM_PRIM_WRITTEN_COUNT1_LO
20424#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO__SHIFT 0x0
20425#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO_MASK 0xFFFFFFFFL
20426//CP_NUM_PRIM_WRITTEN_COUNT1_HI
20427#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI__SHIFT 0x0
20428#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI_MASK 0xFFFFFFFFL
20429//CP_NUM_PRIM_NEEDED_COUNT1_LO
20430#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO__SHIFT 0x0
20431#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO_MASK 0xFFFFFFFFL
20432//CP_NUM_PRIM_NEEDED_COUNT1_HI
20433#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI__SHIFT 0x0
20434#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI_MASK 0xFFFFFFFFL
20435//CP_NUM_PRIM_WRITTEN_COUNT2_LO
20436#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO__SHIFT 0x0
20437#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO_MASK 0xFFFFFFFFL
20438//CP_NUM_PRIM_WRITTEN_COUNT2_HI
20439#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI__SHIFT 0x0
20440#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI_MASK 0xFFFFFFFFL
20441//CP_NUM_PRIM_NEEDED_COUNT2_LO
20442#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO__SHIFT 0x0
20443#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO_MASK 0xFFFFFFFFL
20444//CP_NUM_PRIM_NEEDED_COUNT2_HI
20445#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI__SHIFT 0x0
20446#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI_MASK 0xFFFFFFFFL
20447//CP_NUM_PRIM_WRITTEN_COUNT3_LO
20448#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO__SHIFT 0x0
20449#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO_MASK 0xFFFFFFFFL
20450//CP_NUM_PRIM_WRITTEN_COUNT3_HI
20451#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI__SHIFT 0x0
20452#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI_MASK 0xFFFFFFFFL
20453//CP_NUM_PRIM_NEEDED_COUNT3_LO
20454#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO__SHIFT 0x0
20455#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO_MASK 0xFFFFFFFFL
20456//CP_NUM_PRIM_NEEDED_COUNT3_HI
20457#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI__SHIFT 0x0
20458#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI_MASK 0xFFFFFFFFL
20459//CP_PIPE_STATS_ADDR_LO
20460#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO__SHIFT 0x2
20461#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO_MASK 0xFFFFFFFCL
20462//CP_PIPE_STATS_ADDR_HI
20463#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI__SHIFT 0x0
20464#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI_MASK 0x0000FFFFL
20465//CP_VGT_IAVERT_COUNT_LO
20466#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO__SHIFT 0x0
20467#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO_MASK 0xFFFFFFFFL
20468//CP_VGT_IAVERT_COUNT_HI
20469#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI__SHIFT 0x0
20470#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI_MASK 0xFFFFFFFFL
20471//CP_VGT_IAPRIM_COUNT_LO
20472#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO__SHIFT 0x0
20473#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO_MASK 0xFFFFFFFFL
20474//CP_VGT_IAPRIM_COUNT_HI
20475#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI__SHIFT 0x0
20476#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI_MASK 0xFFFFFFFFL
20477//CP_VGT_GSPRIM_COUNT_LO
20478#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO__SHIFT 0x0
20479#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO_MASK 0xFFFFFFFFL
20480//CP_VGT_GSPRIM_COUNT_HI
20481#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI__SHIFT 0x0
20482#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI_MASK 0xFFFFFFFFL
20483//CP_VGT_VSINVOC_COUNT_LO
20484#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO__SHIFT 0x0
20485#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
20486//CP_VGT_VSINVOC_COUNT_HI
20487#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI__SHIFT 0x0
20488#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
20489//CP_VGT_GSINVOC_COUNT_LO
20490#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO__SHIFT 0x0
20491#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
20492//CP_VGT_GSINVOC_COUNT_HI
20493#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI__SHIFT 0x0
20494#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
20495//CP_VGT_HSINVOC_COUNT_LO
20496#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO__SHIFT 0x0
20497#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
20498//CP_VGT_HSINVOC_COUNT_HI
20499#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI__SHIFT 0x0
20500#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
20501//CP_VGT_DSINVOC_COUNT_LO
20502#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO__SHIFT 0x0
20503#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
20504//CP_VGT_DSINVOC_COUNT_HI
20505#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI__SHIFT 0x0
20506#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
20507//CP_PA_CINVOC_COUNT_LO
20508#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO__SHIFT 0x0
20509#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO_MASK 0xFFFFFFFFL
20510//CP_PA_CINVOC_COUNT_HI
20511#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI__SHIFT 0x0
20512#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI_MASK 0xFFFFFFFFL
20513//CP_PA_CPRIM_COUNT_LO
20514#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO__SHIFT 0x0
20515#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO_MASK 0xFFFFFFFFL
20516//CP_PA_CPRIM_COUNT_HI
20517#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI__SHIFT 0x0
20518#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI_MASK 0xFFFFFFFFL
20519//CP_SC_PSINVOC_COUNT0_LO
20520#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO__SHIFT 0x0
20521#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO_MASK 0xFFFFFFFFL
20522//CP_SC_PSINVOC_COUNT0_HI
20523#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI__SHIFT 0x0
20524#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI_MASK 0xFFFFFFFFL
20525//CP_SC_PSINVOC_COUNT1_LO
20526#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE__SHIFT 0x0
20527#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE_MASK 0xFFFFFFFFL
20528//CP_SC_PSINVOC_COUNT1_HI
20529#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE__SHIFT 0x0
20530#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE_MASK 0xFFFFFFFFL
20531//CP_VGT_CSINVOC_COUNT_LO
20532#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO__SHIFT 0x0
20533#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
20534//CP_VGT_CSINVOC_COUNT_HI
20535#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI__SHIFT 0x0
20536#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
20537//CP_PIPE_STATS_CONTROL
20538#define CP_PIPE_STATS_CONTROL__CACHE_POLICY__SHIFT 0x19
20539#define CP_PIPE_STATS_CONTROL__CACHE_POLICY_MASK 0x02000000L
20540//CP_STREAM_OUT_CONTROL
20541#define CP_STREAM_OUT_CONTROL__CACHE_POLICY__SHIFT 0x19
20542#define CP_STREAM_OUT_CONTROL__CACHE_POLICY_MASK 0x02000000L
20543//CP_STRMOUT_CNTL
20544#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE__SHIFT 0x0
20545#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE_MASK 0x00000001L
20546//SCRATCH_REG0
20547#define SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
20548#define SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
20549//SCRATCH_REG1
20550#define SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
20551#define SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
20552//SCRATCH_REG2
20553#define SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
20554#define SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
20555//SCRATCH_REG3
20556#define SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
20557#define SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
20558//SCRATCH_REG4
20559#define SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
20560#define SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
20561//SCRATCH_REG5
20562#define SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
20563#define SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
20564//SCRATCH_REG6
20565#define SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
20566#define SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
20567//SCRATCH_REG7
20568#define SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
20569#define SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
20570//CP_APPEND_DATA_HI
20571#define CP_APPEND_DATA_HI__DATA__SHIFT 0x0
20572#define CP_APPEND_DATA_HI__DATA_MASK 0xFFFFFFFFL
20573//CP_APPEND_LAST_CS_FENCE_HI
20574#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE__SHIFT 0x0
20575#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
20576//CP_APPEND_LAST_PS_FENCE_HI
20577#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE__SHIFT 0x0
20578#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
20579//SCRATCH_UMSK
20580#define SCRATCH_UMSK__OBSOLETE_UMSK__SHIFT 0x0
20581#define SCRATCH_UMSK__OBSOLETE_SWAP__SHIFT 0x10
20582#define SCRATCH_UMSK__OBSOLETE_UMSK_MASK 0x000000FFL
20583#define SCRATCH_UMSK__OBSOLETE_SWAP_MASK 0x00030000L
20584//SCRATCH_ADDR
20585#define SCRATCH_ADDR__OBSOLETE_ADDR__SHIFT 0x0
20586#define SCRATCH_ADDR__OBSOLETE_ADDR_MASK 0xFFFFFFFFL
20587//CP_PFP_ATOMIC_PREOP_LO
20588#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
20589#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
20590//CP_PFP_ATOMIC_PREOP_HI
20591#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
20592#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
20593//CP_PFP_GDS_ATOMIC0_PREOP_LO
20594#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
20595#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
20596//CP_PFP_GDS_ATOMIC0_PREOP_HI
20597#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
20598#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
20599//CP_PFP_GDS_ATOMIC1_PREOP_LO
20600#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
20601#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
20602//CP_PFP_GDS_ATOMIC1_PREOP_HI
20603#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
20604#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
20605//CP_APPEND_ADDR_LO
20606#define CP_APPEND_ADDR_LO__MEM_ADDR_LO__SHIFT 0x2
20607#define CP_APPEND_ADDR_LO__MEM_ADDR_LO_MASK 0xFFFFFFFCL
20608//CP_APPEND_ADDR_HI
20609#define CP_APPEND_ADDR_HI__MEM_ADDR_HI__SHIFT 0x0
20610#define CP_APPEND_ADDR_HI__CS_PS_SEL__SHIFT 0x10
20611#define CP_APPEND_ADDR_HI__CACHE_POLICY__SHIFT 0x19
20612#define CP_APPEND_ADDR_HI__COMMAND__SHIFT 0x1d
20613#define CP_APPEND_ADDR_HI__MEM_ADDR_HI_MASK 0x0000FFFFL
20614#define CP_APPEND_ADDR_HI__CS_PS_SEL_MASK 0x00010000L
20615#define CP_APPEND_ADDR_HI__CACHE_POLICY_MASK 0x02000000L
20616#define CP_APPEND_ADDR_HI__COMMAND_MASK 0xE0000000L
20617//CP_APPEND_DATA_LO
20618#define CP_APPEND_DATA_LO__DATA__SHIFT 0x0
20619#define CP_APPEND_DATA_LO__DATA_MASK 0xFFFFFFFFL
20620//CP_APPEND_LAST_CS_FENCE_LO
20621#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE__SHIFT 0x0
20622#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
20623//CP_APPEND_LAST_PS_FENCE_LO
20624#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE__SHIFT 0x0
20625#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
20626//CP_ATOMIC_PREOP_LO
20627#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
20628#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
20629//CP_ME_ATOMIC_PREOP_LO
20630#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
20631#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
20632//CP_ATOMIC_PREOP_HI
20633#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
20634#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
20635//CP_ME_ATOMIC_PREOP_HI
20636#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
20637#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
20638//CP_GDS_ATOMIC0_PREOP_LO
20639#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
20640#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
20641//CP_ME_GDS_ATOMIC0_PREOP_LO
20642#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
20643#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
20644//CP_GDS_ATOMIC0_PREOP_HI
20645#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
20646#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
20647//CP_ME_GDS_ATOMIC0_PREOP_HI
20648#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
20649#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
20650//CP_GDS_ATOMIC1_PREOP_LO
20651#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
20652#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
20653//CP_ME_GDS_ATOMIC1_PREOP_LO
20654#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
20655#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
20656//CP_GDS_ATOMIC1_PREOP_HI
20657#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
20658#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
20659//CP_ME_GDS_ATOMIC1_PREOP_HI
20660#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
20661#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
20662//CP_ME_MC_WADDR_LO
20663#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO__SHIFT 0x2
20664#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO_MASK 0xFFFFFFFCL
20665//CP_ME_MC_WADDR_HI
20666#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI__SHIFT 0x0
20667#define CP_ME_MC_WADDR_HI__CACHE_POLICY__SHIFT 0x16
20668#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI_MASK 0x0000FFFFL
20669#define CP_ME_MC_WADDR_HI__CACHE_POLICY_MASK 0x00400000L
20670//CP_ME_MC_WDATA_LO
20671#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO__SHIFT 0x0
20672#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO_MASK 0xFFFFFFFFL
20673//CP_ME_MC_WDATA_HI
20674#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI__SHIFT 0x0
20675#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI_MASK 0xFFFFFFFFL
20676//CP_ME_MC_RADDR_LO
20677#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO__SHIFT 0x2
20678#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO_MASK 0xFFFFFFFCL
20679//CP_ME_MC_RADDR_HI
20680#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI__SHIFT 0x0
20681#define CP_ME_MC_RADDR_HI__CACHE_POLICY__SHIFT 0x16
20682#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI_MASK 0x0000FFFFL
20683#define CP_ME_MC_RADDR_HI__CACHE_POLICY_MASK 0x00400000L
20684//CP_SEM_WAIT_TIMER
20685#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER__SHIFT 0x0
20686#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER_MASK 0xFFFFFFFFL
20687//CP_SIG_SEM_ADDR_LO
20688#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x0
20689#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
20690#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
20691#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
20692//CP_SIG_SEM_ADDR_HI
20693#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
20694#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
20695#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
20696#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
20697#define CP_SIG_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
20698#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
20699#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
20700#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
20701#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
20702#define CP_SIG_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
20703//CP_WAIT_REG_MEM_TIMEOUT
20704#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT__SHIFT 0x0
20705#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT_MASK 0xFFFFFFFFL
20706//CP_WAIT_SEM_ADDR_LO
20707#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x0
20708#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
20709#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
20710#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
20711//CP_WAIT_SEM_ADDR_HI
20712#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
20713#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
20714#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
20715#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
20716#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
20717#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
20718#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
20719#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
20720#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
20721#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
20722//CP_DMA_PFP_CONTROL
20723#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
20724#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
20725#define CP_DMA_PFP_CONTROL__DST_SELECT__SHIFT 0x14
20726#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
20727#define CP_DMA_PFP_CONTROL__SRC_SELECT__SHIFT 0x1d
20728#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
20729#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY_MASK 0x00002000L
20730#define CP_DMA_PFP_CONTROL__DST_SELECT_MASK 0x00300000L
20731#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY_MASK 0x02000000L
20732#define CP_DMA_PFP_CONTROL__SRC_SELECT_MASK 0x60000000L
20733//CP_DMA_ME_CONTROL
20734#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
20735#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
20736#define CP_DMA_ME_CONTROL__DST_SELECT__SHIFT 0x14
20737#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
20738#define CP_DMA_ME_CONTROL__SRC_SELECT__SHIFT 0x1d
20739#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
20740#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY_MASK 0x00002000L
20741#define CP_DMA_ME_CONTROL__DST_SELECT_MASK 0x00300000L
20742#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY_MASK 0x02000000L
20743#define CP_DMA_ME_CONTROL__SRC_SELECT_MASK 0x60000000L
20744//CP_COHER_BASE_HI
20745#define CP_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
20746#define CP_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
20747//CP_COHER_START_DELAY
20748#define CP_COHER_START_DELAY__START_DELAY_COUNT__SHIFT 0x0
20749#define CP_COHER_START_DELAY__START_DELAY_COUNT_MASK 0x0000003FL
20750//CP_COHER_CNTL
20751#define CP_COHER_CNTL__TC_NC_ACTION_ENA__SHIFT 0x3
20752#define CP_COHER_CNTL__TC_WC_ACTION_ENA__SHIFT 0x4
20753#define CP_COHER_CNTL__TC_INV_METADATA_ACTION_ENA__SHIFT 0x5
20754#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA__SHIFT 0xf
20755#define CP_COHER_CNTL__TC_WB_ACTION_ENA__SHIFT 0x12
20756#define CP_COHER_CNTL__TCL1_ACTION_ENA__SHIFT 0x16
20757#define CP_COHER_CNTL__TC_ACTION_ENA__SHIFT 0x17
20758#define CP_COHER_CNTL__CB_ACTION_ENA__SHIFT 0x19
20759#define CP_COHER_CNTL__DB_ACTION_ENA__SHIFT 0x1a
20760#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA__SHIFT 0x1b
20761#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA__SHIFT 0x1c
20762#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA__SHIFT 0x1d
20763#define CP_COHER_CNTL__SH_KCACHE_WB_ACTION_ENA__SHIFT 0x1e
20764#define CP_COHER_CNTL__TC_NC_ACTION_ENA_MASK 0x00000008L
20765#define CP_COHER_CNTL__TC_WC_ACTION_ENA_MASK 0x00000010L
20766#define CP_COHER_CNTL__TC_INV_METADATA_ACTION_ENA_MASK 0x00000020L
20767#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA_MASK 0x00008000L
20768#define CP_COHER_CNTL__TC_WB_ACTION_ENA_MASK 0x00040000L
20769#define CP_COHER_CNTL__TCL1_ACTION_ENA_MASK 0x00400000L
20770#define CP_COHER_CNTL__TC_ACTION_ENA_MASK 0x00800000L
20771#define CP_COHER_CNTL__CB_ACTION_ENA_MASK 0x02000000L
20772#define CP_COHER_CNTL__DB_ACTION_ENA_MASK 0x04000000L
20773#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA_MASK 0x08000000L
20774#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA_MASK 0x10000000L
20775#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA_MASK 0x20000000L
20776#define CP_COHER_CNTL__SH_KCACHE_WB_ACTION_ENA_MASK 0x40000000L
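/*
 * Illustrative sketch, not part of the generated register list: the
 * *_ACTION_ENA bits of CP_COHER_CNTL are independent enables, so a cache
 * flush/invalidate request value is typically built by OR-ing the masks of
 * the caches involved. The helper below is a hypothetical example.
 */
static inline unsigned int cp_coher_cntl_tc_cb_flush(void)
{
	/* write back and invalidate TC, plus flush CB, in one request value */
	return CP_COHER_CNTL__TC_WB_ACTION_ENA_MASK |
	       CP_COHER_CNTL__TC_ACTION_ENA_MASK |
	       CP_COHER_CNTL__CB_ACTION_ENA_MASK;
}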
20777//CP_COHER_SIZE
20778#define CP_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
20779#define CP_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
20780//CP_COHER_BASE
20781#define CP_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
20782#define CP_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
20783//CP_COHER_STATUS
20784#define CP_COHER_STATUS__MEID__SHIFT 0x18
20785#define CP_COHER_STATUS__STATUS__SHIFT 0x1f
20786#define CP_COHER_STATUS__MEID_MASK 0x03000000L
20787#define CP_COHER_STATUS__STATUS_MASK 0x80000000L
20788//CP_DMA_ME_SRC_ADDR
20789#define CP_DMA_ME_SRC_ADDR__SRC_ADDR__SHIFT 0x0
20790#define CP_DMA_ME_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
20791//CP_DMA_ME_SRC_ADDR_HI
20792#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
20793#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
20794//CP_DMA_ME_DST_ADDR
20795#define CP_DMA_ME_DST_ADDR__DST_ADDR__SHIFT 0x0
20796#define CP_DMA_ME_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
20797//CP_DMA_ME_DST_ADDR_HI
20798#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
20799#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
20800//CP_DMA_ME_COMMAND
20801#define CP_DMA_ME_COMMAND__BYTE_COUNT__SHIFT 0x0
20802#define CP_DMA_ME_COMMAND__SAS__SHIFT 0x1a
20803#define CP_DMA_ME_COMMAND__DAS__SHIFT 0x1b
20804#define CP_DMA_ME_COMMAND__SAIC__SHIFT 0x1c
20805#define CP_DMA_ME_COMMAND__DAIC__SHIFT 0x1d
20806#define CP_DMA_ME_COMMAND__RAW_WAIT__SHIFT 0x1e
20807#define CP_DMA_ME_COMMAND__DIS_WC__SHIFT 0x1f
20808#define CP_DMA_ME_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
20809#define CP_DMA_ME_COMMAND__SAS_MASK 0x04000000L
20810#define CP_DMA_ME_COMMAND__DAS_MASK 0x08000000L
20811#define CP_DMA_ME_COMMAND__SAIC_MASK 0x10000000L
20812#define CP_DMA_ME_COMMAND__DAIC_MASK 0x20000000L
20813#define CP_DMA_ME_COMMAND__RAW_WAIT_MASK 0x40000000L
20814#define CP_DMA_ME_COMMAND__DIS_WC_MASK 0x80000000L
20815//CP_DMA_PFP_SRC_ADDR
20816#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR__SHIFT 0x0
20817#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
20818//CP_DMA_PFP_SRC_ADDR_HI
20819#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
20820#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
20821//CP_DMA_PFP_DST_ADDR
20822#define CP_DMA_PFP_DST_ADDR__DST_ADDR__SHIFT 0x0
20823#define CP_DMA_PFP_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
20824//CP_DMA_PFP_DST_ADDR_HI
20825#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
20826#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
20827//CP_DMA_PFP_COMMAND
20828#define CP_DMA_PFP_COMMAND__BYTE_COUNT__SHIFT 0x0
20829#define CP_DMA_PFP_COMMAND__SAS__SHIFT 0x1a
20830#define CP_DMA_PFP_COMMAND__DAS__SHIFT 0x1b
20831#define CP_DMA_PFP_COMMAND__SAIC__SHIFT 0x1c
20832#define CP_DMA_PFP_COMMAND__DAIC__SHIFT 0x1d
20833#define CP_DMA_PFP_COMMAND__RAW_WAIT__SHIFT 0x1e
20834#define CP_DMA_PFP_COMMAND__DIS_WC__SHIFT 0x1f
20835#define CP_DMA_PFP_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
20836#define CP_DMA_PFP_COMMAND__SAS_MASK 0x04000000L
20837#define CP_DMA_PFP_COMMAND__DAS_MASK 0x08000000L
20838#define CP_DMA_PFP_COMMAND__SAIC_MASK 0x10000000L
20839#define CP_DMA_PFP_COMMAND__DAIC_MASK 0x20000000L
20840#define CP_DMA_PFP_COMMAND__RAW_WAIT_MASK 0x40000000L
20841#define CP_DMA_PFP_COMMAND__DIS_WC_MASK 0x80000000L
20842//CP_DMA_CNTL
20843#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL__SHIFT 0x0
20844#define CP_DMA_CNTL__MIN_AVAILSZ__SHIFT 0x4
20845#define CP_DMA_CNTL__BUFFER_DEPTH__SHIFT 0x10
20846#define CP_DMA_CNTL__PIO_FIFO_EMPTY__SHIFT 0x1c
20847#define CP_DMA_CNTL__PIO_FIFO_FULL__SHIFT 0x1d
20848#define CP_DMA_CNTL__PIO_COUNT__SHIFT 0x1e
20849#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL_MASK 0x00000001L
20850#define CP_DMA_CNTL__MIN_AVAILSZ_MASK 0x00000030L
20851#define CP_DMA_CNTL__BUFFER_DEPTH_MASK 0x000F0000L
20852#define CP_DMA_CNTL__PIO_FIFO_EMPTY_MASK 0x10000000L
20853#define CP_DMA_CNTL__PIO_FIFO_FULL_MASK 0x20000000L
20854#define CP_DMA_CNTL__PIO_COUNT_MASK 0xC0000000L
20855//CP_DMA_READ_TAGS
20856#define CP_DMA_READ_TAGS__DMA_READ_TAG__SHIFT 0x0
20857#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID__SHIFT 0x1c
20858#define CP_DMA_READ_TAGS__DMA_READ_TAG_MASK 0x03FFFFFFL
20859#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID_MASK 0x10000000L
20860//CP_COHER_SIZE_HI
20861#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
20862#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
20863//CP_PFP_IB_CONTROL
20864#define CP_PFP_IB_CONTROL__IB_EN__SHIFT 0x0
20865#define CP_PFP_IB_CONTROL__IB_EN_MASK 0x000000FFL
20866//CP_PFP_LOAD_CONTROL
20867#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN__SHIFT 0x0
20868#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN__SHIFT 0x1
20869#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN__SHIFT 0x10
20870#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN__SHIFT 0x18
20871#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN_MASK 0x00000001L
20872#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN_MASK 0x00000002L
20873#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN_MASK 0x00010000L
20874#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN_MASK 0x01000000L
20875//CP_SCRATCH_INDEX
20876#define CP_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
20877#define CP_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000000FFL
20878//CP_SCRATCH_DATA
20879#define CP_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
20880#define CP_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
20881//CP_RB_OFFSET
20882#define CP_RB_OFFSET__RB_OFFSET__SHIFT 0x0
20883#define CP_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
20884//CP_IB1_OFFSET
20885#define CP_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
20886#define CP_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
20887//CP_IB2_OFFSET
20888#define CP_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
20889#define CP_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
20890//CP_IB1_PREAMBLE_BEGIN
20891#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN__SHIFT 0x0
20892#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN_MASK 0x000FFFFFL
20893//CP_IB1_PREAMBLE_END
20894#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END__SHIFT 0x0
20895#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END_MASK 0x000FFFFFL
20896//CP_IB2_PREAMBLE_BEGIN
20897#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN__SHIFT 0x0
20898#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN_MASK 0x000FFFFFL
20899//CP_IB2_PREAMBLE_END
20900#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END__SHIFT 0x0
20901#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END_MASK 0x000FFFFFL
20902//CP_CE_IB1_OFFSET
20903#define CP_CE_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
20904#define CP_CE_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
20905//CP_CE_IB2_OFFSET
20906#define CP_CE_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
20907#define CP_CE_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
20908//CP_CE_COUNTER
20909#define CP_CE_COUNTER__CONST_ENGINE_COUNT__SHIFT 0x0
20910#define CP_CE_COUNTER__CONST_ENGINE_COUNT_MASK 0xFFFFFFFFL
20911//CP_CE_RB_OFFSET
20912#define CP_CE_RB_OFFSET__RB_OFFSET__SHIFT 0x0
20913#define CP_CE_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
20914//CP_CE_INIT_CMD_BUFSZ
20915#define CP_CE_INIT_CMD_BUFSZ__INIT_CMD_REQSZ__SHIFT 0x0
20916#define CP_CE_INIT_CMD_BUFSZ__INIT_CMD_REQSZ_MASK 0x00000FFFL
20917//CP_CE_IB1_CMD_BUFSZ
20918#define CP_CE_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
20919#define CP_CE_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
20920//CP_CE_IB2_CMD_BUFSZ
20921#define CP_CE_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
20922#define CP_CE_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
20923//CP_IB1_CMD_BUFSZ
20924#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
20925#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
20926//CP_IB2_CMD_BUFSZ
20927#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
20928#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
20929//CP_ST_CMD_BUFSZ
20930#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ__SHIFT 0x0
20931#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ_MASK 0x000FFFFFL
20932//CP_CE_INIT_BASE_LO
20933#define CP_CE_INIT_BASE_LO__INIT_BASE_LO__SHIFT 0x5
20934#define CP_CE_INIT_BASE_LO__INIT_BASE_LO_MASK 0xFFFFFFE0L
20935//CP_CE_INIT_BASE_HI
20936#define CP_CE_INIT_BASE_HI__INIT_BASE_HI__SHIFT 0x0
20937#define CP_CE_INIT_BASE_HI__INIT_BASE_HI_MASK 0x0000FFFFL
20938//CP_CE_INIT_BUFSZ
20939#define CP_CE_INIT_BUFSZ__INIT_BUFSZ__SHIFT 0x0
20940#define CP_CE_INIT_BUFSZ__INIT_BUFSZ_MASK 0x00000FFFL
20941//CP_CE_IB1_BASE_LO
20942#define CP_CE_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
20943#define CP_CE_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
20944//CP_CE_IB1_BASE_HI
20945#define CP_CE_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
20946#define CP_CE_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
20947//CP_CE_IB1_BUFSZ
20948#define CP_CE_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
20949#define CP_CE_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
20950//CP_CE_IB2_BASE_LO
20951#define CP_CE_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
20952#define CP_CE_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
20953//CP_CE_IB2_BASE_HI
20954#define CP_CE_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
20955#define CP_CE_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
20956//CP_CE_IB2_BUFSZ
20957#define CP_CE_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
20958#define CP_CE_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
20959//CP_IB1_BASE_LO
20960#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
20961#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
20962//CP_IB1_BASE_HI
20963#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
20964#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
20965//CP_IB1_BUFSZ
20966#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
20967#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
20968//CP_IB2_BASE_LO
20969#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
20970#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
20971//CP_IB2_BASE_HI
20972#define CP_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
20973#define CP_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
20974//CP_IB2_BUFSZ
20975#define CP_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
20976#define CP_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
20977//CP_ST_BASE_LO
20978#define CP_ST_BASE_LO__ST_BASE_LO__SHIFT 0x2
20979#define CP_ST_BASE_LO__ST_BASE_LO_MASK 0xFFFFFFFCL
20980//CP_ST_BASE_HI
20981#define CP_ST_BASE_HI__ST_BASE_HI__SHIFT 0x0
20982#define CP_ST_BASE_HI__ST_BASE_HI_MASK 0x0000FFFFL
20983//CP_ST_BUFSZ
20984#define CP_ST_BUFSZ__ST_BUFSZ__SHIFT 0x0
20985#define CP_ST_BUFSZ__ST_BUFSZ_MASK 0x000FFFFFL
20986//CP_EOP_DONE_EVENT_CNTL
20987#define CP_EOP_DONE_EVENT_CNTL__WBINV_TC_OP__SHIFT 0x0
20988#define CP_EOP_DONE_EVENT_CNTL__WBINV_ACTION_ENA__SHIFT 0xc
20989#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY__SHIFT 0x19
20990#define CP_EOP_DONE_EVENT_CNTL__EXECUTE__SHIFT 0x1c
20991#define CP_EOP_DONE_EVENT_CNTL__WBINV_TC_OP_MASK 0x0000007FL
20992#define CP_EOP_DONE_EVENT_CNTL__WBINV_ACTION_ENA_MASK 0x0003F000L
20993#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY_MASK 0x02000000L
20994#define CP_EOP_DONE_EVENT_CNTL__EXECUTE_MASK 0x10000000L
20995//CP_EOP_DONE_DATA_CNTL
20996#define CP_EOP_DONE_DATA_CNTL__DST_SEL__SHIFT 0x10
20997#define CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT 0x18
20998#define CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT 0x1d
20999#define CP_EOP_DONE_DATA_CNTL__DST_SEL_MASK 0x00030000L
21000#define CP_EOP_DONE_DATA_CNTL__INT_SEL_MASK 0x07000000L
21001#define CP_EOP_DONE_DATA_CNTL__DATA_SEL_MASK 0xE0000000L
21002//CP_EOP_DONE_CNTX_ID
#define CP_EOP_DONE_CNTX_ID__CNTX_ID__SHIFT 0x0
#define CP_EOP_DONE_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
//CP_PFP_COMPLETION_STATUS
#define CP_PFP_COMPLETION_STATUS__STATUS__SHIFT 0x0
#define CP_PFP_COMPLETION_STATUS__STATUS_MASK 0x00000003L
//CP_CE_COMPLETION_STATUS
#define CP_CE_COMPLETION_STATUS__STATUS__SHIFT 0x0
#define CP_CE_COMPLETION_STATUS__STATUS_MASK 0x00000003L
//CP_PRED_NOT_VISIBLE
#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE__SHIFT 0x0
#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE_MASK 0x00000001L
//CP_PFP_METADATA_BASE_ADDR
#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
//CP_PFP_METADATA_BASE_ADDR_HI
#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
//CP_CE_METADATA_BASE_ADDR
#define CP_CE_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
#define CP_CE_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
//CP_CE_METADATA_BASE_ADDR_HI
#define CP_CE_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
#define CP_CE_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
//CP_DRAW_INDX_INDR_ADDR
#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO__SHIFT 0x0
#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
//CP_DRAW_INDX_INDR_ADDR_HI
#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
//CP_DISPATCH_INDR_ADDR
#define CP_DISPATCH_INDR_ADDR__ADDR_LO__SHIFT 0x0
#define CP_DISPATCH_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
//CP_DISPATCH_INDR_ADDR_HI
#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
//CP_INDEX_BASE_ADDR
#define CP_INDEX_BASE_ADDR__ADDR_LO__SHIFT 0x0
#define CP_INDEX_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
//CP_INDEX_BASE_ADDR_HI
#define CP_INDEX_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
#define CP_INDEX_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
//CP_INDEX_TYPE
#define CP_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
#define CP_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
//CP_GDS_BKUP_ADDR
#define CP_GDS_BKUP_ADDR__ADDR_LO__SHIFT 0x0
#define CP_GDS_BKUP_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
//CP_GDS_BKUP_ADDR_HI
#define CP_GDS_BKUP_ADDR_HI__ADDR_HI__SHIFT 0x0
#define CP_GDS_BKUP_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
//CP_SAMPLE_STATUS
#define CP_SAMPLE_STATUS__Z_PASS_ACITVE__SHIFT 0x0
#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE__SHIFT 0x1
#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE__SHIFT 0x2
#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE__SHIFT 0x3
#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE__SHIFT 0x4
#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE__SHIFT 0x5
#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE__SHIFT 0x6
#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE__SHIFT 0x7
#define CP_SAMPLE_STATUS__Z_PASS_ACITVE_MASK 0x00000001L
#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE_MASK 0x00000002L
#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE_MASK 0x00000004L
#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE_MASK 0x00000008L
#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE_MASK 0x00000010L
#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE_MASK 0x00000020L
#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE_MASK 0x00000040L
#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE_MASK 0x00000080L
//CP_ME_COHER_CNTL
#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA__SHIFT 0x0
#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA__SHIFT 0x1
#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA__SHIFT 0x6
#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA__SHIFT 0x7
#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA__SHIFT 0x8
#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA__SHIFT 0x9
#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA__SHIFT 0xa
#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA__SHIFT 0xb
#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA__SHIFT 0xc
#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA__SHIFT 0xd
#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA__SHIFT 0xe
#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA__SHIFT 0x13
#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA__SHIFT 0x15
#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA_MASK 0x00000001L
#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA_MASK 0x00000002L
#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA_MASK 0x00000040L
#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA_MASK 0x00000080L
#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA_MASK 0x00000100L
#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA_MASK 0x00000200L
#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA_MASK 0x00000400L
#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA_MASK 0x00000800L
#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA_MASK 0x00001000L
#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA_MASK 0x00002000L
#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA_MASK 0x00004000L
#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA_MASK 0x00080000L
#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA_MASK 0x00200000L
//CP_ME_COHER_SIZE
#define CP_ME_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
#define CP_ME_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
//CP_ME_COHER_SIZE_HI
#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
//CP_ME_COHER_BASE
#define CP_ME_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
#define CP_ME_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
//CP_ME_COHER_BASE_HI
#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
//CP_ME_COHER_STATUS
#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX__SHIFT 0x0
#define CP_ME_COHER_STATUS__STATUS__SHIFT 0x1f
#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX_MASK 0x000000FFL
#define CP_ME_COHER_STATUS__STATUS_MASK 0x80000000L
//RLC_GPM_PERF_COUNT_0
#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL__SHIFT 0x0
#define RLC_GPM_PERF_COUNT_0__SE_INDEX__SHIFT 0x4
#define RLC_GPM_PERF_COUNT_0__SH_INDEX__SHIFT 0x8
#define RLC_GPM_PERF_COUNT_0__CU_INDEX__SHIFT 0xc
#define RLC_GPM_PERF_COUNT_0__EVENT_SEL__SHIFT 0x10
#define RLC_GPM_PERF_COUNT_0__UNUSED__SHIFT 0x12
#define RLC_GPM_PERF_COUNT_0__ENABLE__SHIFT 0x14
#define RLC_GPM_PERF_COUNT_0__RESERVED__SHIFT 0x15
#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL_MASK 0x0000000FL
#define RLC_GPM_PERF_COUNT_0__SE_INDEX_MASK 0x000000F0L
#define RLC_GPM_PERF_COUNT_0__SH_INDEX_MASK 0x00000F00L
#define RLC_GPM_PERF_COUNT_0__CU_INDEX_MASK 0x0000F000L
#define RLC_GPM_PERF_COUNT_0__EVENT_SEL_MASK 0x00030000L
#define RLC_GPM_PERF_COUNT_0__UNUSED_MASK 0x000C0000L
#define RLC_GPM_PERF_COUNT_0__ENABLE_MASK 0x00100000L
#define RLC_GPM_PERF_COUNT_0__RESERVED_MASK 0xFFE00000L
//RLC_GPM_PERF_COUNT_1
#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL__SHIFT 0x0
#define RLC_GPM_PERF_COUNT_1__SE_INDEX__SHIFT 0x4
#define RLC_GPM_PERF_COUNT_1__SH_INDEX__SHIFT 0x8
#define RLC_GPM_PERF_COUNT_1__CU_INDEX__SHIFT 0xc
#define RLC_GPM_PERF_COUNT_1__EVENT_SEL__SHIFT 0x10
#define RLC_GPM_PERF_COUNT_1__UNUSED__SHIFT 0x12
#define RLC_GPM_PERF_COUNT_1__ENABLE__SHIFT 0x14
#define RLC_GPM_PERF_COUNT_1__RESERVED__SHIFT 0x15
#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL_MASK 0x0000000FL
#define RLC_GPM_PERF_COUNT_1__SE_INDEX_MASK 0x000000F0L
#define RLC_GPM_PERF_COUNT_1__SH_INDEX_MASK 0x00000F00L
#define RLC_GPM_PERF_COUNT_1__CU_INDEX_MASK 0x0000F000L
#define RLC_GPM_PERF_COUNT_1__EVENT_SEL_MASK 0x00030000L
#define RLC_GPM_PERF_COUNT_1__UNUSED_MASK 0x000C0000L
#define RLC_GPM_PERF_COUNT_1__ENABLE_MASK 0x00100000L
#define RLC_GPM_PERF_COUNT_1__RESERVED_MASK 0xFFE00000L
//GRBM_GFX_INDEX
#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8
#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10
#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d
#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f
#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL
#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000FF00L
#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L
#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L
#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
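/*
 * Illustrative sketch, not part of the generated register list: each
 * field above has a *__SHIFT/*_MASK pair, and a common pattern is to
 * shift the field value into place and then mask it, e.g. to build a
 * GRBM_GFX_INDEX selector for one SE/SH/instance. The helper name below
 * is hypothetical.
 */
static inline unsigned int grbm_gfx_index_select(unsigned int se,
						 unsigned int sh,
						 unsigned int instance)
{
	/* Pack each index into its field, clamping via the field mask. */
	return ((se << GRBM_GFX_INDEX__SE_INDEX__SHIFT) &
		GRBM_GFX_INDEX__SE_INDEX_MASK) |
	       ((sh << GRBM_GFX_INDEX__SH_INDEX__SHIFT) &
		GRBM_GFX_INDEX__SH_INDEX_MASK) |
	       ((instance << GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT) &
		GRBM_GFX_INDEX__INSTANCE_INDEX_MASK);
}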
//VGT_GSVS_RING_SIZE
#define VGT_GSVS_RING_SIZE__MEM_SIZE__SHIFT 0x0
#define VGT_GSVS_RING_SIZE__MEM_SIZE_MASK 0xFFFFFFFFL
//VGT_PRIMITIVE_TYPE
#define VGT_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
#define VGT_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
//VGT_INDEX_TYPE
#define VGT_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
#define VGT_INDEX_TYPE__PRIMGEN_EN__SHIFT 0x8
#define VGT_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
#define VGT_INDEX_TYPE__PRIMGEN_EN_MASK 0x00000100L
//VGT_STRMOUT_BUFFER_FILLED_SIZE_0
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE_MASK 0xFFFFFFFFL
//VGT_STRMOUT_BUFFER_FILLED_SIZE_1
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE_MASK 0xFFFFFFFFL
//VGT_STRMOUT_BUFFER_FILLED_SIZE_2
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE_MASK 0xFFFFFFFFL
//VGT_STRMOUT_BUFFER_FILLED_SIZE_3
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE_MASK 0xFFFFFFFFL
//VGT_MAX_VTX_INDX
#define VGT_MAX_VTX_INDX__MAX_INDX__SHIFT 0x0
#define VGT_MAX_VTX_INDX__MAX_INDX_MASK 0xFFFFFFFFL
//VGT_MIN_VTX_INDX
#define VGT_MIN_VTX_INDX__MIN_INDX__SHIFT 0x0
#define VGT_MIN_VTX_INDX__MIN_INDX_MASK 0xFFFFFFFFL
//VGT_INDX_OFFSET
#define VGT_INDX_OFFSET__INDX_OFFSET__SHIFT 0x0
#define VGT_INDX_OFFSET__INDX_OFFSET_MASK 0xFFFFFFFFL
//VGT_MULTI_PRIM_IB_RESET_EN
#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN__SHIFT 0x0
#define VGT_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS__SHIFT 0x1
#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN_MASK 0x00000001L
#define VGT_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS_MASK 0x00000002L
//VGT_NUM_INDICES
#define VGT_NUM_INDICES__NUM_INDICES__SHIFT 0x0
#define VGT_NUM_INDICES__NUM_INDICES_MASK 0xFFFFFFFFL
//VGT_NUM_INSTANCES
#define VGT_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
#define VGT_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
//VGT_TF_RING_SIZE
#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
#define VGT_TF_RING_SIZE__SIZE_MASK 0x0000FFFFL
//VGT_HS_OFFCHIP_PARAM
#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING__SHIFT 0x0
#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY__SHIFT 0x9
#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING_MASK 0x000001FFL
#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY_MASK 0x00000600L
//VGT_TF_MEMORY_BASE
#define VGT_TF_MEMORY_BASE__BASE__SHIFT 0x0
#define VGT_TF_MEMORY_BASE__BASE_MASK 0xFFFFFFFFL
//VGT_TF_MEMORY_BASE_HI
#define VGT_TF_MEMORY_BASE_HI__BASE_HI__SHIFT 0x0
#define VGT_TF_MEMORY_BASE_HI__BASE_HI_MASK 0x000000FFL
//WD_POS_BUF_BASE
#define WD_POS_BUF_BASE__BASE__SHIFT 0x0
#define WD_POS_BUF_BASE__BASE_MASK 0xFFFFFFFFL
//WD_POS_BUF_BASE_HI
#define WD_POS_BUF_BASE_HI__BASE_HI__SHIFT 0x0
#define WD_POS_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
//WD_CNTL_SB_BUF_BASE
#define WD_CNTL_SB_BUF_BASE__BASE__SHIFT 0x0
#define WD_CNTL_SB_BUF_BASE__BASE_MASK 0xFFFFFFFFL
//WD_CNTL_SB_BUF_BASE_HI
#define WD_CNTL_SB_BUF_BASE_HI__BASE_HI__SHIFT 0x0
#define WD_CNTL_SB_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
//WD_INDEX_BUF_BASE
#define WD_INDEX_BUF_BASE__BASE__SHIFT 0x0
#define WD_INDEX_BUF_BASE__BASE_MASK 0xFFFFFFFFL
//WD_INDEX_BUF_BASE_HI
#define WD_INDEX_BUF_BASE_HI__BASE_HI__SHIFT 0x0
#define WD_INDEX_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
//IA_MULTI_VGT_PARAM
#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE__SHIFT 0x0
#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON__SHIFT 0x10
#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP__SHIFT 0x11
#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON__SHIFT 0x12
#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI__SHIFT 0x13
#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP__SHIFT 0x14
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC__SHIFT 0x15
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV__SHIFT 0x16
#define IA_MULTI_VGT_PARAM__HW_USE_ONLY__SHIFT 0x17
#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE_MASK 0x0000FFFFL
#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON_MASK 0x00010000L
#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP_MASK 0x00020000L
#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON_MASK 0x00040000L
#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI_MASK 0x00080000L
#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP_MASK 0x00100000L
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC_MASK 0x00200000L
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV_MASK 0x00400000L
#define IA_MULTI_VGT_PARAM__HW_USE_ONLY_MASK 0x00800000L
//VGT_INSTANCE_BASE_ID
#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
//PA_SU_LINE_STIPPLE_VALUE
#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE__SHIFT 0x0
#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE_MASK 0x00FFFFFFL
//PA_SC_LINE_STIPPLE_STATE
#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR__SHIFT 0x0
#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT__SHIFT 0x8
#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR_MASK 0x0000000FL
#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT_MASK 0x0000FF00L
//PA_SC_SCREEN_EXTENT_MIN_0
#define PA_SC_SCREEN_EXTENT_MIN_0__X__SHIFT 0x0
#define PA_SC_SCREEN_EXTENT_MIN_0__Y__SHIFT 0x10
#define PA_SC_SCREEN_EXTENT_MIN_0__X_MASK 0x0000FFFFL
#define PA_SC_SCREEN_EXTENT_MIN_0__Y_MASK 0xFFFF0000L
//PA_SC_SCREEN_EXTENT_MAX_0
#define PA_SC_SCREEN_EXTENT_MAX_0__X__SHIFT 0x0
#define PA_SC_SCREEN_EXTENT_MAX_0__Y__SHIFT 0x10
#define PA_SC_SCREEN_EXTENT_MAX_0__X_MASK 0x0000FFFFL
#define PA_SC_SCREEN_EXTENT_MAX_0__Y_MASK 0xFFFF0000L
//PA_SC_SCREEN_EXTENT_MIN_1
#define PA_SC_SCREEN_EXTENT_MIN_1__X__SHIFT 0x0
#define PA_SC_SCREEN_EXTENT_MIN_1__Y__SHIFT 0x10
#define PA_SC_SCREEN_EXTENT_MIN_1__X_MASK 0x0000FFFFL
#define PA_SC_SCREEN_EXTENT_MIN_1__Y_MASK 0xFFFF0000L
//PA_SC_SCREEN_EXTENT_MAX_1
#define PA_SC_SCREEN_EXTENT_MAX_1__X__SHIFT 0x0
#define PA_SC_SCREEN_EXTENT_MAX_1__Y__SHIFT 0x10
#define PA_SC_SCREEN_EXTENT_MAX_1__X_MASK 0x0000FFFFL
#define PA_SC_SCREEN_EXTENT_MAX_1__Y_MASK 0xFFFF0000L
//PA_SC_P3D_TRAP_SCREEN_HV_EN
#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
//PA_SC_P3D_TRAP_SCREEN_H
#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
//PA_SC_P3D_TRAP_SCREEN_V
#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
//PA_SC_P3D_TRAP_SCREEN_OCCURRENCE
#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
//PA_SC_P3D_TRAP_SCREEN_COUNT
#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
//PA_SC_HP3D_TRAP_SCREEN_HV_EN
#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
//PA_SC_HP3D_TRAP_SCREEN_H
#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
//PA_SC_HP3D_TRAP_SCREEN_V
#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
//PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE
#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
//PA_SC_HP3D_TRAP_SCREEN_COUNT
#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
//PA_SC_TRAP_SCREEN_HV_EN
#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
//PA_SC_TRAP_SCREEN_H
#define PA_SC_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
#define PA_SC_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
//PA_SC_TRAP_SCREEN_V
#define PA_SC_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
#define PA_SC_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
//PA_SC_TRAP_SCREEN_OCCURRENCE
#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
//PA_SC_TRAP_SCREEN_COUNT
#define PA_SC_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
#define PA_SC_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
//SQ_THREAD_TRACE_BASE
#define SQ_THREAD_TRACE_BASE__ADDR__SHIFT 0x0
#define SQ_THREAD_TRACE_BASE__ADDR_MASK 0xFFFFFFFFL
//SQ_THREAD_TRACE_SIZE
#define SQ_THREAD_TRACE_SIZE__SIZE__SHIFT 0x0
#define SQ_THREAD_TRACE_SIZE__SIZE_MASK 0x003FFFFFL
//SQ_THREAD_TRACE_MASK
#define SQ_THREAD_TRACE_MASK__CU_SEL__SHIFT 0x0
#define SQ_THREAD_TRACE_MASK__SH_SEL__SHIFT 0x5
#define SQ_THREAD_TRACE_MASK__REG_STALL_EN__SHIFT 0x7
#define SQ_THREAD_TRACE_MASK__SIMD_EN__SHIFT 0x8
#define SQ_THREAD_TRACE_MASK__VM_ID_MASK__SHIFT 0xc
#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN__SHIFT 0xe
#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN__SHIFT 0xf
#define SQ_THREAD_TRACE_MASK__CU_SEL_MASK 0x0000001FL
#define SQ_THREAD_TRACE_MASK__SH_SEL_MASK 0x00000020L
#define SQ_THREAD_TRACE_MASK__REG_STALL_EN_MASK 0x00000080L
#define SQ_THREAD_TRACE_MASK__SIMD_EN_MASK 0x00000F00L
#define SQ_THREAD_TRACE_MASK__VM_ID_MASK_MASK 0x00003000L
#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN_MASK 0x00004000L
#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN_MASK 0x00008000L
//SQ_THREAD_TRACE_TOKEN_MASK
#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK__SHIFT 0x0
#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK__SHIFT 0x10
#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL__SHIFT 0x18
#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK_MASK 0x0000FFFFL
#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK_MASK 0x00FF0000L
#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL_MASK 0x01000000L
//SQ_THREAD_TRACE_PERF_MASK
#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK__SHIFT 0x0
#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK__SHIFT 0x10
#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK_MASK 0x0000FFFFL
#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK_MASK 0xFFFF0000L
//SQ_THREAD_TRACE_CTRL
#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER__SHIFT 0x1f
#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER_MASK 0x80000000L
//SQ_THREAD_TRACE_MODE
#define SQ_THREAD_TRACE_MODE__MASK_PS__SHIFT 0x0
#define SQ_THREAD_TRACE_MODE__MASK_VS__SHIFT 0x3
#define SQ_THREAD_TRACE_MODE__MASK_GS__SHIFT 0x6
#define SQ_THREAD_TRACE_MODE__MASK_ES__SHIFT 0x9
#define SQ_THREAD_TRACE_MODE__MASK_HS__SHIFT 0xc
#define SQ_THREAD_TRACE_MODE__MASK_LS__SHIFT 0xf
#define SQ_THREAD_TRACE_MODE__MASK_CS__SHIFT 0x12
#define SQ_THREAD_TRACE_MODE__MODE__SHIFT 0x15
#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE__SHIFT 0x17
#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN__SHIFT 0x19
#define SQ_THREAD_TRACE_MODE__TC_PERF_EN__SHIFT 0x1a
#define SQ_THREAD_TRACE_MODE__ISSUE_MASK__SHIFT 0x1b
#define SQ_THREAD_TRACE_MODE__TEST_MODE__SHIFT 0x1d
#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN__SHIFT 0x1e
#define SQ_THREAD_TRACE_MODE__WRAP__SHIFT 0x1f
#define SQ_THREAD_TRACE_MODE__MASK_PS_MASK 0x00000007L
#define SQ_THREAD_TRACE_MODE__MASK_VS_MASK 0x00000038L
#define SQ_THREAD_TRACE_MODE__MASK_GS_MASK 0x000001C0L
#define SQ_THREAD_TRACE_MODE__MASK_ES_MASK 0x00000E00L
#define SQ_THREAD_TRACE_MODE__MASK_HS_MASK 0x00007000L
#define SQ_THREAD_TRACE_MODE__MASK_LS_MASK 0x00038000L
#define SQ_THREAD_TRACE_MODE__MASK_CS_MASK 0x001C0000L
#define SQ_THREAD_TRACE_MODE__MODE_MASK 0x00600000L
#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE_MASK 0x01800000L
#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN_MASK 0x02000000L
#define SQ_THREAD_TRACE_MODE__TC_PERF_EN_MASK 0x04000000L
#define SQ_THREAD_TRACE_MODE__ISSUE_MASK_MASK 0x18000000L
#define SQ_THREAD_TRACE_MODE__TEST_MODE_MASK 0x20000000L
#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN_MASK 0x40000000L
#define SQ_THREAD_TRACE_MODE__WRAP_MASK 0x80000000L
//SQ_THREAD_TRACE_BASE2
#define SQ_THREAD_TRACE_BASE2__ADDR_HI__SHIFT 0x0
#define SQ_THREAD_TRACE_BASE2__ADDR_HI_MASK 0x0000000FL
//SQ_THREAD_TRACE_TOKEN_MASK2
#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK__SHIFT 0x0
#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK_MASK 0xFFFFFFFFL
//SQ_THREAD_TRACE_WPTR
#define SQ_THREAD_TRACE_WPTR__WPTR__SHIFT 0x0
#define SQ_THREAD_TRACE_WPTR__READ_OFFSET__SHIFT 0x1e
#define SQ_THREAD_TRACE_WPTR__WPTR_MASK 0x3FFFFFFFL
#define SQ_THREAD_TRACE_WPTR__READ_OFFSET_MASK 0xC0000000L
//SQ_THREAD_TRACE_STATUS
#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT 0x0
#define SQ_THREAD_TRACE_STATUS__FINISH_DONE__SHIFT 0x10
#define SQ_THREAD_TRACE_STATUS__UTC_ERROR__SHIFT 0x1c
#define SQ_THREAD_TRACE_STATUS__NEW_BUF__SHIFT 0x1d
#define SQ_THREAD_TRACE_STATUS__BUSY__SHIFT 0x1e
#define SQ_THREAD_TRACE_STATUS__FULL__SHIFT 0x1f
#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK 0x000003FFL
#define SQ_THREAD_TRACE_STATUS__FINISH_DONE_MASK 0x03FF0000L
#define SQ_THREAD_TRACE_STATUS__UTC_ERROR_MASK 0x10000000L
#define SQ_THREAD_TRACE_STATUS__NEW_BUF_MASK 0x20000000L
#define SQ_THREAD_TRACE_STATUS__BUSY_MASK 0x40000000L
#define SQ_THREAD_TRACE_STATUS__FULL_MASK 0x80000000L
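/*
 * Illustrative sketch, not part of the generated register list: reading
 * a field goes the other way, mask first and then shift down; a one-bit
 * flag can be tested against its mask directly. The helper names below
 * are hypothetical and assume the caller already has the raw 32-bit
 * SQ_THREAD_TRACE_STATUS value.
 */
static inline unsigned int sq_tt_status_finish_pending(unsigned int status)
{
	/* 10-bit FINISH_PENDING field in bits [9:0]. */
	return (status & SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK) >>
	       SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT;
}

static inline int sq_tt_status_busy(unsigned int status)
{
	/* Single-bit BUSY flag at bit 30. */
	return (status & SQ_THREAD_TRACE_STATUS__BUSY_MASK) != 0;
}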
//SQ_THREAD_TRACE_HIWATER
#define SQ_THREAD_TRACE_HIWATER__HIWATER__SHIFT 0x0
#define SQ_THREAD_TRACE_HIWATER__HIWATER_MASK 0x00000007L
//SQ_THREAD_TRACE_CNTR
#define SQ_THREAD_TRACE_CNTR__CNTR__SHIFT 0x0
#define SQ_THREAD_TRACE_CNTR__CNTR_MASK 0xFFFFFFFFL
//SQ_THREAD_TRACE_USERDATA_0
#define SQ_THREAD_TRACE_USERDATA_0__DATA__SHIFT 0x0
#define SQ_THREAD_TRACE_USERDATA_0__DATA_MASK 0xFFFFFFFFL
//SQ_THREAD_TRACE_USERDATA_1
#define SQ_THREAD_TRACE_USERDATA_1__DATA__SHIFT 0x0
#define SQ_THREAD_TRACE_USERDATA_1__DATA_MASK 0xFFFFFFFFL
//SQ_THREAD_TRACE_USERDATA_2
#define SQ_THREAD_TRACE_USERDATA_2__DATA__SHIFT 0x0
#define SQ_THREAD_TRACE_USERDATA_2__DATA_MASK 0xFFFFFFFFL
//SQ_THREAD_TRACE_USERDATA_3
#define SQ_THREAD_TRACE_USERDATA_3__DATA__SHIFT 0x0
#define SQ_THREAD_TRACE_USERDATA_3__DATA_MASK 0xFFFFFFFFL
//SQC_CACHES
#define SQC_CACHES__TARGET_INST__SHIFT 0x0
#define SQC_CACHES__TARGET_DATA__SHIFT 0x1
#define SQC_CACHES__INVALIDATE__SHIFT 0x2
#define SQC_CACHES__WRITEBACK__SHIFT 0x3
#define SQC_CACHES__VOL__SHIFT 0x4
#define SQC_CACHES__COMPLETE__SHIFT 0x10
#define SQC_CACHES__TARGET_INST_MASK 0x00000001L
#define SQC_CACHES__TARGET_DATA_MASK 0x00000002L
#define SQC_CACHES__INVALIDATE_MASK 0x00000004L
#define SQC_CACHES__WRITEBACK_MASK 0x00000008L
#define SQC_CACHES__VOL_MASK 0x00000010L
#define SQC_CACHES__COMPLETE_MASK 0x00010000L
//SQC_WRITEBACK
#define SQC_WRITEBACK__DWB__SHIFT 0x0
#define SQC_WRITEBACK__DIRTY__SHIFT 0x1
#define SQC_WRITEBACK__DWB_MASK 0x00000001L
#define SQC_WRITEBACK__DIRTY_MASK 0x00000002L
//TA_CS_BC_BASE_ADDR
#define TA_CS_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
#define TA_CS_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
//TA_CS_BC_BASE_ADDR_HI
#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
//DB_OCCLUSION_COUNT0_LOW
#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
//DB_OCCLUSION_COUNT0_HI
#define DB_OCCLUSION_COUNT0_HI__COUNT_HI__SHIFT 0x0
#define DB_OCCLUSION_COUNT0_HI__COUNT_HI_MASK 0x7FFFFFFFL
//DB_OCCLUSION_COUNT1_LOW
#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW__SHIFT 0x0
#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
//DB_OCCLUSION_COUNT1_HI
#define DB_OCCLUSION_COUNT1_HI__COUNT_HI__SHIFT 0x0
#define DB_OCCLUSION_COUNT1_HI__COUNT_HI_MASK 0x7FFFFFFFL
//DB_OCCLUSION_COUNT2_LOW
#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW__SHIFT 0x0
#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
//DB_OCCLUSION_COUNT2_HI
#define DB_OCCLUSION_COUNT2_HI__COUNT_HI__SHIFT 0x0
#define DB_OCCLUSION_COUNT2_HI__COUNT_HI_MASK 0x7FFFFFFFL
//DB_OCCLUSION_COUNT3_LOW
#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW__SHIFT 0x0
#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
//DB_OCCLUSION_COUNT3_HI
#define DB_OCCLUSION_COUNT3_HI__COUNT_HI__SHIFT 0x0
#define DB_OCCLUSION_COUNT3_HI__COUNT_HI_MASK 0x7FFFFFFFL
//DB_ZPASS_COUNT_LOW
#define DB_ZPASS_COUNT_LOW__COUNT_LOW__SHIFT 0x0
#define DB_ZPASS_COUNT_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
//DB_ZPASS_COUNT_HI
#define DB_ZPASS_COUNT_HI__COUNT_HI__SHIFT 0x0
#define DB_ZPASS_COUNT_HI__COUNT_HI_MASK 0x7FFFFFFFL
//GDS_RD_ADDR
#define GDS_RD_ADDR__READ_ADDR__SHIFT 0x0
#define GDS_RD_ADDR__READ_ADDR_MASK 0xFFFFFFFFL
//GDS_RD_DATA
#define GDS_RD_DATA__READ_DATA__SHIFT 0x0
#define GDS_RD_DATA__READ_DATA_MASK 0xFFFFFFFFL
//GDS_RD_BURST_ADDR
#define GDS_RD_BURST_ADDR__BURST_ADDR__SHIFT 0x0
#define GDS_RD_BURST_ADDR__BURST_ADDR_MASK 0xFFFFFFFFL
//GDS_RD_BURST_COUNT
#define GDS_RD_BURST_COUNT__BURST_COUNT__SHIFT 0x0
#define GDS_RD_BURST_COUNT__BURST_COUNT_MASK 0xFFFFFFFFL
//GDS_RD_BURST_DATA
#define GDS_RD_BURST_DATA__BURST_DATA__SHIFT 0x0
#define GDS_RD_BURST_DATA__BURST_DATA_MASK 0xFFFFFFFFL
//GDS_WR_ADDR
#define GDS_WR_ADDR__WRITE_ADDR__SHIFT 0x0
#define GDS_WR_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
//GDS_WR_DATA
#define GDS_WR_DATA__WRITE_DATA__SHIFT 0x0
#define GDS_WR_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
//GDS_WR_BURST_ADDR
#define GDS_WR_BURST_ADDR__WRITE_ADDR__SHIFT 0x0
#define GDS_WR_BURST_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
//GDS_WR_BURST_DATA
#define GDS_WR_BURST_DATA__WRITE_DATA__SHIFT 0x0
#define GDS_WR_BURST_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
//GDS_WRITE_COMPLETE
#define GDS_WRITE_COMPLETE__WRITE_COMPLETE__SHIFT 0x0
#define GDS_WRITE_COMPLETE__WRITE_COMPLETE_MASK 0xFFFFFFFFL
//GDS_ATOM_CNTL
#define GDS_ATOM_CNTL__AINC__SHIFT 0x0
#define GDS_ATOM_CNTL__UNUSED1__SHIFT 0x6
#define GDS_ATOM_CNTL__DMODE__SHIFT 0x8
#define GDS_ATOM_CNTL__UNUSED2__SHIFT 0xa
#define GDS_ATOM_CNTL__AINC_MASK 0x0000003FL
#define GDS_ATOM_CNTL__UNUSED1_MASK 0x000000C0L
#define GDS_ATOM_CNTL__DMODE_MASK 0x00000300L
#define GDS_ATOM_CNTL__UNUSED2_MASK 0xFFFFFC00L
//GDS_ATOM_COMPLETE
#define GDS_ATOM_COMPLETE__COMPLETE__SHIFT 0x0
#define GDS_ATOM_COMPLETE__UNUSED__SHIFT 0x1
#define GDS_ATOM_COMPLETE__COMPLETE_MASK 0x00000001L
#define GDS_ATOM_COMPLETE__UNUSED_MASK 0xFFFFFFFEL
//GDS_ATOM_BASE
#define GDS_ATOM_BASE__BASE__SHIFT 0x0
#define GDS_ATOM_BASE__UNUSED__SHIFT 0x10
#define GDS_ATOM_BASE__BASE_MASK 0x0000FFFFL
#define GDS_ATOM_BASE__UNUSED_MASK 0xFFFF0000L
//GDS_ATOM_SIZE
#define GDS_ATOM_SIZE__SIZE__SHIFT 0x0
#define GDS_ATOM_SIZE__UNUSED__SHIFT 0x10
#define GDS_ATOM_SIZE__SIZE_MASK 0x0000FFFFL
#define GDS_ATOM_SIZE__UNUSED_MASK 0xFFFF0000L
//GDS_ATOM_OFFSET0
#define GDS_ATOM_OFFSET0__OFFSET0__SHIFT 0x0
#define GDS_ATOM_OFFSET0__UNUSED__SHIFT 0x8
#define GDS_ATOM_OFFSET0__OFFSET0_MASK 0x000000FFL
#define GDS_ATOM_OFFSET0__UNUSED_MASK 0xFFFFFF00L
//GDS_ATOM_OFFSET1
#define GDS_ATOM_OFFSET1__OFFSET1__SHIFT 0x0
#define GDS_ATOM_OFFSET1__UNUSED__SHIFT 0x8
#define GDS_ATOM_OFFSET1__OFFSET1_MASK 0x000000FFL
#define GDS_ATOM_OFFSET1__UNUSED_MASK 0xFFFFFF00L
//GDS_ATOM_DST
#define GDS_ATOM_DST__DST__SHIFT 0x0
#define GDS_ATOM_DST__DST_MASK 0xFFFFFFFFL
//GDS_ATOM_OP
#define GDS_ATOM_OP__OP__SHIFT 0x0
#define GDS_ATOM_OP__UNUSED__SHIFT 0x8
#define GDS_ATOM_OP__OP_MASK 0x000000FFL
#define GDS_ATOM_OP__UNUSED_MASK 0xFFFFFF00L
//GDS_ATOM_SRC0
#define GDS_ATOM_SRC0__DATA__SHIFT 0x0
#define GDS_ATOM_SRC0__DATA_MASK 0xFFFFFFFFL
//GDS_ATOM_SRC0_U
#define GDS_ATOM_SRC0_U__DATA__SHIFT 0x0
#define GDS_ATOM_SRC0_U__DATA_MASK 0xFFFFFFFFL
//GDS_ATOM_SRC1
#define GDS_ATOM_SRC1__DATA__SHIFT 0x0
#define GDS_ATOM_SRC1__DATA_MASK 0xFFFFFFFFL
//GDS_ATOM_SRC1_U
#define GDS_ATOM_SRC1_U__DATA__SHIFT 0x0
#define GDS_ATOM_SRC1_U__DATA_MASK 0xFFFFFFFFL
//GDS_ATOM_READ0
#define GDS_ATOM_READ0__DATA__SHIFT 0x0
#define GDS_ATOM_READ0__DATA_MASK 0xFFFFFFFFL
//GDS_ATOM_READ0_U
#define GDS_ATOM_READ0_U__DATA__SHIFT 0x0
#define GDS_ATOM_READ0_U__DATA_MASK 0xFFFFFFFFL
//GDS_ATOM_READ1
#define GDS_ATOM_READ1__DATA__SHIFT 0x0
#define GDS_ATOM_READ1__DATA_MASK 0xFFFFFFFFL
//GDS_ATOM_READ1_U
#define GDS_ATOM_READ1_U__DATA__SHIFT 0x0
#define GDS_ATOM_READ1_U__DATA_MASK 0xFFFFFFFFL
//GDS_GWS_RESOURCE_CNTL
#define GDS_GWS_RESOURCE_CNTL__INDEX__SHIFT 0x0
#define GDS_GWS_RESOURCE_CNTL__UNUSED__SHIFT 0x6
#define GDS_GWS_RESOURCE_CNTL__INDEX_MASK 0x0000003FL
#define GDS_GWS_RESOURCE_CNTL__UNUSED_MASK 0xFFFFFFC0L
//GDS_GWS_RESOURCE
#define GDS_GWS_RESOURCE__FLAG__SHIFT 0x0
#define GDS_GWS_RESOURCE__COUNTER__SHIFT 0x1
#define GDS_GWS_RESOURCE__TYPE__SHIFT 0xd
#define GDS_GWS_RESOURCE__DED__SHIFT 0xe
#define GDS_GWS_RESOURCE__RELEASE_ALL__SHIFT 0xf
#define GDS_GWS_RESOURCE__HEAD_QUEUE__SHIFT 0x10
#define GDS_GWS_RESOURCE__HEAD_VALID__SHIFT 0x1c
#define GDS_GWS_RESOURCE__HEAD_FLAG__SHIFT 0x1d
#define GDS_GWS_RESOURCE__HALTED__SHIFT 0x1e
#define GDS_GWS_RESOURCE__UNUSED1__SHIFT 0x1f
#define GDS_GWS_RESOURCE__FLAG_MASK 0x00000001L
#define GDS_GWS_RESOURCE__COUNTER_MASK 0x00001FFEL
#define GDS_GWS_RESOURCE__TYPE_MASK 0x00002000L
#define GDS_GWS_RESOURCE__DED_MASK 0x00004000L
#define GDS_GWS_RESOURCE__RELEASE_ALL_MASK 0x00008000L
#define GDS_GWS_RESOURCE__HEAD_QUEUE_MASK 0x0FFF0000L
#define GDS_GWS_RESOURCE__HEAD_VALID_MASK 0x10000000L
#define GDS_GWS_RESOURCE__HEAD_FLAG_MASK 0x20000000L
#define GDS_GWS_RESOURCE__HALTED_MASK 0x40000000L
#define GDS_GWS_RESOURCE__UNUSED1_MASK 0x80000000L
//GDS_GWS_RESOURCE_CNT
#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT__SHIFT 0x0
#define GDS_GWS_RESOURCE_CNT__UNUSED__SHIFT 0x10
#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT_MASK 0x0000FFFFL
#define GDS_GWS_RESOURCE_CNT__UNUSED_MASK 0xFFFF0000L
//GDS_OA_CNTL
#define GDS_OA_CNTL__INDEX__SHIFT 0x0
#define GDS_OA_CNTL__UNUSED__SHIFT 0x4
#define GDS_OA_CNTL__INDEX_MASK 0x0000000FL
#define GDS_OA_CNTL__UNUSED_MASK 0xFFFFFFF0L
//GDS_OA_COUNTER
#define GDS_OA_COUNTER__SPACE_AVAILABLE__SHIFT 0x0
#define GDS_OA_COUNTER__SPACE_AVAILABLE_MASK 0xFFFFFFFFL
//GDS_OA_ADDRESS
#define GDS_OA_ADDRESS__DS_ADDRESS__SHIFT 0x0
#define GDS_OA_ADDRESS__CRAWLER__SHIFT 0x10
#define GDS_OA_ADDRESS__CRAWLER_TYPE__SHIFT 0x14
#define GDS_OA_ADDRESS__UNUSED__SHIFT 0x16
#define GDS_OA_ADDRESS__NO_ALLOC__SHIFT 0x1e
#define GDS_OA_ADDRESS__ENABLE__SHIFT 0x1f
#define GDS_OA_ADDRESS__DS_ADDRESS_MASK 0x0000FFFFL
#define GDS_OA_ADDRESS__CRAWLER_MASK 0x000F0000L
#define GDS_OA_ADDRESS__CRAWLER_TYPE_MASK 0x00300000L
#define GDS_OA_ADDRESS__UNUSED_MASK 0x3FC00000L
#define GDS_OA_ADDRESS__NO_ALLOC_MASK 0x40000000L
#define GDS_OA_ADDRESS__ENABLE_MASK 0x80000000L
//GDS_OA_INCDEC
#define GDS_OA_INCDEC__VALUE__SHIFT 0x0
#define GDS_OA_INCDEC__INCDEC__SHIFT 0x1f
#define GDS_OA_INCDEC__VALUE_MASK 0x7FFFFFFFL
#define GDS_OA_INCDEC__INCDEC_MASK 0x80000000L
//GDS_OA_RING_SIZE
#define GDS_OA_RING_SIZE__RING_SIZE__SHIFT 0x0
#define GDS_OA_RING_SIZE__RING_SIZE_MASK 0xFFFFFFFFL
//SPI_CONFIG_CNTL
#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY__SHIFT 0x0
#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER__SHIFT 0x15
#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS__SHIFT 0x18
#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS__SHIFT 0x19
#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET__SHIFT 0x1a
#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL__SHIFT 0x1b
#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA__SHIFT 0x1c
#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA__SHIFT 0x1d
#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL__SHIFT 0x1e
#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY_MASK 0x001FFFFFL
#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER_MASK 0x00E00000L
#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS_MASK 0x01000000L
#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS_MASK 0x02000000L
#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET_MASK 0x04000000L
#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL_MASK 0x08000000L
#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA_MASK 0x10000000L
#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA_MASK 0x20000000L
#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL_MASK 0xC0000000L
//SPI_CONFIG_CNTL_1
#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT 0x0
#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW__SHIFT 0x4
#define SPI_CONFIG_CNTL_1__BATON_RESET_DISABLE__SHIFT 0x5
#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE__SHIFT 0x6
#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT__SHIFT 0x7
#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE__SHIFT 0x8
#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE__SHIFT 0x9
#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT__SHIFT 0xa
#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE__SHIFT 0xe
#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE__SHIFT 0xf
#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE__SHIFT 0x10
#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY_MASK 0x0000000FL
#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW_MASK 0x00000010L
#define SPI_CONFIG_CNTL_1__BATON_RESET_DISABLE_MASK 0x00000020L
#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE_MASK 0x00000040L
#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT_MASK 0x00000080L
#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE_MASK 0x00000100L
#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE_MASK 0x00000200L
#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT_MASK 0x00003C00L
#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE_MASK 0x00004000L
#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE_MASK 0x00008000L
#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE_MASK 0xFFFF0000L
//SPI_CONFIG_CNTL_2
#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD__SHIFT 0x0
#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD__SHIFT 0x4
#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD_MASK 0x0000000FL
#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD_MASK 0x000000F0L


21705// addressBlock: gc_perfddec
21706//CPG_PERFCOUNTER1_LO
21707#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
21708#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
21709//CPG_PERFCOUNTER1_HI
21710#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
21711#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
21712//CPG_PERFCOUNTER0_LO
21713#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
21714#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
21715//CPG_PERFCOUNTER0_HI
21716#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
21717#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
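/*
 * Illustrative sketch, not part of the generated register list: the
 * *_LO/*_HI perfcounter registers in this block hold the two 32-bit
 * halves of one 64-bit counter (some HI halves, e.g. PA_SU, are narrower
 * than 32 bits, which is why the HI mask is applied). The helper name is
 * hypothetical and assumes the caller has already read both halves.
 */
static inline unsigned long long cpg_perfcounter0_value(unsigned int lo,
							unsigned int hi)
{
	/* Combine the masked high half with the low half. */
	return ((unsigned long long)(hi & CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK) << 32) |
	       (lo & CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK);
}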
//CPC_PERFCOUNTER1_LO
#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//CPC_PERFCOUNTER1_HI
#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//CPC_PERFCOUNTER0_LO
#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//CPC_PERFCOUNTER0_HI
#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//CPF_PERFCOUNTER1_LO
#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//CPF_PERFCOUNTER1_HI
#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//CPF_PERFCOUNTER0_LO
#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//CPF_PERFCOUNTER0_HI
#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//CPF_LATENCY_STATS_DATA
#define CPF_LATENCY_STATS_DATA__DATA__SHIFT 0x0
#define CPF_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
//CPG_LATENCY_STATS_DATA
#define CPG_LATENCY_STATS_DATA__DATA__SHIFT 0x0
#define CPG_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
//CPC_LATENCY_STATS_DATA
#define CPC_LATENCY_STATS_DATA__DATA__SHIFT 0x0
#define CPC_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
//GRBM_PERFCOUNTER0_LO
#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//GRBM_PERFCOUNTER0_HI
#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//GRBM_PERFCOUNTER1_LO
#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//GRBM_PERFCOUNTER1_HI
#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//GRBM_SE0_PERFCOUNTER_LO
#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//GRBM_SE0_PERFCOUNTER_HI
#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//GRBM_SE1_PERFCOUNTER_LO
#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//GRBM_SE1_PERFCOUNTER_HI
#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//GRBM_SE2_PERFCOUNTER_LO
#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//GRBM_SE2_PERFCOUNTER_HI
#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//GRBM_SE3_PERFCOUNTER_LO
#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//GRBM_SE3_PERFCOUNTER_HI
#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//WD_PERFCOUNTER0_LO
#define WD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define WD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//WD_PERFCOUNTER0_HI
#define WD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define WD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//WD_PERFCOUNTER1_LO
#define WD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define WD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//WD_PERFCOUNTER1_HI
#define WD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define WD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//WD_PERFCOUNTER2_LO
#define WD_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define WD_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//WD_PERFCOUNTER2_HI
#define WD_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define WD_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//WD_PERFCOUNTER3_LO
#define WD_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define WD_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//WD_PERFCOUNTER3_HI
#define WD_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define WD_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//IA_PERFCOUNTER0_LO
#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//IA_PERFCOUNTER0_HI
#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//IA_PERFCOUNTER1_LO
#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//IA_PERFCOUNTER1_HI
#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//IA_PERFCOUNTER2_LO
#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//IA_PERFCOUNTER2_HI
#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//IA_PERFCOUNTER3_LO
#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//IA_PERFCOUNTER3_HI
#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//VGT_PERFCOUNTER0_LO
#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//VGT_PERFCOUNTER0_HI
#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//VGT_PERFCOUNTER1_LO
#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//VGT_PERFCOUNTER1_HI
#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//VGT_PERFCOUNTER2_LO
#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//VGT_PERFCOUNTER2_HI
#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//VGT_PERFCOUNTER3_LO
#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//VGT_PERFCOUNTER3_HI
#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//PA_SU_PERFCOUNTER0_LO
#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SU_PERFCOUNTER0_HI
#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
//PA_SU_PERFCOUNTER1_LO
#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SU_PERFCOUNTER1_HI
#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
//PA_SU_PERFCOUNTER2_LO
#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SU_PERFCOUNTER2_HI
#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
//PA_SU_PERFCOUNTER3_LO
#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SU_PERFCOUNTER3_HI
#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
//PA_SC_PERFCOUNTER0_LO
#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER0_HI
#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER1_LO
#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER1_HI
#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER2_LO
#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER2_HI
#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER3_LO
#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER3_HI
#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER4_LO
#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER4_HI
#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER5_LO
#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER5_HI
#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER6_LO
#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER6_HI
#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER7_LO
#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//PA_SC_PERFCOUNTER7_HI
#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER0_HI
#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER0_LO
#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER1_HI
#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER1_LO
#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER2_HI
#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER2_LO
#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER3_HI
#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER3_LO
#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER4_HI
#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER4_LO
#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER5_HI
#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SPI_PERFCOUNTER5_LO
#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER0_LO
#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER0_HI
#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER1_LO
#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER1_HI
#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER2_LO
#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER2_HI
#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER3_LO
#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER3_HI
#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER4_LO
#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER4_HI
#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER5_LO
#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER5_HI
#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER6_LO
#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER6_HI
#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER7_LO
#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER7_HI
#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER8_LO
#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER8_HI
#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER9_LO
#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER9_HI
#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER10_LO
#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER10_HI
#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER11_LO
#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER11_HI
#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER12_LO
#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER12_HI
#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER13_LO
#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER13_HI
#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER14_LO
#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER14_HI
#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER15_LO
#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SQ_PERFCOUNTER15_HI
#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SX_PERFCOUNTER0_LO
#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SX_PERFCOUNTER0_HI
#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SX_PERFCOUNTER1_LO
#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SX_PERFCOUNTER1_HI
#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SX_PERFCOUNTER2_LO
#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SX_PERFCOUNTER2_HI
#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//SX_PERFCOUNTER3_LO
#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//SX_PERFCOUNTER3_HI
#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//GDS_PERFCOUNTER0_LO
#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//GDS_PERFCOUNTER0_HI
#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//GDS_PERFCOUNTER1_LO
#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//GDS_PERFCOUNTER1_HI
#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//GDS_PERFCOUNTER2_LO
#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//GDS_PERFCOUNTER2_HI
#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//GDS_PERFCOUNTER3_LO
#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//GDS_PERFCOUNTER3_HI
#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TA_PERFCOUNTER0_LO
#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TA_PERFCOUNTER0_HI
#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TA_PERFCOUNTER1_LO
#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TA_PERFCOUNTER1_HI
#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TD_PERFCOUNTER0_LO
#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TD_PERFCOUNTER0_HI
#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TD_PERFCOUNTER1_LO
#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TD_PERFCOUNTER1_HI
#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCP_PERFCOUNTER0_LO
#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCP_PERFCOUNTER0_HI
#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCP_PERFCOUNTER1_LO
#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCP_PERFCOUNTER1_HI
#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCP_PERFCOUNTER2_LO
#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCP_PERFCOUNTER2_HI
#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCP_PERFCOUNTER3_LO
#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCP_PERFCOUNTER3_HI
#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCC_PERFCOUNTER0_LO
#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCC_PERFCOUNTER0_HI
#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCC_PERFCOUNTER1_LO
#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCC_PERFCOUNTER1_HI
#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCC_PERFCOUNTER2_LO
#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCC_PERFCOUNTER2_HI
#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCC_PERFCOUNTER3_LO
#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCC_PERFCOUNTER3_HI
#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCA_PERFCOUNTER0_LO
#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCA_PERFCOUNTER0_HI
#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCA_PERFCOUNTER1_LO
#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCA_PERFCOUNTER1_HI
#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCA_PERFCOUNTER2_LO
#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCA_PERFCOUNTER2_HI
#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//TCA_PERFCOUNTER3_LO
#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//TCA_PERFCOUNTER3_HI
#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//CB_PERFCOUNTER0_LO
#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//CB_PERFCOUNTER0_HI
#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//CB_PERFCOUNTER1_LO
#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//CB_PERFCOUNTER1_HI
#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//CB_PERFCOUNTER2_LO
#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//CB_PERFCOUNTER2_HI
#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//CB_PERFCOUNTER3_LO
#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//CB_PERFCOUNTER3_HI
#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//DB_PERFCOUNTER0_LO
#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
//DB_PERFCOUNTER0_HI
#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
//DB_PERFCOUNTER1_LO
#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
22239#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
22240//DB_PERFCOUNTER1_HI
22241#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
22242#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
22243//DB_PERFCOUNTER2_LO
22244#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
22245#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
22246//DB_PERFCOUNTER2_HI
22247#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
22248#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
22249//DB_PERFCOUNTER3_LO
22250#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
22251#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
22252//DB_PERFCOUNTER3_HI
22253#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
22254#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
22255//RLC_PERFCOUNTER0_LO
22256#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
22257#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
22258//RLC_PERFCOUNTER0_HI
22259#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
22260#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
22261//RLC_PERFCOUNTER1_LO
22262#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
22263#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
22264//RLC_PERFCOUNTER1_HI
22265#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
22266#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
22267//RMI_PERFCOUNTER0_LO
22268#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
22269#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
22270//RMI_PERFCOUNTER0_HI
22271#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
22272#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
22273//RMI_PERFCOUNTER1_LO
22274#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
22275#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
22276//RMI_PERFCOUNTER1_HI
22277#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
22278#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
22279//RMI_PERFCOUNTER2_LO
22280#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
22281#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
22282//RMI_PERFCOUNTER2_HI
22283#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
22284#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
22285//RMI_PERFCOUNTER3_LO
22286#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
22287#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
22288//RMI_PERFCOUNTER3_HI
22289#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
22290#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
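/*
 * Editor's note -- illustrative sketch only, not part of the generated
 * register header: each *_PERFCOUNTER*_LO/_HI pair above exposes the two
 * 32-bit halves of a 64-bit hardware performance counter, with the field
 * spanning the whole register (shift 0x0, mask 0xFFFFFFFF). A hypothetical
 * helper (name and signature invented for illustration) that recombines the
 * halves from raw register reads supplied by the caller could look like this:
 */
static inline unsigned long long
tcc_perfcounter0_value(unsigned int lo_reg, unsigned int hi_reg)
{
	/* extract each half with its mask/shift pair, then join them */
	unsigned long long lo = (lo_reg & TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK) >>
				TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT;
	unsigned long long hi = (hi_reg & TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK) >>
				TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT;

	return (hi << 32) | lo;
}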


// addressBlock: gc_utcl2_atcl2pfcntrdec
//ATC_L2_PERFCOUNTER_LO
#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT                                                      0x0
#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK                                                        0xFFFFFFFFL
//ATC_L2_PERFCOUNTER_HI
#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT                                                      0x0
#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT                                                   0x10
#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK                                                        0x0000FFFFL
#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK                                                     0xFFFF0000L


// addressBlock: gc_utcl2_vml2prdec
//MC_VM_L2_PERFCOUNTER_LO
#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT                                                    0x0
#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK                                                      0xFFFFFFFFL
//MC_VM_L2_PERFCOUNTER_HI
#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT                                                    0x0
#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT                                                 0x10
#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK                                                      0x0000FFFFL
#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK                                                   0xFFFF0000L

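/*
 * Editor's note -- illustrative sketch only, not part of the generated
 * register header: unlike the full-width counter halves above, the UTCL2
 * *_PERFCOUNTER_HI registers pack a 16-bit COUNTER_HI field next to a
 * 16-bit COMPARE_VALUE field, so each field needs its own mask/shift pair.
 * Hypothetical helpers (names invented for illustration) operating on a raw
 * register value supplied by the caller:
 */
static inline unsigned int mc_vm_l2_perfcounter_counter_hi(unsigned int hi_reg)
{
	/* low 16 bits: upper half of the counter value */
	return (hi_reg & MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK) >>
	       MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT;
}

static inline unsigned int mc_vm_l2_perfcounter_compare_value(unsigned int hi_reg)
{
	/* high 16 bits: the programmed compare value */
	return (hi_reg & MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK) >>
	       MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT;
}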
22315// addressBlock: gc_perfsdec
22316//CPG_PERFCOUNTER1_SELECT
22317#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
22318#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
22319#define CPG_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
22320#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
22321#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
22322#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
22323#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
22324#define CPG_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
22325#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
22326#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
22327//CPG_PERFCOUNTER0_SELECT1
22328#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
22329#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
22330#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
22331#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
22332#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
22333#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
22334#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
22335#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
22336//CPG_PERFCOUNTER0_SELECT
22337#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
22338#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
22339#define CPG_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
22340#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
22341#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
22342#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
22343#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
22344#define CPG_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
22345#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
22346#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
22347//CPC_PERFCOUNTER1_SELECT
22348#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
22349#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
22350#define CPC_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
22351#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
22352#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
22353#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
22354#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
22355#define CPC_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
22356#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
22357#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
22358//CPC_PERFCOUNTER0_SELECT1
22359#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
22360#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
22361#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
22362#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
22363#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
22364#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
22365#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
22366#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
22367//CPF_PERFCOUNTER1_SELECT
22368#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
22369#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
22370#define CPF_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
22371#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
22372#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
22373#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
22374#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
22375#define CPF_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
22376#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
22377#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
22378//CPF_PERFCOUNTER0_SELECT1
22379#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
22380#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
22381#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
22382#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
22383#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
22384#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
22385#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
22386#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
22387//CPF_PERFCOUNTER0_SELECT
22388#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
22389#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
22390#define CPF_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
22391#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
22392#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
22393#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
22394#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
22395#define CPF_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
22396#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
22397#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
22398//CP_PERFMON_CNTL
22399#define CP_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
22400#define CP_PERFMON_CNTL__SPM_PERFMON_STATE__SHIFT 0x4
22401#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x8
22402#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
22403#define CP_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000FL
22404#define CP_PERFMON_CNTL__SPM_PERFMON_STATE_MASK 0x000000F0L
22405#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
22406#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
22407//CPC_PERFCOUNTER0_SELECT
22408#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
22409#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
22410#define CPC_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
22411#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
22412#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
22413#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
22414#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
22415#define CPC_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
22416#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
22417#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
22418//CPF_TC_PERF_COUNTER_WINDOW_SELECT
22419#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
22420#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
22421#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
22422#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x00000007L
22423#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
22424#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
22425//CPG_TC_PERF_COUNTER_WINDOW_SELECT
22426#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
22427#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
22428#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
22429#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x0000001FL
22430#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
22431#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
22432//CPF_LATENCY_STATS_SELECT
22433#define CPF_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
22434#define CPF_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
22435#define CPF_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
22436#define CPF_LATENCY_STATS_SELECT__INDEX_MASK 0x0000000FL
22437#define CPF_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
22438#define CPF_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
22439//CPG_LATENCY_STATS_SELECT
22440#define CPG_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
22441#define CPG_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
22442#define CPG_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
22443#define CPG_LATENCY_STATS_SELECT__INDEX_MASK 0x0000001FL
22444#define CPG_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
22445#define CPG_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
22446//CPC_LATENCY_STATS_SELECT
22447#define CPC_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
22448#define CPC_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
22449#define CPC_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
22450#define CPC_LATENCY_STATS_SELECT__INDEX_MASK 0x00000007L
22451#define CPC_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
22452#define CPC_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
22453//CP_DRAW_OBJECT
22454#define CP_DRAW_OBJECT__OBJECT__SHIFT 0x0
22455#define CP_DRAW_OBJECT__OBJECT_MASK 0xFFFFFFFFL
22456//CP_DRAW_OBJECT_COUNTER
22457#define CP_DRAW_OBJECT_COUNTER__COUNT__SHIFT 0x0
22458#define CP_DRAW_OBJECT_COUNTER__COUNT_MASK 0x0000FFFFL
22459//CP_DRAW_WINDOW_MASK_HI
22460#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI__SHIFT 0x0
22461#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI_MASK 0xFFFFFFFFL
22462//CP_DRAW_WINDOW_HI
22463#define CP_DRAW_WINDOW_HI__WINDOW_HI__SHIFT 0x0
22464#define CP_DRAW_WINDOW_HI__WINDOW_HI_MASK 0xFFFFFFFFL
22465//CP_DRAW_WINDOW_LO
22466#define CP_DRAW_WINDOW_LO__MIN__SHIFT 0x0
22467#define CP_DRAW_WINDOW_LO__MAX__SHIFT 0x10
22468#define CP_DRAW_WINDOW_LO__MIN_MASK 0x0000FFFFL
22469#define CP_DRAW_WINDOW_LO__MAX_MASK 0xFFFF0000L
22470//CP_DRAW_WINDOW_CNTL
22471#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX__SHIFT 0x0
22472#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN__SHIFT 0x1
22473#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI__SHIFT 0x2
22474#define CP_DRAW_WINDOW_CNTL__MODE__SHIFT 0x8
22475#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX_MASK 0x00000001L
22476#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN_MASK 0x00000002L
22477#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI_MASK 0x00000004L
22478#define CP_DRAW_WINDOW_CNTL__MODE_MASK 0x00000100L
22479//GRBM_PERFCOUNTER0_SELECT
22480#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
22481#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
22482#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
22483#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0xc
22484#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
22485#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
22486#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
22487#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
22488#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
22489#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
22490#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
22491#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
22492#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
22493#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x17
22494#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
22495#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
22496#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
22497#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
22498#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
22499#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
22500#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
22501#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
22502#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003FL
22503#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
22504#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
22505#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
22506#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
22507#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
22508#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
22509#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
22510#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
22511#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
22512#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
22513#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
22514#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
22515#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
22516#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
22517#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
22518#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
22519#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
22520#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
22521#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
22522#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
22523#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
22524//GRBM_PERFCOUNTER1_SELECT
22525#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
22526#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
22527#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
22528#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0xc
22529#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
22530#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
22531#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
22532#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
22533#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
22534#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
22535#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
22536#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
22537#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
22538#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x17
22539#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
22540#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
22541#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
22542#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
22543#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
22544#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
22545#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
22546#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
22547#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003FL
22548#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
22549#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
22550#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
22551#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
22552#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
22553#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
22554#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
22555#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
22556#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
22557#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
22558#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
22559#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
22560#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
22561#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
22562#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
22563#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
22564#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
22565#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
22566#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
22567#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
22568#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
22569//GRBM_SE0_PERFCOUNTER_SELECT
22570#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
22571#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
22572#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
22573#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
22574#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
22575#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
22576#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
22577#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
22578#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
22579#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
22580#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
22581#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
22582#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
22583#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
22584#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
22585#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
22586#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
22587#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
22588#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
22589#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
22590#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
22591#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
22592#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
22593#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
22594#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
22595#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
22596//GRBM_SE1_PERFCOUNTER_SELECT
22597#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
22598#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
22599#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
22600#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
22601#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
22602#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
22603#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
22604#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
22605#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
22606#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
22607#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
22608#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
22609#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
22610#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
22611#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
22612#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
22613#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
22614#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
22615#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
22616#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
22617#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
22618#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
22619#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
22620#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
22621#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
22622#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
22623//GRBM_SE2_PERFCOUNTER_SELECT
22624#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
22625#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
22626#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
22627#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
22628#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
22629#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
22630#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
22631#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
22632#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
22633#define GRBM_SE2_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
22634#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
22635#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
22636#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
22637#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
22638#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
22639#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
22640#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
22641#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
22642#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
22643#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
22644#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
22645#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
22646#define GRBM_SE2_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
22647#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
22648#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
22649#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
22650//GRBM_SE3_PERFCOUNTER_SELECT
22651#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
22652#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
22653#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
22654#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
22655#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
22656#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
22657#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
22658#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
22659#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
22660#define GRBM_SE3_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
22661#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
22662#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
22663#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
22664#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
22665#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
22666#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
22667#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
22668#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
22669#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
22670#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
22671#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
22672#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
22673#define GRBM_SE3_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
22674#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
22675#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
22676#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
22677//WD_PERFCOUNTER0_SELECT
22678#define WD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
22679#define WD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
22680#define WD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000FFL
22681#define WD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
22682//WD_PERFCOUNTER1_SELECT
22683#define WD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
22684#define WD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
22685#define WD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
22686#define WD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
22687//WD_PERFCOUNTER2_SELECT
22688#define WD_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
22689#define WD_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
22690#define WD_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
22691#define WD_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
22692//WD_PERFCOUNTER3_SELECT
22693#define WD_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
22694#define WD_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
22695#define WD_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
22696#define WD_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
22697//IA_PERFCOUNTER0_SELECT
22698#define IA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
22699#define IA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
22700#define IA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
22701#define IA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
22702#define IA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
22703#define IA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
22704#define IA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
22705#define IA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
22706#define IA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
22707#define IA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
22708//IA_PERFCOUNTER1_SELECT
22709#define IA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
22710#define IA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
22711#define IA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
22712#define IA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
22713//IA_PERFCOUNTER2_SELECT
22714#define IA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
22715#define IA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
22716#define IA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
22717#define IA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
22718//IA_PERFCOUNTER3_SELECT
22719#define IA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
22720#define IA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
22721#define IA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
22722#define IA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
22723//IA_PERFCOUNTER0_SELECT1
22724#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
22725#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
22726#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
22727#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
22728#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
22729#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
22730#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
22731#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
22732//VGT_PERFCOUNTER0_SELECT
22733#define VGT_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
22734#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
22735#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
22736#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
22737#define VGT_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
22738#define VGT_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
22739#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
22740#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
22741#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
22742#define VGT_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
22743//VGT_PERFCOUNTER1_SELECT
22744#define VGT_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
22745#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
22746#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
22747#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
22748#define VGT_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
22749#define VGT_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
22750#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
22751#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
22752#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
22753#define VGT_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
22754//VGT_PERFCOUNTER2_SELECT
22755#define VGT_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
22756#define VGT_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
22757#define VGT_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
22758#define VGT_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
22759//VGT_PERFCOUNTER3_SELECT
22760#define VGT_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
22761#define VGT_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
22762#define VGT_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
22763#define VGT_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
22764//VGT_PERFCOUNTER0_SELECT1
22765#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
22766#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
22767#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
22768#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
22769#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
22770#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
22771#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
22772#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
22773//VGT_PERFCOUNTER1_SELECT1
22774#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
22775#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
22776#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
22777#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
22778#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
22779#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
22780#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
22781#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
22782//VGT_PERFCOUNTER_SEID_MASK
22783#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK__SHIFT 0x0
22784#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK_MASK 0x000000FFL
22785//PA_SU_PERFCOUNTER0_SELECT
22786#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
22787#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
22788#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
22789#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
22790#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
22791#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
22792//PA_SU_PERFCOUNTER0_SELECT1
22793#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
22794#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
22795#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
22796#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
22797//PA_SU_PERFCOUNTER1_SELECT
22798#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
22799#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
22800#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
22801#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
22802#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
22803#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
22804//PA_SU_PERFCOUNTER1_SELECT1
22805#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
22806#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
22807#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
22808#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
22809//PA_SU_PERFCOUNTER2_SELECT
22810#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
22811#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
22812#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
22813#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
22814//PA_SU_PERFCOUNTER3_SELECT
22815#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
22816#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
22817#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
22818#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
22819//PA_SC_PERFCOUNTER0_SELECT
22820#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
22821#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
22822#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
22823#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
22824#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
22825#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
22826//PA_SC_PERFCOUNTER0_SELECT1
22827#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
22828#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
22829#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
22830#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
22831//PA_SC_PERFCOUNTER1_SELECT
22832#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
22833#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
22834//PA_SC_PERFCOUNTER2_SELECT
22835#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
22836#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
22837//PA_SC_PERFCOUNTER3_SELECT
22838#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
22839#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
22840//PA_SC_PERFCOUNTER4_SELECT
22841#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
22842#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
22843//PA_SC_PERFCOUNTER5_SELECT
22844#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
22845#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
22846//PA_SC_PERFCOUNTER6_SELECT
22847#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
22848#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000003FFL
22849//PA_SC_PERFCOUNTER7_SELECT
22850#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
22851#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000003FFL
22852//SPI_PERFCOUNTER0_SELECT
22853#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
22854#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
22855#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
22856#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
22857#define SPI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
22858#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
22859#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
22860#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
22861#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
22862#define SPI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
22863//SPI_PERFCOUNTER1_SELECT
22864#define SPI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
22865#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
22866#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
22867#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
22868#define SPI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
22869#define SPI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
22870#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
22871#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
22872#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
22873#define SPI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
22874//SPI_PERFCOUNTER2_SELECT
22875#define SPI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
22876#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
22877#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
22878#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
22879#define SPI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
22880#define SPI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
22881#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
22882#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
22883#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
22884#define SPI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
22885//SPI_PERFCOUNTER3_SELECT
22886#define SPI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
22887#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
22888#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
22889#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
22890#define SPI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
22891#define SPI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
22892#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
22893#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
22894#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
22895#define SPI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
22896//SPI_PERFCOUNTER0_SELECT1
22897#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
22898#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
22899#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
22900#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
22901#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
22902#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
22903#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
22904#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
22905//SPI_PERFCOUNTER1_SELECT1
22906#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
22907#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
22908#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
22909#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
22910#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
22911#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
22912#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
22913#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
22914//SPI_PERFCOUNTER2_SELECT1
22915#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
22916#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
22917#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
22918#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
22919#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
22920#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
22921#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
22922#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
22923//SPI_PERFCOUNTER3_SELECT1
22924#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
22925#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
22926#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
22927#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
22928#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
22929#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
22930#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
22931#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
22932//SPI_PERFCOUNTER4_SELECT
22933#define SPI_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
22934#define SPI_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000000FFL
22935//SPI_PERFCOUNTER5_SELECT
22936#define SPI_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
22937#define SPI_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000000FFL
22938//SPI_PERFCOUNTER_BINS
22939#define SPI_PERFCOUNTER_BINS__BIN0_MIN__SHIFT 0x0
22940#define SPI_PERFCOUNTER_BINS__BIN0_MAX__SHIFT 0x4
22941#define SPI_PERFCOUNTER_BINS__BIN1_MIN__SHIFT 0x8
22942#define SPI_PERFCOUNTER_BINS__BIN1_MAX__SHIFT 0xc
22943#define SPI_PERFCOUNTER_BINS__BIN2_MIN__SHIFT 0x10
22944#define SPI_PERFCOUNTER_BINS__BIN2_MAX__SHIFT 0x14
22945#define SPI_PERFCOUNTER_BINS__BIN3_MIN__SHIFT 0x18
22946#define SPI_PERFCOUNTER_BINS__BIN3_MAX__SHIFT 0x1c
22947#define SPI_PERFCOUNTER_BINS__BIN0_MIN_MASK 0x0000000FL
22948#define SPI_PERFCOUNTER_BINS__BIN0_MAX_MASK 0x000000F0L
22949#define SPI_PERFCOUNTER_BINS__BIN1_MIN_MASK 0x00000F00L
22950#define SPI_PERFCOUNTER_BINS__BIN1_MAX_MASK 0x0000F000L
22951#define SPI_PERFCOUNTER_BINS__BIN2_MIN_MASK 0x000F0000L
22952#define SPI_PERFCOUNTER_BINS__BIN2_MAX_MASK 0x00F00000L
22953#define SPI_PERFCOUNTER_BINS__BIN3_MIN_MASK 0x0F000000L
22954#define SPI_PERFCOUNTER_BINS__BIN3_MAX_MASK 0xF0000000L
22955//SQ_PERFCOUNTER0_SELECT
22956#define SQ_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
22957#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK__SHIFT 0xc
22958#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
22959#define SQ_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
22960#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK__SHIFT 0x18
22961#define SQ_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
22962#define SQ_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
22963#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
22964#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
22965#define SQ_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
22966#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK_MASK 0x0F000000L
22967#define SQ_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
22968//SQ_PERFCOUNTER1_SELECT
22969#define SQ_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
22970#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK__SHIFT 0xc
22971#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
22972#define SQ_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
22973#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK__SHIFT 0x18
22974#define SQ_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
22975#define SQ_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
22976#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
22977#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
22978#define SQ_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
22979#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK_MASK 0x0F000000L
22980#define SQ_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
22981//SQ_PERFCOUNTER2_SELECT
22982#define SQ_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
22983#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK__SHIFT 0xc
22984#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
22985#define SQ_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
22986#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK__SHIFT 0x18
22987#define SQ_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
22988#define SQ_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
22989#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
22990#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
22991#define SQ_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00F00000L
22992#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK_MASK 0x0F000000L
22993#define SQ_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
22994//SQ_PERFCOUNTER3_SELECT
22995#define SQ_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
22996#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK__SHIFT 0xc
22997#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
22998#define SQ_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
22999#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK__SHIFT 0x18
23000#define SQ_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
23001#define SQ_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
23002#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23003#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23004#define SQ_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00F00000L
23005#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK_MASK 0x0F000000L
23006#define SQ_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
23007//SQ_PERFCOUNTER4_SELECT
23008#define SQ_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
23009#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK__SHIFT 0xc
23010#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23011#define SQ_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
23012#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK__SHIFT 0x18
23013#define SQ_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
23014#define SQ_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001FFL
23015#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23016#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23017#define SQ_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00F00000L
23018#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK_MASK 0x0F000000L
23019#define SQ_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xF0000000L
23020//SQ_PERFCOUNTER5_SELECT
23021#define SQ_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
23022#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK__SHIFT 0xc
23023#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23024#define SQ_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
23025#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK__SHIFT 0x18
23026#define SQ_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
23027#define SQ_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001FFL
23028#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23029#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23030#define SQ_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00F00000L
23031#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK_MASK 0x0F000000L
23032#define SQ_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xF0000000L
23033//SQ_PERFCOUNTER6_SELECT
23034#define SQ_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
23035#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK__SHIFT 0xc
23036#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23037#define SQ_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
23038#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK__SHIFT 0x18
23039#define SQ_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
23040#define SQ_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001FFL
23041#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23042#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23043#define SQ_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00F00000L
23044#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK_MASK 0x0F000000L
23045#define SQ_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xF0000000L
23046//SQ_PERFCOUNTER7_SELECT
23047#define SQ_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
23048#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK__SHIFT 0xc
23049#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23050#define SQ_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
23051#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK__SHIFT 0x18
23052#define SQ_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
23053#define SQ_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001FFL
23054#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23055#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23056#define SQ_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00F00000L
23057#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK_MASK 0x0F000000L
23058#define SQ_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xF0000000L
23059//SQ_PERFCOUNTER8_SELECT
23060#define SQ_PERFCOUNTER8_SELECT__PERF_SEL__SHIFT 0x0
23061#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK__SHIFT 0xc
23062#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23063#define SQ_PERFCOUNTER8_SELECT__SPM_MODE__SHIFT 0x14
23064#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK__SHIFT 0x18
23065#define SQ_PERFCOUNTER8_SELECT__PERF_MODE__SHIFT 0x1c
23066#define SQ_PERFCOUNTER8_SELECT__PERF_SEL_MASK 0x000001FFL
23067#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23068#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23069#define SQ_PERFCOUNTER8_SELECT__SPM_MODE_MASK 0x00F00000L
23070#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK_MASK 0x0F000000L
23071#define SQ_PERFCOUNTER8_SELECT__PERF_MODE_MASK 0xF0000000L
23072//SQ_PERFCOUNTER9_SELECT
23073#define SQ_PERFCOUNTER9_SELECT__PERF_SEL__SHIFT 0x0
23074#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK__SHIFT 0xc
23075#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23076#define SQ_PERFCOUNTER9_SELECT__SPM_MODE__SHIFT 0x14
23077#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK__SHIFT 0x18
23078#define SQ_PERFCOUNTER9_SELECT__PERF_MODE__SHIFT 0x1c
23079#define SQ_PERFCOUNTER9_SELECT__PERF_SEL_MASK 0x000001FFL
23080#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23081#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23082#define SQ_PERFCOUNTER9_SELECT__SPM_MODE_MASK 0x00F00000L
23083#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK_MASK 0x0F000000L
23084#define SQ_PERFCOUNTER9_SELECT__PERF_MODE_MASK 0xF0000000L
23085//SQ_PERFCOUNTER10_SELECT
23086#define SQ_PERFCOUNTER10_SELECT__PERF_SEL__SHIFT 0x0
23087#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK__SHIFT 0xc
23088#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23089#define SQ_PERFCOUNTER10_SELECT__SPM_MODE__SHIFT 0x14
23090#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK__SHIFT 0x18
23091#define SQ_PERFCOUNTER10_SELECT__PERF_MODE__SHIFT 0x1c
23092#define SQ_PERFCOUNTER10_SELECT__PERF_SEL_MASK 0x000001FFL
23093#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23094#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23095#define SQ_PERFCOUNTER10_SELECT__SPM_MODE_MASK 0x00F00000L
23096#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK_MASK 0x0F000000L
23097#define SQ_PERFCOUNTER10_SELECT__PERF_MODE_MASK 0xF0000000L
23098//SQ_PERFCOUNTER11_SELECT
23099#define SQ_PERFCOUNTER11_SELECT__PERF_SEL__SHIFT 0x0
23100#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK__SHIFT 0xc
23101#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23102#define SQ_PERFCOUNTER11_SELECT__SPM_MODE__SHIFT 0x14
23103#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK__SHIFT 0x18
23104#define SQ_PERFCOUNTER11_SELECT__PERF_MODE__SHIFT 0x1c
23105#define SQ_PERFCOUNTER11_SELECT__PERF_SEL_MASK 0x000001FFL
23106#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23107#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23108#define SQ_PERFCOUNTER11_SELECT__SPM_MODE_MASK 0x00F00000L
23109#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK_MASK 0x0F000000L
23110#define SQ_PERFCOUNTER11_SELECT__PERF_MODE_MASK 0xF0000000L
23111//SQ_PERFCOUNTER12_SELECT
23112#define SQ_PERFCOUNTER12_SELECT__PERF_SEL__SHIFT 0x0
23113#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK__SHIFT 0xc
23114#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23115#define SQ_PERFCOUNTER12_SELECT__SPM_MODE__SHIFT 0x14
23116#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK__SHIFT 0x18
23117#define SQ_PERFCOUNTER12_SELECT__PERF_MODE__SHIFT 0x1c
23118#define SQ_PERFCOUNTER12_SELECT__PERF_SEL_MASK 0x000001FFL
23119#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23120#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23121#define SQ_PERFCOUNTER12_SELECT__SPM_MODE_MASK 0x00F00000L
23122#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK_MASK 0x0F000000L
23123#define SQ_PERFCOUNTER12_SELECT__PERF_MODE_MASK 0xF0000000L
23124//SQ_PERFCOUNTER13_SELECT
23125#define SQ_PERFCOUNTER13_SELECT__PERF_SEL__SHIFT 0x0
23126#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK__SHIFT 0xc
23127#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23128#define SQ_PERFCOUNTER13_SELECT__SPM_MODE__SHIFT 0x14
23129#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK__SHIFT 0x18
23130#define SQ_PERFCOUNTER13_SELECT__PERF_MODE__SHIFT 0x1c
23131#define SQ_PERFCOUNTER13_SELECT__PERF_SEL_MASK 0x000001FFL
23132#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23133#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23134#define SQ_PERFCOUNTER13_SELECT__SPM_MODE_MASK 0x00F00000L
23135#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK_MASK 0x0F000000L
23136#define SQ_PERFCOUNTER13_SELECT__PERF_MODE_MASK 0xF0000000L
23137//SQ_PERFCOUNTER14_SELECT
23138#define SQ_PERFCOUNTER14_SELECT__PERF_SEL__SHIFT 0x0
23139#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK__SHIFT 0xc
23140#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23141#define SQ_PERFCOUNTER14_SELECT__SPM_MODE__SHIFT 0x14
23142#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK__SHIFT 0x18
23143#define SQ_PERFCOUNTER14_SELECT__PERF_MODE__SHIFT 0x1c
23144#define SQ_PERFCOUNTER14_SELECT__PERF_SEL_MASK 0x000001FFL
23145#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23146#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23147#define SQ_PERFCOUNTER14_SELECT__SPM_MODE_MASK 0x00F00000L
23148#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK_MASK 0x0F000000L
23149#define SQ_PERFCOUNTER14_SELECT__PERF_MODE_MASK 0xF0000000L
23150//SQ_PERFCOUNTER15_SELECT
23151#define SQ_PERFCOUNTER15_SELECT__PERF_SEL__SHIFT 0x0
23152#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK__SHIFT 0xc
23153#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
23154#define SQ_PERFCOUNTER15_SELECT__SPM_MODE__SHIFT 0x14
23155#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK__SHIFT 0x18
23156#define SQ_PERFCOUNTER15_SELECT__PERF_MODE__SHIFT 0x1c
23157#define SQ_PERFCOUNTER15_SELECT__PERF_SEL_MASK 0x000001FFL
23158#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
23159#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
23160#define SQ_PERFCOUNTER15_SELECT__SPM_MODE_MASK 0x00F00000L
23161#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK_MASK 0x0F000000L
23162#define SQ_PERFCOUNTER15_SELECT__PERF_MODE_MASK 0xF0000000L
23163//SQ_PERFCOUNTER_CTRL
23164#define SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
23165#define SQ_PERFCOUNTER_CTRL__VS_EN__SHIFT 0x1
23166#define SQ_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
23167#define SQ_PERFCOUNTER_CTRL__ES_EN__SHIFT 0x3
23168#define SQ_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
23169#define SQ_PERFCOUNTER_CTRL__LS_EN__SHIFT 0x5
23170#define SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
23171#define SQ_PERFCOUNTER_CTRL__CNTR_RATE__SHIFT 0x8
23172#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH__SHIFT 0xd
23173#define SQ_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
23174#define SQ_PERFCOUNTER_CTRL__VS_EN_MASK 0x00000002L
23175#define SQ_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
23176#define SQ_PERFCOUNTER_CTRL__ES_EN_MASK 0x00000008L
23177#define SQ_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
23178#define SQ_PERFCOUNTER_CTRL__LS_EN_MASK 0x00000020L
23179#define SQ_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
23180#define SQ_PERFCOUNTER_CTRL__CNTR_RATE_MASK 0x00001F00L
23181#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH_MASK 0x00002000L
23182//SQ_PERFCOUNTER_MASK
23183#define SQ_PERFCOUNTER_MASK__SH0_MASK__SHIFT 0x0
23184#define SQ_PERFCOUNTER_MASK__SH1_MASK__SHIFT 0x10
23185#define SQ_PERFCOUNTER_MASK__SH0_MASK_MASK 0x0000FFFFL
23186#define SQ_PERFCOUNTER_MASK__SH1_MASK_MASK 0xFFFF0000L
23187//SQ_PERFCOUNTER_CTRL2
23188#define SQ_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
23189#define SQ_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
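/*
 * Illustrative sketch, not part of the original register header: every field
 * above is described by a *__SHIFT / *_MASK pair, and the two are meant to be
 * combined when a field is packed into a register word. The helper and the
 * example below are hypothetical (drivers normally use their own
 * REG_SET_FIELD-style macros), but the arithmetic is the same.
 */
static inline unsigned int sketch_set_field(unsigned int reg, unsigned int mask,
                                            unsigned int shift, unsigned int val)
{
        /* clear the field, then OR in the new value, clamped to the field width */
        return (reg & ~mask) | ((val << shift) & mask);
}

/* Hypothetical usage: enable the PS and CS stages and set a counter rate of 4. */
static inline unsigned int sketch_sq_perfcounter_ctrl_value(void)
{
        unsigned int ctrl = 0;

        ctrl = sketch_set_field(ctrl, SQ_PERFCOUNTER_CTRL__PS_EN_MASK,
                                SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT, 1);
        ctrl = sketch_set_field(ctrl, SQ_PERFCOUNTER_CTRL__CS_EN_MASK,
                                SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT, 1);
        ctrl = sketch_set_field(ctrl, SQ_PERFCOUNTER_CTRL__CNTR_RATE_MASK,
                                SQ_PERFCOUNTER_CTRL__CNTR_RATE__SHIFT, 4);
        return ctrl;
}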
23190//SX_PERFCOUNTER0_SELECT
23191#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
23192#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
23193#define SX_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
23194#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
23195#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
23196#define SX_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
23197//SX_PERFCOUNTER1_SELECT
23198#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
23199#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
23200#define SX_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
23201#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
23202#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
23203#define SX_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
23204//SX_PERFCOUNTER2_SELECT
23205#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
23206#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
23207#define SX_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
23208#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
23209#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
23210#define SX_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
23211//SX_PERFCOUNTER3_SELECT
23212#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
23213#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
23214#define SX_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
23215#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
23216#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
23217#define SX_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
23218//SX_PERFCOUNTER0_SELECT1
23219#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x0
23220#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0xa
23221#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003FFL
23222#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000FFC00L
23223//SX_PERFCOUNTER1_SELECT1
23224#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x0
23225#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0xa
23226#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003FFL
23227#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000FFC00L
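/*
 * Illustrative sketch, not part of the original header: counters that also
 * expose a *_SELECT1 register (SX_PERFCOUNTER0 here) provide four select
 * fields in total -- PERFCOUNTER_SELECT/PERFCOUNTER_SELECT1 in SELECT and
 * PERFCOUNTER_SELECT2/PERFCOUNTER_SELECT3 in SELECT1. A hedged example of
 * packing all four event selects (helper name and event numbers are
 * arbitrary):
 */
static inline void sketch_sx_counter0_selects(unsigned int sel_out[2],
                                              unsigned int ev0, unsigned int ev1,
                                              unsigned int ev2, unsigned int ev3)
{
        sel_out[0] = ((ev0 << SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT) &
                      SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK) |
                     ((ev1 << SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT) &
                      SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK);
        sel_out[1] = ((ev2 << SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT) &
                      SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK) |
                     ((ev3 << SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT) &
                      SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK);
}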
23228//GDS_PERFCOUNTER0_SELECT
23229#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
23230#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
23231#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
23232#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
23233#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
23234#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
23235//GDS_PERFCOUNTER1_SELECT
23236#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
23237#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
23238#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
23239#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
23240#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
23241#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
23242//GDS_PERFCOUNTER2_SELECT
23243#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
23244#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
23245#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
23246#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
23247#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
23248#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
23249//GDS_PERFCOUNTER3_SELECT
23250#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
23251#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
23252#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
23253#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
23254#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
23255#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
23256//GDS_PERFCOUNTER0_SELECT1
23257#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x0
23258#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0xa
23259#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003FFL
23260#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000FFC00L
23261//TA_PERFCOUNTER0_SELECT
23262#define TA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
23263#define TA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
23264#define TA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
23265#define TA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
23266#define TA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
23267#define TA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000FFL
23268#define TA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0003FC00L
23269#define TA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
23270#define TA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
23271#define TA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
23272//TA_PERFCOUNTER0_SELECT1
23273#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
23274#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
23275#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
23276#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
23277#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000000FFL
23278#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0003FC00L
23279#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
23280#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
23281//TA_PERFCOUNTER1_SELECT
23282#define TA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
23283#define TA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
23284#define TA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
23285#define TA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
23286#define TA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
23287#define TA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
23288#define TA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x0003FC00L
23289#define TA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
23290#define TA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
23291#define TA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
23292//TD_PERFCOUNTER0_SELECT
23293#define TD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
23294#define TD_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
23295#define TD_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
23296#define TD_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
23297#define TD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
23298#define TD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000FFL
23299#define TD_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0003FC00L
23300#define TD_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
23301#define TD_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
23302#define TD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
23303//TD_PERFCOUNTER0_SELECT1
23304#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
23305#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
23306#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
23307#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
23308#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000000FFL
23309#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0003FC00L
23310#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
23311#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
23312//TD_PERFCOUNTER1_SELECT
23313#define TD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
23314#define TD_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
23315#define TD_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
23316#define TD_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
23317#define TD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
23318#define TD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
23319#define TD_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x0003FC00L
23320#define TD_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
23321#define TD_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
23322#define TD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
23323//TCP_PERFCOUNTER0_SELECT
23324#define TCP_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
23325#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
23326#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
23327#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
23328#define TCP_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
23329#define TCP_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
23330#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
23331#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
23332#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
23333#define TCP_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
23334//TCP_PERFCOUNTER0_SELECT1
23335#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
23336#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
23337#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
23338#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
23339#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
23340#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
23341#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
23342#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
23343//TCP_PERFCOUNTER1_SELECT
23344#define TCP_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
23345#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
23346#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
23347#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
23348#define TCP_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
23349#define TCP_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
23350#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
23351#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
23352#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
23353#define TCP_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
23354//TCP_PERFCOUNTER1_SELECT1
23355#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
23356#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
23357#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
23358#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
23359#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
23360#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
23361#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
23362#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
23363//TCP_PERFCOUNTER2_SELECT
23364#define TCP_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
23365#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
23366#define TCP_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
23367#define TCP_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
23368#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
23369#define TCP_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
23370//TCP_PERFCOUNTER3_SELECT
23371#define TCP_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
23372#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
23373#define TCP_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
23374#define TCP_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
23375#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
23376#define TCP_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
23377//TCC_PERFCOUNTER0_SELECT
23378#define TCC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
23379#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
23380#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
23381#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
23382#define TCC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
23383#define TCC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
23384#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
23385#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
23386#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
23387#define TCC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
23388//TCC_PERFCOUNTER0_SELECT1
23389#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
23390#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
23391#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
23392#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
23393#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
23394#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
23395#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
23396#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
23397//TCC_PERFCOUNTER1_SELECT
23398#define TCC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
23399#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
23400#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
23401#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
23402#define TCC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
23403#define TCC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
23404#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
23405#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
23406#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
23407#define TCC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
23408//TCC_PERFCOUNTER1_SELECT1
23409#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
23410#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
23411#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
23412#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
23413#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
23414#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
23415#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
23416#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
23417//TCC_PERFCOUNTER2_SELECT
23418#define TCC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
23419#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
23420#define TCC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
23421#define TCC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
23422#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
23423#define TCC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
23424//TCC_PERFCOUNTER3_SELECT
23425#define TCC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
23426#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
23427#define TCC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
23428#define TCC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
23429#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
23430#define TCC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
23431//TCA_PERFCOUNTER0_SELECT
23432#define TCA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
23433#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
23434#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
23435#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
23436#define TCA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
23437#define TCA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
23438#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
23439#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
23440#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
23441#define TCA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
23442//TCA_PERFCOUNTER0_SELECT1
23443#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
23444#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
23445#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
23446#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
23447#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
23448#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
23449#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
23450#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
23451//TCA_PERFCOUNTER1_SELECT
23452#define TCA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
23453#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
23454#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
23455#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
23456#define TCA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
23457#define TCA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
23458#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
23459#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
23460#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
23461#define TCA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
23462//TCA_PERFCOUNTER1_SELECT1
23463#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
23464#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
23465#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
23466#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
23467#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
23468#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
23469#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
23470#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
23471//TCA_PERFCOUNTER2_SELECT
23472#define TCA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
23473#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
23474#define TCA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
23475#define TCA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
23476#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
23477#define TCA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
23478//TCA_PERFCOUNTER3_SELECT
23479#define TCA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
23480#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
23481#define TCA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
23482#define TCA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
23483#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
23484#define TCA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
23485//CB_PERFCOUNTER_FILTER
23486#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE__SHIFT 0x0
23487#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL__SHIFT 0x1
23488#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE__SHIFT 0x4
23489#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL__SHIFT 0x5
23490#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE__SHIFT 0xa
23491#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL__SHIFT 0xb
23492#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE__SHIFT 0xc
23493#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL__SHIFT 0xd
23494#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE__SHIFT 0x11
23495#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL__SHIFT 0x12
23496#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE__SHIFT 0x15
23497#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL__SHIFT 0x16
23498#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE_MASK 0x00000001L
23499#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL_MASK 0x0000000EL
23500#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE_MASK 0x00000010L
23501#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL_MASK 0x000003E0L
23502#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE_MASK 0x00000400L
23503#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL_MASK 0x00000800L
23504#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE_MASK 0x00001000L
23505#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL_MASK 0x0000E000L
23506#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE_MASK 0x00020000L
23507#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL_MASK 0x001C0000L
23508#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE_MASK 0x00200000L
23509#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL_MASK 0x00C00000L
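/*
 * Illustrative sketch, not part of the original header: each filter in
 * CB_PERFCOUNTER_FILTER pairs an *_ENABLE bit with a *_SEL field. Hedged
 * example restricting CB perf counting to a single render target index
 * (the helper name is hypothetical):
 */
static inline unsigned int sketch_cb_filter_by_mrt(unsigned int mrt)
{
        return CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE_MASK |
               ((mrt << CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL__SHIFT) &
                CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL_MASK);
}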
23510//CB_PERFCOUNTER0_SELECT
23511#define CB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
23512#define CB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
23513#define CB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
23514#define CB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
23515#define CB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
23516#define CB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
23517#define CB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0007FC00L
23518#define CB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
23519#define CB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
23520#define CB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
23521//CB_PERFCOUNTER0_SELECT1
23522#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
23523#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
23524#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
23525#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
23526#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000001FFL
23527#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0007FC00L
23528#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
23529#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
23530//CB_PERFCOUNTER1_SELECT
23531#define CB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
23532#define CB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
23533#define CB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
23534#define CB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
23535//CB_PERFCOUNTER2_SELECT
23536#define CB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
23537#define CB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
23538#define CB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
23539#define CB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
23540//CB_PERFCOUNTER3_SELECT
23541#define CB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
23542#define CB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
23543#define CB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
23544#define CB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
23545//DB_PERFCOUNTER0_SELECT
23546#define DB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
23547#define DB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
23548#define DB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
23549#define DB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
23550#define DB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
23551#define DB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
23552#define DB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
23553#define DB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
23554#define DB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
23555#define DB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
23556//DB_PERFCOUNTER0_SELECT1
23557#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
23558#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
23559#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
23560#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
23561#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
23562#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
23563#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
23564#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
23565//DB_PERFCOUNTER1_SELECT
23566#define DB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
23567#define DB_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
23568#define DB_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
23569#define DB_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
23570#define DB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
23571#define DB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
23572#define DB_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
23573#define DB_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
23574#define DB_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
23575#define DB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
23576//DB_PERFCOUNTER1_SELECT1
23577#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
23578#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
23579#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
23580#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
23581#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
23582#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
23583#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
23584#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
23585//DB_PERFCOUNTER2_SELECT
23586#define DB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
23587#define DB_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
23588#define DB_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
23589#define DB_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
23590#define DB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
23591#define DB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
23592#define DB_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
23593#define DB_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
23594#define DB_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
23595#define DB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
23596//DB_PERFCOUNTER3_SELECT
23597#define DB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
23598#define DB_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
23599#define DB_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
23600#define DB_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
23601#define DB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
23602#define DB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
23603#define DB_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
23604#define DB_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
23605#define DB_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
23606#define DB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
23607//RLC_SPM_PERFMON_CNTL
23608#define RLC_SPM_PERFMON_CNTL__RESERVED1__SHIFT 0x2
23609#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE__SHIFT 0xc
23610#define RLC_SPM_PERFMON_CNTL__RESERVED__SHIFT 0xe
23611#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL__SHIFT 0x10
23612#define RLC_SPM_PERFMON_CNTL__RESERVED1_MASK 0x00000FFCL
23613#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE_MASK 0x00003000L
23614#define RLC_SPM_PERFMON_CNTL__RESERVED_MASK 0x0000C000L
23615#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL_MASK 0xFFFF0000L
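/*
 * Illustrative sketch, not part of the original header: RLC_SPM_PERFMON_CNTL
 * carries both the ring mode and a 16-bit sample interval. Hedged example of
 * packing the two fields (the mode/interval values a caller passes are
 * arbitrary, and the helper name is hypothetical):
 */
static inline unsigned int sketch_spm_perfmon_cntl(unsigned int ring_mode,
                                                   unsigned int interval)
{
        return ((ring_mode << RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE__SHIFT) &
                RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE_MASK) |
               ((interval << RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL__SHIFT) &
                RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL_MASK);
}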
23616//RLC_SPM_PERFMON_RING_BASE_LO
23617#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO__SHIFT 0x0
23618#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO_MASK 0xFFFFFFFFL
23619//RLC_SPM_PERFMON_RING_BASE_HI
23620#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI__SHIFT 0x0
23621#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED__SHIFT 0x10
23622#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI_MASK 0x0000FFFFL
23623#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED_MASK 0xFFFF0000L
23624//RLC_SPM_PERFMON_RING_SIZE
23625#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE__SHIFT 0x0
23626#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE_MASK 0xFFFFFFFFL
23627//RLC_SPM_PERFMON_SEGMENT_SIZE
23628#define RLC_SPM_PERFMON_SEGMENT_SIZE__PERFMON_SEGMENT_SIZE__SHIFT 0x0
23629#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED1__SHIFT 0x8
23630#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_LINE__SHIFT 0xb
23631#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE__SHIFT 0x10
23632#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE1_NUM_LINE__SHIFT 0x15
23633#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE2_NUM_LINE__SHIFT 0x1a
23634#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED__SHIFT 0x1f
23635#define RLC_SPM_PERFMON_SEGMENT_SIZE__PERFMON_SEGMENT_SIZE_MASK 0x000000FFL
23636#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED1_MASK 0x00000700L
23637#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_LINE_MASK 0x0000F800L
23638#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE_MASK 0x001F0000L
23639#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE1_NUM_LINE_MASK 0x03E00000L
23640#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE2_NUM_LINE_MASK 0x7C000000L
23641#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED_MASK 0x80000000L
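/*
 * Illustrative sketch, not part of the original header: the same
 * *_MASK/*__SHIFT pair also works in the other direction when decoding a
 * value read back from a register. The helper is hypothetical; the example
 * pulls the SE0 line count out of a RLC_SPM_PERFMON_SEGMENT_SIZE word.
 */
static inline unsigned int sketch_get_field(unsigned int reg, unsigned int mask,
                                            unsigned int shift)
{
        return (reg & mask) >> shift;
}
/*
 * e.g.  se0_lines = sketch_get_field(segment_size,
 *                       RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE_MASK,
 *                       RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE__SHIFT);
 */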
23642//RLC_SPM_SE_MUXSEL_ADDR
23643#define RLC_SPM_SE_MUXSEL_ADDR__PERFMON_SEL_ADDR__SHIFT 0x0
23644#define RLC_SPM_SE_MUXSEL_ADDR__PERFMON_SEL_ADDR_MASK 0xFFFFFFFFL
23645//RLC_SPM_SE_MUXSEL_DATA
23646#define RLC_SPM_SE_MUXSEL_DATA__PERFMON_SEL_DATA__SHIFT 0x0
23647#define RLC_SPM_SE_MUXSEL_DATA__PERFMON_SEL_DATA_MASK 0xFFFFFFFFL
23648//RLC_SPM_CPG_PERFMON_SAMPLE_DELAY
23649#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23650#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23651#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23652#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23653//RLC_SPM_CPC_PERFMON_SAMPLE_DELAY
23654#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23655#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23656#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23657#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23658//RLC_SPM_CPF_PERFMON_SAMPLE_DELAY
23659#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23660#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23661#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23662#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23663//RLC_SPM_CB_PERFMON_SAMPLE_DELAY
23664#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23665#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23666#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23667#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23668//RLC_SPM_DB_PERFMON_SAMPLE_DELAY
23669#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23670#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23671#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23672#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23673//RLC_SPM_PA_PERFMON_SAMPLE_DELAY
23674#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23675#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23676#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23677#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23678//RLC_SPM_GDS_PERFMON_SAMPLE_DELAY
23679#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23680#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23681#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23682#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23683//RLC_SPM_IA_PERFMON_SAMPLE_DELAY
23684#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23685#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23686#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23687#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23688//RLC_SPM_SC_PERFMON_SAMPLE_DELAY
23689#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23690#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23691#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23692#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23693//RLC_SPM_TCC_PERFMON_SAMPLE_DELAY
23694#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23695#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23696#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23697#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23698//RLC_SPM_TCA_PERFMON_SAMPLE_DELAY
23699#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23700#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23701#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23702#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23703//RLC_SPM_TCP_PERFMON_SAMPLE_DELAY
23704#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23705#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23706#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23707#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23708//RLC_SPM_TA_PERFMON_SAMPLE_DELAY
23709#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23710#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23711#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23712#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23713//RLC_SPM_TD_PERFMON_SAMPLE_DELAY
23714#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23715#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23716#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23717#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23718//RLC_SPM_VGT_PERFMON_SAMPLE_DELAY
23719#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23720#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23721#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23722#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23723//RLC_SPM_SPI_PERFMON_SAMPLE_DELAY
23724#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23725#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23726#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23727#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23728//RLC_SPM_SQG_PERFMON_SAMPLE_DELAY
23729#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23730#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23731#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23732#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23733//RLC_SPM_SX_PERFMON_SAMPLE_DELAY
23734#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23735#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23736#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23737#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23738//RLC_SPM_GLOBAL_MUXSEL_ADDR
23739#define RLC_SPM_GLOBAL_MUXSEL_ADDR__PERFMON_SEL_ADDR__SHIFT 0x0
23740#define RLC_SPM_GLOBAL_MUXSEL_ADDR__PERFMON_SEL_ADDR_MASK 0xFFFFFFFFL
23741//RLC_SPM_GLOBAL_MUXSEL_DATA
23742#define RLC_SPM_GLOBAL_MUXSEL_DATA__PERFMON_SEL_DATA__SHIFT 0x0
23743#define RLC_SPM_GLOBAL_MUXSEL_DATA__PERFMON_SEL_DATA_MASK 0xFFFFFFFFL
23744//RLC_SPM_RING_RDPTR
23745#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR__SHIFT 0x0
23746#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR_MASK 0xFFFFFFFFL
23747//RLC_SPM_SEGMENT_THRESHOLD
23748#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD__SHIFT 0x0
23749#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD_MASK 0xFFFFFFFFL
23750//RLC_SPM_RMI_PERFMON_SAMPLE_DELAY
23751#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
23752#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
23753#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
23754#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
23755//RLC_PERFMON_CLK_CNTL
23756#define RLC_PERFMON_CLK_CNTL__PERFMON_CLOCK_STATE__SHIFT 0x0
23757#define RLC_PERFMON_CLK_CNTL__PERFMON_CLOCK_STATE_MASK 0x00000001L
23758//RLC_PERFMON_CNTL
23759#define RLC_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
23760#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
23761#define RLC_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000007L
23762#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
23763//RLC_PERFCOUNTER0_SELECT
23764#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
23765#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x00FFL
23766//RLC_PERFCOUNTER1_SELECT
23767#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
23768#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x00FFL
23769//RLC_GPU_IOV_PERF_CNT_CNTL
23770#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE__SHIFT 0x0
23771#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT__SHIFT 0x1
23772#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET__SHIFT 0x2
23773#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED__SHIFT 0x3
23774#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE_MASK 0x00000001L
23775#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT_MASK 0x00000002L
23776#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET_MASK 0x00000004L
23777#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED_MASK 0xFFFFFFF8L
23778//RLC_GPU_IOV_PERF_CNT_WR_ADDR
23779#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID__SHIFT 0x0
23780#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID__SHIFT 0x4
23781#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED__SHIFT 0x6
23782#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID_MASK 0x0000000FL
23783#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID_MASK 0x00000030L
23784#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED_MASK 0xFFFFFFC0L
23785//RLC_GPU_IOV_PERF_CNT_WR_DATA
23786#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA__SHIFT 0x0
23787#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA_MASK 0x0000000FL
23788//RLC_GPU_IOV_PERF_CNT_RD_ADDR
23789#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID__SHIFT 0x0
23790#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID__SHIFT 0x4
23791#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED__SHIFT 0x6
23792#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID_MASK 0x0000000FL
23793#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID_MASK 0x00000030L
23794#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED_MASK 0xFFFFFFC0L
23795//RLC_GPU_IOV_PERF_CNT_RD_DATA
23796#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA__SHIFT 0x0
23797#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA_MASK 0x0000000FL
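/*
 * Illustrative sketch, not part of the original header: the GPU IOV perf
 * counters are addressed indirectly -- a VFID/CNT_ID pair is written into the
 * *_RD_ADDR (or *_WR_ADDR) register and the counter value then moves through
 * the matching *_DATA register. Only the address-word composition is shown;
 * the MMIO accessors themselves are not part of this header.
 */
static inline unsigned int sketch_iov_perf_cnt_rd_addr(unsigned int vfid,
                                                       unsigned int cnt_id)
{
        return ((vfid << RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID__SHIFT) &
                RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID_MASK) |
               ((cnt_id << RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID__SHIFT) &
                RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID_MASK);
}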
23798//RMI_PERFCOUNTER0_SELECT
23799#define RMI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
23800#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
23801#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
23802#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
23803#define RMI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
23804#define RMI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
23805#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0007FC00L
23806#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
23807#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
23808#define RMI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
23809//RMI_PERFCOUNTER0_SELECT1
23810#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
23811#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
23812#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
23813#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
23814#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000001FFL
23815#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0007FC00L
23816#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
23817#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
23818//RMI_PERFCOUNTER1_SELECT
23819#define RMI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
23820#define RMI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
23821#define RMI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
23822#define RMI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
23823//RMI_PERFCOUNTER2_SELECT
23824#define RMI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
23825#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
23826#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
23827#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
23828#define RMI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
23829#define RMI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
23830#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x0007FC00L
23831#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
23832#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
23833#define RMI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
23834//RMI_PERFCOUNTER2_SELECT1
23835#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
23836#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
23837#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
23838#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
23839#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000001FFL
23840#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x0007FC00L
23841#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
23842#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
23843//RMI_PERFCOUNTER3_SELECT
23844#define RMI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
23845#define RMI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
23846#define RMI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
23847#define RMI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
23848//RMI_PERF_COUNTER_CNTL
23849#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL__SHIFT 0x0
23850#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL__SHIFT 0x2
23851#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL__SHIFT 0x4
23852#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0__SHIFT 0x6
23853#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1__SHIFT 0x8
23854#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID__SHIFT 0xa
23855#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID__SHIFT 0xe
23856#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD__SHIFT 0x13
23857#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET__SHIFT 0x19
23858#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL__SHIFT 0x1a
23859#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL_MASK 0x00000003L
23860#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL_MASK 0x0000000CL
23861#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL_MASK 0x00000030L
23862#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0_MASK 0x000000C0L
23863#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1_MASK 0x00000300L
23864#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID_MASK 0x00003C00L
23865#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID_MASK 0x0007C000L
23866#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD_MASK 0x01F80000L
23867#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET_MASK 0x02000000L
23868#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL_MASK 0x04000000L
23869
23870
23871// addressBlock: gc_utcl2_atcl2pfcntldec
23872//ATC_L2_PERFCOUNTER0_CFG
23873#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
23874#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
23875#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
23876#define ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
23877#define ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
23878#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
23879#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
23880#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
23881#define ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
23882#define ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
23883//ATC_L2_PERFCOUNTER1_CFG
23884#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
23885#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
23886#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
23887#define ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
23888#define ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
23889#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
23890#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
23891#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
23892#define ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
23893#define ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
23894//ATC_L2_PERFCOUNTER_RSLT_CNTL
23895#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
23896#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
23897#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
23898#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
23899#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
23900#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
23901#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
23902#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
23903#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
23904#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
23905#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
23906#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
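/*
 * Illustrative sketch, not part of the original header: a plausible way to
 * use these defines is to program an event in ATC_L2_PERFCOUNTER0_CFG and
 * then start every configured counter via ENABLE_ANY in the RSLT_CNTL
 * register. Treating PERF_SEL/PERF_SEL_END as the bounds of an event range
 * is an assumption here; both are set to the same event below. Only value
 * composition is shown -- register offsets and MMIO writes live elsewhere.
 */
static inline unsigned int sketch_atc_l2_counter0_cfg(unsigned int event)
{
        return ((event << ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT) &
                ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK) |
               ((event << ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT) &
                ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK) |
               ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK;
}

static inline unsigned int sketch_atc_l2_start_all(void)
{
        return ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK;
}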
23907
23908
23909// addressBlock: gc_utcl2_vml2pldec
23910//MC_VM_L2_PERFCOUNTER0_CFG
23911#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
23912#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
23913#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
23914#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
23915#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
23916#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
23917#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
23918#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
23919#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
23920#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
23921//MC_VM_L2_PERFCOUNTER1_CFG
23922#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
23923#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
23924#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
23925#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
23926#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
23927#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
23928#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
23929#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
23930#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
23931#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
23932//MC_VM_L2_PERFCOUNTER2_CFG
23933#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
23934#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
23935#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
23936#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
23937#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
23938#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
23939#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
23940#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
23941#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
23942#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
23943//MC_VM_L2_PERFCOUNTER3_CFG
23944#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
23945#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
23946#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
23947#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
23948#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
23949#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
23950#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
23951#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
23952#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
23953#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
23954//MC_VM_L2_PERFCOUNTER4_CFG
23955#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
23956#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
23957#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
23958#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
23959#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
23960#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
23961#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
23962#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
23963#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
23964#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
23965//MC_VM_L2_PERFCOUNTER5_CFG
23966#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
23967#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
23968#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
23969#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
23970#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
23971#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
23972#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
23973#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
23974#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
23975#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
23976//MC_VM_L2_PERFCOUNTER6_CFG
23977#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
23978#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
23979#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
23980#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
23981#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
23982#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
23983#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
23984#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
23985#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
23986#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
23987//MC_VM_L2_PERFCOUNTER7_CFG
23988#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
23989#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
23990#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
23991#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
23992#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
23993#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
23994#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
23995#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
23996#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
23997#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
23998//MC_VM_L2_PERFCOUNTER_RSLT_CNTL
23999#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
24000#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
24001#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
24002#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
24003#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
24004#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
24005#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
24006#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
24007#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
24008#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
24009#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
24010#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
24011
24012
24013// addressBlock: gc_rlcpdec
24014//RLC_CNTL
24015#define RLC_CNTL__RLC_ENABLE_F32__SHIFT 0x0
24016#define RLC_CNTL__FORCE_RETRY__SHIFT 0x1
24017#define RLC_CNTL__READ_CACHE_DISABLE__SHIFT 0x2
24018#define RLC_CNTL__RLC_STEP_F32__SHIFT 0x3
24019#define RLC_CNTL__RESERVED__SHIFT 0x4
24020#define RLC_CNTL__RLC_ENABLE_F32_MASK 0x00000001L
24021#define RLC_CNTL__FORCE_RETRY_MASK 0x00000002L
24022#define RLC_CNTL__READ_CACHE_DISABLE_MASK 0x00000004L
24023#define RLC_CNTL__RLC_STEP_F32_MASK 0x00000008L
24024#define RLC_CNTL__RESERVED_MASK 0xFFFFFFF0L
24025//RLC_STAT
24026#define RLC_STAT__RLC_BUSY__SHIFT 0x0
24027#define RLC_STAT__RLC_GPM_BUSY__SHIFT 0x1
24028#define RLC_STAT__RLC_SPM_BUSY__SHIFT 0x2
24029#define RLC_STAT__RLC_SRM_BUSY__SHIFT 0x3
24030#define RLC_STAT__MC_BUSY__SHIFT 0x4
24031#define RLC_STAT__RLC_THREAD_0_BUSY__SHIFT 0x5
24032#define RLC_STAT__RLC_THREAD_1_BUSY__SHIFT 0x6
24033#define RLC_STAT__RLC_THREAD_2_BUSY__SHIFT 0x7
24034#define RLC_STAT__RESERVED__SHIFT 0x8
24035#define RLC_STAT__RLC_BUSY_MASK 0x00000001L
24036#define RLC_STAT__RLC_GPM_BUSY_MASK 0x00000002L
24037#define RLC_STAT__RLC_SPM_BUSY_MASK 0x00000004L
24038#define RLC_STAT__RLC_SRM_BUSY_MASK 0x00000008L
24039#define RLC_STAT__MC_BUSY_MASK 0x00000010L
24040#define RLC_STAT__RLC_THREAD_0_BUSY_MASK 0x00000020L
24041#define RLC_STAT__RLC_THREAD_1_BUSY_MASK 0x00000040L
24042#define RLC_STAT__RLC_THREAD_2_BUSY_MASK 0x00000080L
24043#define RLC_STAT__RESERVED_MASK 0xFFFFFF00L
24044//RLC_SAFE_MODE
24045#define RLC_SAFE_MODE__CMD__SHIFT 0x0
24046#define RLC_SAFE_MODE__MESSAGE__SHIFT 0x1
24047#define RLC_SAFE_MODE__RESERVED1__SHIFT 0x5
24048#define RLC_SAFE_MODE__RESPONSE__SHIFT 0x8
24049#define RLC_SAFE_MODE__RESERVED__SHIFT 0xc
24050#define RLC_SAFE_MODE__CMD_MASK 0x00000001L
24051#define RLC_SAFE_MODE__MESSAGE_MASK 0x0000001EL
24052#define RLC_SAFE_MODE__RESERVED1_MASK 0x000000E0L
24053#define RLC_SAFE_MODE__RESPONSE_MASK 0x00000F00L
24054#define RLC_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
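/*
 * Illustrative sketch only, not part of the generated header: RLC_SAFE_MODE
 * is a plain shift/mask register, so a request word is typically packed from
 * the CMD and MESSAGE fields and the RESPONSE field decoded the same way.
 * Both helper names are hypothetical.
 */
static inline unsigned int rlc_safe_mode_pack(unsigned int cmd, unsigned int msg)
{
	unsigned int v = 0;

	/* Place each field behind its shift and clip it to its mask. */
	v |= (cmd << RLC_SAFE_MODE__CMD__SHIFT) & RLC_SAFE_MODE__CMD_MASK;
	v |= (msg << RLC_SAFE_MODE__MESSAGE__SHIFT) & RLC_SAFE_MODE__MESSAGE_MASK;
	return v;
}

static inline unsigned int rlc_safe_mode_response(unsigned int reg_val)
{
	/* Extract RESPONSE by masking first, then shifting down. */
	return (reg_val & RLC_SAFE_MODE__RESPONSE_MASK) >> RLC_SAFE_MODE__RESPONSE__SHIFT;
}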
24055//RLC_MEM_SLP_CNTL
24056#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN__SHIFT 0x0
24057#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN__SHIFT 0x1
24058#define RLC_MEM_SLP_CNTL__RESERVED__SHIFT 0x2
24059#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
24060#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY__SHIFT 0x8
24061#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY__SHIFT 0x10
24062#define RLC_MEM_SLP_CNTL__RESERVED1__SHIFT 0x18
24063#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK 0x00000001L
24064#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN_MASK 0x00000002L
24065#define RLC_MEM_SLP_CNTL__RESERVED_MASK 0x0000007CL
24066#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
24067#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY_MASK 0x0000FF00L
24068#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
24069#define RLC_MEM_SLP_CNTL__RESERVED1_MASK 0xFF000000L
24070//SMU_RLC_RESPONSE
24071#define SMU_RLC_RESPONSE__RESP__SHIFT 0x0
24072#define SMU_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
24073//RLC_RLCV_SAFE_MODE
24074#define RLC_RLCV_SAFE_MODE__CMD__SHIFT 0x0
24075#define RLC_RLCV_SAFE_MODE__MESSAGE__SHIFT 0x1
24076#define RLC_RLCV_SAFE_MODE__RESERVED1__SHIFT 0x5
24077#define RLC_RLCV_SAFE_MODE__RESPONSE__SHIFT 0x8
24078#define RLC_RLCV_SAFE_MODE__RESERVED__SHIFT 0xc
24079#define RLC_RLCV_SAFE_MODE__CMD_MASK 0x00000001L
24080#define RLC_RLCV_SAFE_MODE__MESSAGE_MASK 0x0000001EL
24081#define RLC_RLCV_SAFE_MODE__RESERVED1_MASK 0x000000E0L
24082#define RLC_RLCV_SAFE_MODE__RESPONSE_MASK 0x00000F00L
24083#define RLC_RLCV_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
24084//RLC_SMU_SAFE_MODE
24085#define RLC_SMU_SAFE_MODE__CMD__SHIFT 0x0
24086#define RLC_SMU_SAFE_MODE__MESSAGE__SHIFT 0x1
24087#define RLC_SMU_SAFE_MODE__RESERVED1__SHIFT 0x5
24088#define RLC_SMU_SAFE_MODE__RESPONSE__SHIFT 0x8
24089#define RLC_SMU_SAFE_MODE__RESERVED__SHIFT 0xc
24090#define RLC_SMU_SAFE_MODE__CMD_MASK 0x00000001L
24091#define RLC_SMU_SAFE_MODE__MESSAGE_MASK 0x0000001EL
24092#define RLC_SMU_SAFE_MODE__RESERVED1_MASK 0x000000E0L
24093#define RLC_SMU_SAFE_MODE__RESPONSE_MASK 0x00000F00L
24094#define RLC_SMU_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
24095//RLC_RLCV_COMMAND
24096#define RLC_RLCV_COMMAND__CMD__SHIFT 0x0
24097#define RLC_RLCV_COMMAND__RESERVED__SHIFT 0x4
24098#define RLC_RLCV_COMMAND__CMD_MASK 0x0000000FL
24099#define RLC_RLCV_COMMAND__RESERVED_MASK 0xFFFFFFF0L
24100//RLC_REFCLOCK_TIMESTAMP_LSB
24101#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB__SHIFT 0x0
24102#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB_MASK 0xFFFFFFFFL
24103//RLC_REFCLOCK_TIMESTAMP_MSB
24104#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB__SHIFT 0x0
24105#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB_MASK 0xFFFFFFFFL
24106//RLC_GPM_TIMER_INT_0
24107#define RLC_GPM_TIMER_INT_0__TIMER__SHIFT 0x0
24108#define RLC_GPM_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
24109//RLC_GPM_TIMER_INT_1
24110#define RLC_GPM_TIMER_INT_1__TIMER__SHIFT 0x0
24111#define RLC_GPM_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
24112//RLC_GPM_TIMER_INT_2
24113#define RLC_GPM_TIMER_INT_2__TIMER__SHIFT 0x0
24114#define RLC_GPM_TIMER_INT_2__TIMER_MASK 0xFFFFFFFFL
24115//RLC_GPM_TIMER_CTRL
24116#define RLC_GPM_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
24117#define RLC_GPM_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
24118#define RLC_GPM_TIMER_CTRL__TIMER_2_EN__SHIFT 0x2
24119#define RLC_GPM_TIMER_CTRL__TIMER_3_EN__SHIFT 0x3
24120#define RLC_GPM_TIMER_CTRL__RESERVED__SHIFT 0x4
24121#define RLC_GPM_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
24122#define RLC_GPM_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
24123#define RLC_GPM_TIMER_CTRL__TIMER_2_EN_MASK 0x00000004L
24124#define RLC_GPM_TIMER_CTRL__TIMER_3_EN_MASK 0x00000008L
24125#define RLC_GPM_TIMER_CTRL__RESERVED_MASK 0xFFFFFFF0L
24126//RLC_LB_CNTR_MAX
24127#define RLC_LB_CNTR_MAX__LB_CNTR_MAX__SHIFT 0x0
24128#define RLC_LB_CNTR_MAX__LB_CNTR_MAX_MASK 0xFFFFFFFFL
24129//RLC_GPM_TIMER_STAT
24130#define RLC_GPM_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
24131#define RLC_GPM_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
24132#define RLC_GPM_TIMER_STAT__TIMER_2_STAT__SHIFT 0x2
24133#define RLC_GPM_TIMER_STAT__TIMER_3_STAT__SHIFT 0x3
24134#define RLC_GPM_TIMER_STAT__RESERVED__SHIFT 0x4
24135#define RLC_GPM_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
24136#define RLC_GPM_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
24137#define RLC_GPM_TIMER_STAT__TIMER_2_STAT_MASK 0x00000004L
24138#define RLC_GPM_TIMER_STAT__TIMER_3_STAT_MASK 0x00000008L
24139#define RLC_GPM_TIMER_STAT__RESERVED_MASK 0xFFFFFFF0L
24140//RLC_GPM_TIMER_INT_3
24141#define RLC_GPM_TIMER_INT_3__TIMER__SHIFT 0x0
24142#define RLC_GPM_TIMER_INT_3__TIMER_MASK 0xFFFFFFFFL
24143//RLC_SERDES_WR_NONCU_MASTER_MASK_1
24144#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SE_MASTER_MASK_1__SHIFT 0x0
24145#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_MASTER_MASK_1__SHIFT 0x10
24146#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_GFX_MASTER_MASK_1__SHIFT 0x11
24147#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__TC0_1_MASTER_MASK__SHIFT 0x12
24148#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_1__SHIFT 0x13
24149#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE4_MASTER_MASK__SHIFT 0x14
24150#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE5_MASTER_MASK__SHIFT 0x15
24151#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE6_MASTER_MASK__SHIFT 0x16
24152#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE7_MASTER_MASK__SHIFT 0x17
24153#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__EA_1_MASTER_MASK__SHIFT 0x18
24154#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED__SHIFT 0x19
24155#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SE_MASTER_MASK_1_MASK 0x0000FFFFL
24156#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_MASTER_MASK_1_MASK 0x00010000L
24157#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_GFX_MASTER_MASK_1_MASK 0x00020000L
24158#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__TC0_1_MASTER_MASK_MASK 0x00040000L
24159#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_1_MASK 0x00080000L
24160#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE4_MASTER_MASK_MASK 0x00100000L
24161#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE5_MASTER_MASK_MASK 0x00200000L
24162#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE6_MASTER_MASK_MASK 0x00400000L
24163#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE7_MASTER_MASK_MASK 0x00800000L
24164#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__EA_1_MASTER_MASK_MASK 0x01000000L
24165#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_MASK 0xFE000000L
24166//RLC_SERDES_NONCU_MASTER_BUSY_1
24167#define RLC_SERDES_NONCU_MASTER_BUSY_1__SE_MASTER_BUSY_1__SHIFT 0x0
24168#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_MASTER_BUSY_1__SHIFT 0x10
24169#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_GFX_MASTER_BUSY_1__SHIFT 0x11
24170#define RLC_SERDES_NONCU_MASTER_BUSY_1__TC0_MASTER_BUSY_1__SHIFT 0x12
24171#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_1__SHIFT 0x13
24172#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE4_MASTER_BUSY__SHIFT 0x14
24173#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE5_MASTER_BUSY__SHIFT 0x15
24174#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE6_MASTER_BUSY__SHIFT 0x16
24175#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE7_MASTER_BUSY__SHIFT 0x17
24176#define RLC_SERDES_NONCU_MASTER_BUSY_1__EA_1_MASTER_BUSY__SHIFT 0x18
24177#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED__SHIFT 0x19
24178#define RLC_SERDES_NONCU_MASTER_BUSY_1__SE_MASTER_BUSY_1_MASK 0x0000FFFFL
24179#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_MASTER_BUSY_1_MASK 0x00010000L
24180#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_GFX_MASTER_BUSY_1_MASK 0x00020000L
24181#define RLC_SERDES_NONCU_MASTER_BUSY_1__TC0_MASTER_BUSY_1_MASK 0x00040000L
24182#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_1_MASK 0x00080000L
24183#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE4_MASTER_BUSY_MASK 0x00100000L
24184#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE5_MASTER_BUSY_MASK 0x00200000L
24185#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE6_MASTER_BUSY_MASK 0x00400000L
24186#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE7_MASTER_BUSY_MASK 0x00800000L
24187#define RLC_SERDES_NONCU_MASTER_BUSY_1__EA_1_MASTER_BUSY_MASK 0x01000000L
24188#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_MASK 0xFE000000L
24189//RLC_INT_STAT
24190#define RLC_INT_STAT__LAST_CP_RLC_INT_ID__SHIFT 0x0
24191#define RLC_INT_STAT__CP_RLC_INT_PENDING__SHIFT 0x8
24192#define RLC_INT_STAT__RESERVED__SHIFT 0x9
24193#define RLC_INT_STAT__LAST_CP_RLC_INT_ID_MASK 0x000000FFL
24194#define RLC_INT_STAT__CP_RLC_INT_PENDING_MASK 0x00000100L
24195#define RLC_INT_STAT__RESERVED_MASK 0xFFFFFE00L
24196//RLC_LB_CNTL
24197#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE__SHIFT 0x0
24198#define RLC_LB_CNTL__LB_CNT_CP_BUSY__SHIFT 0x1
24199#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE__SHIFT 0x2
24200#define RLC_LB_CNTL__LB_CNT_REG_INC__SHIFT 0x3
24201#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST__SHIFT 0x4
24202#define RLC_LB_CNTL__RESERVED__SHIFT 0xc
24203#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK 0x00000001L
24204#define RLC_LB_CNTL__LB_CNT_CP_BUSY_MASK 0x00000002L
24205#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK 0x00000004L
24206#define RLC_LB_CNTL__LB_CNT_REG_INC_MASK 0x00000008L
24207#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST_MASK 0x00000FF0L
24208#define RLC_LB_CNTL__RESERVED_MASK 0xFFFFF000L
24209//RLC_MGCG_CTRL
24210#define RLC_MGCG_CTRL__MGCG_EN__SHIFT 0x0
24211#define RLC_MGCG_CTRL__SILICON_EN__SHIFT 0x1
24212#define RLC_MGCG_CTRL__SIMULATION_EN__SHIFT 0x2
24213#define RLC_MGCG_CTRL__ON_DELAY__SHIFT 0x3
24214#define RLC_MGCG_CTRL__OFF_HYSTERESIS__SHIFT 0x7
24215#define RLC_MGCG_CTRL__GC_CAC_MGCG_CLK_CNTL__SHIFT 0xf
24216#define RLC_MGCG_CTRL__SE_CAC_MGCG_CLK_CNTL__SHIFT 0x10
24217#define RLC_MGCG_CTRL__SPARE__SHIFT 0x11
24218#define RLC_MGCG_CTRL__MGCG_EN_MASK 0x00000001L
24219#define RLC_MGCG_CTRL__SILICON_EN_MASK 0x00000002L
24220#define RLC_MGCG_CTRL__SIMULATION_EN_MASK 0x00000004L
24221#define RLC_MGCG_CTRL__ON_DELAY_MASK 0x00000078L
24222#define RLC_MGCG_CTRL__OFF_HYSTERESIS_MASK 0x00007F80L
24223#define RLC_MGCG_CTRL__GC_CAC_MGCG_CLK_CNTL_MASK 0x00008000L
24224#define RLC_MGCG_CTRL__SE_CAC_MGCG_CLK_CNTL_MASK 0x00010000L
24225#define RLC_MGCG_CTRL__SPARE_MASK 0xFFFE0000L
24226//RLC_LB_CNTR_INIT
24227#define RLC_LB_CNTR_INIT__LB_CNTR_INIT__SHIFT 0x0
24228#define RLC_LB_CNTR_INIT__LB_CNTR_INIT_MASK 0xFFFFFFFFL
24229//RLC_LOAD_BALANCE_CNTR
24230#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR__SHIFT 0x0
24231#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR_MASK 0xFFFFFFFFL
24232//RLC_JUMP_TABLE_RESTORE
24233#define RLC_JUMP_TABLE_RESTORE__ADDR__SHIFT 0x0
24234#define RLC_JUMP_TABLE_RESTORE__ADDR_MASK 0xFFFFFFFFL
24235//RLC_PG_DELAY_2
24236#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE__SHIFT 0x0
24237#define RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT 0x8
24238#define RLC_PG_DELAY_2__PERCU_TIMEOUT_VALUE__SHIFT 0x10
24239#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE_MASK 0x000000FFL
24240#define RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK 0x0000FF00L
24241#define RLC_PG_DELAY_2__PERCU_TIMEOUT_VALUE_MASK 0xFFFF0000L
24242//RLC_GPU_CLOCK_COUNT_LSB
24243#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
24244#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
24245//RLC_GPU_CLOCK_COUNT_MSB
24246#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
24247#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
24248//RLC_CAPTURE_GPU_CLOCK_COUNT
24249#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE__SHIFT 0x0
24250#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED__SHIFT 0x1
24251#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE_MASK 0x00000001L
24252#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED_MASK 0xFFFFFFFEL
24253//RLC_UCODE_CNTL
24254#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS__SHIFT 0x0
24255#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS_MASK 0xFFFFFFFFL
24256//RLC_GPM_THREAD_RESET
24257#define RLC_GPM_THREAD_RESET__THREAD0_RESET__SHIFT 0x0
24258#define RLC_GPM_THREAD_RESET__THREAD1_RESET__SHIFT 0x1
24259#define RLC_GPM_THREAD_RESET__THREAD2_RESET__SHIFT 0x2
24260#define RLC_GPM_THREAD_RESET__THREAD3_RESET__SHIFT 0x3
24261#define RLC_GPM_THREAD_RESET__RESERVED__SHIFT 0x4
24262#define RLC_GPM_THREAD_RESET__THREAD0_RESET_MASK 0x00000001L
24263#define RLC_GPM_THREAD_RESET__THREAD1_RESET_MASK 0x00000002L
24264#define RLC_GPM_THREAD_RESET__THREAD2_RESET_MASK 0x00000004L
24265#define RLC_GPM_THREAD_RESET__THREAD3_RESET_MASK 0x00000008L
24266#define RLC_GPM_THREAD_RESET__RESERVED_MASK 0xFFFFFFF0L
24267//RLC_GPM_CP_DMA_COMPLETE_T0
24268#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA__SHIFT 0x0
24269#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED__SHIFT 0x1
24270#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA_MASK 0x00000001L
24271#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED_MASK 0xFFFFFFFEL
24272//RLC_GPM_CP_DMA_COMPLETE_T1
24273#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA__SHIFT 0x0
24274#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED__SHIFT 0x1
24275#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA_MASK 0x00000001L
24276#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED_MASK 0xFFFFFFFEL
24277//RLC_FIREWALL_VIOLATION
24278#define RLC_FIREWALL_VIOLATION__ADDR__SHIFT 0x0
24279#define RLC_FIREWALL_VIOLATION__ADDR_MASK 0xFFFFFFFFL
24280//RLC_GPM_STAT
24281#define RLC_GPM_STAT__RLC_BUSY__SHIFT 0x0
24282#define RLC_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
24283#define RLC_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
24284#define RLC_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
24285#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
24286#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
24287#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
24288#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
24289#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
24290#define RLC_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
24291#define RLC_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
24292#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
24293#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
24294#define RLC_GPM_STAT__STATIC_CU_POWERING_UP__SHIFT 0xd
24295#define RLC_GPM_STAT__STATIC_CU_POWERING_DOWN__SHIFT 0xe
24296#define RLC_GPM_STAT__DYN_CU_POWERING_UP__SHIFT 0xf
24297#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
24298#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
24299#define RLC_GPM_STAT__CMP_power_status__SHIFT 0x12
24300#define RLC_GPM_STAT__GFX_LS_STATUS_3D__SHIFT 0x13
24301#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D__SHIFT 0x14
24302#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS__SHIFT 0x15
24303#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE__SHIFT 0x16
24304#define RLC_GPM_STAT__RESERVED__SHIFT 0x17
24305#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
24306#define RLC_GPM_STAT__RLC_BUSY_MASK 0x00000001L
24307#define RLC_GPM_STAT__GFX_POWER_STATUS_MASK 0x00000002L
24308#define RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x00000004L
24309#define RLC_GPM_STAT__GFX_LS_STATUS_MASK 0x00000008L
24310#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x00000010L
24311#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x00000020L
24312#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x00000040L
24313#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x00000080L
24314#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x00000100L
24315#define RLC_GPM_STAT__SAVING_REGISTERS_MASK 0x00000200L
24316#define RLC_GPM_STAT__RESTORING_REGISTERS_MASK 0x00000400L
24317#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x00000800L
24318#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x00001000L
24319#define RLC_GPM_STAT__STATIC_CU_POWERING_UP_MASK 0x00002000L
24320#define RLC_GPM_STAT__STATIC_CU_POWERING_DOWN_MASK 0x00004000L
24321#define RLC_GPM_STAT__DYN_CU_POWERING_UP_MASK 0x00008000L
24322#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN_MASK 0x00010000L
24323#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x00020000L
24324#define RLC_GPM_STAT__CMP_power_status_MASK 0x00040000L
24325#define RLC_GPM_STAT__GFX_LS_STATUS_3D_MASK 0x00080000L
24326#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D_MASK 0x00100000L
24327#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS_MASK 0x00200000L
24328#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE_MASK 0x00400000L
24329#define RLC_GPM_STAT__RESERVED_MASK 0x00800000L
24330#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xFF000000L
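/*
 * Illustrative sketch only, not part of the generated header: because every
 * field follows the <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK naming
 * convention, generic field accessors can be built with token pasting.
 * These macro names are hypothetical (the amdgpu driver carries similar
 * REG_GET_FIELD/REG_SET_FIELD helpers); a status bit such as
 * RLC_GPM_STAT.RLC_BUSY can then be read as shown in the final comment.
 */
#define GC_REG_FIELD_SHIFT(reg, field)	reg##__##field##__SHIFT
#define GC_REG_FIELD_MASK(reg, field)	reg##__##field##_MASK

#define GC_REG_GET_FIELD(val, reg, field) \
	(((val) & GC_REG_FIELD_MASK(reg, field)) >> GC_REG_FIELD_SHIFT(reg, field))

/* Example: unsigned int busy = GC_REG_GET_FIELD(stat_val, RLC_GPM_STAT, RLC_BUSY); */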
24331//RLC_GPU_CLOCK_32_RES_SEL
24332#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL__SHIFT 0x0
24333#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED__SHIFT 0x6
24334#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x0000003FL
24335#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED_MASK 0xFFFFFFC0L
24336//RLC_GPU_CLOCK_32
24337#define RLC_GPU_CLOCK_32__GPU_CLOCK_32__SHIFT 0x0
24338#define RLC_GPU_CLOCK_32__GPU_CLOCK_32_MASK 0xFFFFFFFFL
24339//RLC_PG_CNTL
24340#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE__SHIFT 0x0
24341#define RLC_PG_CNTL__GFX_POWER_GATING_SRC__SHIFT 0x1
24342#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE__SHIFT 0x2
24343#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE__SHIFT 0x3
24344#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE__SHIFT 0x4
24345#define RLC_PG_CNTL__RESERVED__SHIFT 0x5
24346#define RLC_PG_CNTL__PG_OVERRIDE__SHIFT 0xe
24347#define RLC_PG_CNTL__CP_PG_DISABLE__SHIFT 0xf
24348#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE__SHIFT 0x10
24349#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE__SHIFT 0x11
24350#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
24351#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13
24352#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14
24353#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK 0x00000001L
24354#define RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK 0x00000002L
24355#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK 0x00000004L
24356#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK 0x00000008L
24357#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK 0x00000010L
24358#define RLC_PG_CNTL__RESERVED_MASK 0x00003FE0L
24359#define RLC_PG_CNTL__PG_OVERRIDE_MASK 0x00004000L
24360#define RLC_PG_CNTL__CP_PG_DISABLE_MASK 0x00008000L
24361#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE_MASK 0x00010000L
24362#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK 0x00020000L
24363#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK 0x00040000L
24364#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x00080000L
24365#define RLC_PG_CNTL__RESERVED1_MASK 0x00F00000L
24366//RLC_GPM_THREAD_PRIORITY
24367#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
24368#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY__SHIFT 0x8
24369#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY__SHIFT 0x10
24370#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY__SHIFT 0x18
24371#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0x000000FFL
24372#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0x0000FF00L
24373#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY_MASK 0x00FF0000L
24374#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY_MASK 0xFF000000L
24375//RLC_GPM_THREAD_ENABLE
24376#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE__SHIFT 0x0
24377#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE__SHIFT 0x1
24378#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE__SHIFT 0x2
24379#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE__SHIFT 0x3
24380#define RLC_GPM_THREAD_ENABLE__RESERVED__SHIFT 0x4
24381#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE_MASK 0x00000001L
24382#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE_MASK 0x00000002L
24383#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE_MASK 0x00000004L
24384#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE_MASK 0x00000008L
24385#define RLC_GPM_THREAD_ENABLE__RESERVED_MASK 0xFFFFFFF0L
24386//RLC_CGTT_MGCG_OVERRIDE
24387#define RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE__SHIFT 0x0
24388#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1
24389#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE__SHIFT 0x2
24390#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE__SHIFT 0x3
24391#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE__SHIFT 0x4
24392#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE__SHIFT 0x5
24393#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE__SHIFT 0x6
24394#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE__SHIFT 0x7
24395#define RLC_CGTT_MGCG_OVERRIDE__RESERVED__SHIFT 0x8
24396#define RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK 0x00000001L
24397#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK 0x00000002L
24398#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK 0x00000004L
24399#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK 0x00000008L
24400#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK 0x00000010L
24401#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK 0x00000020L
24402#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK 0x00000040L
24403#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK 0x00000080L
24404#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_MASK 0xFFFFFF00L
24405//RLC_CGCG_CGLS_CTRL
24406#define RLC_CGCG_CGLS_CTRL__CGCG_EN__SHIFT 0x0
24407#define RLC_CGCG_CGLS_CTRL__CGLS_EN__SHIFT 0x1
24408#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
24409#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
24410#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER__SHIFT 0x1b
24411#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL__SHIFT 0x1c
24412#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE__SHIFT 0x1d
24413#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN__SHIFT 0x1f
24414#define RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK 0x00000001L
24415#define RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK 0x00000002L
24416#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
24417#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
24418#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER_MASK 0x08000000L
24419#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL_MASK 0x10000000L
24420#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE_MASK 0x60000000L
24421#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN_MASK 0x80000000L
24422//RLC_CGCG_RAMP_CTRL
24423#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT__SHIFT 0x0
24424#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT__SHIFT 0x4
24425#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT__SHIFT 0x8
24426#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT__SHIFT 0xc
24427#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT__SHIFT 0x10
24428#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT__SHIFT 0x1c
24429#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT_MASK 0x0000000FL
24430#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
24431#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT_MASK 0x00000F00L
24432#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT_MASK 0x0000F000L
24433#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT_MASK 0x0FFF0000L
24434#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT_MASK 0xF0000000L
24435//RLC_DYN_PG_STATUS
24436#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x0
24437#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xFFFFFFFFL
24438//RLC_DYN_PG_REQUEST
24439#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK__SHIFT 0x0
24440#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK_MASK 0xFFFFFFFFL
24441//RLC_PG_DELAY
24442#define RLC_PG_DELAY__POWER_UP_DELAY__SHIFT 0x0
24443#define RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT 0x8
24444#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT 0x10
24445#define RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT 0x18
24446#define RLC_PG_DELAY__POWER_UP_DELAY_MASK 0x000000FFL
24447#define RLC_PG_DELAY__POWER_DOWN_DELAY_MASK 0x0000FF00L
24448#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY_MASK 0x00FF0000L
24449#define RLC_PG_DELAY__MEM_SLEEP_DELAY_MASK 0xFF000000L
24450//RLC_CU_STATUS
24451#define RLC_CU_STATUS__WORK_PENDING__SHIFT 0x0
24452#define RLC_CU_STATUS__WORK_PENDING_MASK 0xFFFFFFFFL
24453//RLC_LB_INIT_CU_MASK
24454#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK__SHIFT 0x0
24455#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK_MASK 0xFFFFFFFFL
24456//RLC_LB_ALWAYS_ACTIVE_CU_MASK
24457#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK__SHIFT 0x0
24458#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK_MASK 0xFFFFFFFFL
24459//RLC_LB_PARAMS
24460#define RLC_LB_PARAMS__SKIP_L2_CHECK__SHIFT 0x0
24461#define RLC_LB_PARAMS__FIFO_SAMPLES__SHIFT 0x1
24462#define RLC_LB_PARAMS__PG_IDLE_SAMPLES__SHIFT 0x8
24463#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL__SHIFT 0x10
24464#define RLC_LB_PARAMS__SKIP_L2_CHECK_MASK 0x00000001L
24465#define RLC_LB_PARAMS__FIFO_SAMPLES_MASK 0x000000FEL
24466#define RLC_LB_PARAMS__PG_IDLE_SAMPLES_MASK 0x0000FF00L
24467#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL_MASK 0xFFFF0000L
24468//RLC_THREAD1_DELAY
24469#define RLC_THREAD1_DELAY__CU_IDEL_DELAY__SHIFT 0x0
24470#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY__SHIFT 0x8
24471#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY__SHIFT 0x10
24472#define RLC_THREAD1_DELAY__SPARE__SHIFT 0x18
24473#define RLC_THREAD1_DELAY__CU_IDEL_DELAY_MASK 0x000000FFL
24474#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY_MASK 0x0000FF00L
24475#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY_MASK 0x00FF0000L
24476#define RLC_THREAD1_DELAY__SPARE_MASK 0xFF000000L
24477//RLC_PG_ALWAYS_ON_CU_MASK
24478#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK__SHIFT 0x0
24479#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK_MASK 0xFFFFFFFFL
24480//RLC_MAX_PG_CU
24481#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT 0x0
24482#define RLC_MAX_PG_CU__SPARE__SHIFT 0x8
24483#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK 0x000000FFL
24484#define RLC_MAX_PG_CU__SPARE_MASK 0xFFFFFF00L
24485//RLC_AUTO_PG_CTRL
24486#define RLC_AUTO_PG_CTRL__AUTO_PG_EN__SHIFT 0x0
24487#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN__SHIFT 0x1
24488#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN__SHIFT 0x2
24489#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT 0x3
24490#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD__SHIFT 0x13
24491#define RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK 0x00000001L
24492#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN_MASK 0x00000002L
24493#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN_MASK 0x00000004L
24494#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK 0x0007FFF8L
24495#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK 0xFFF80000L
24496//RLC_SMU_GRBM_REG_SAVE_CTRL
24497#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE__SHIFT 0x0
24498#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE__SHIFT 0x1
24499#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE_MASK 0x00000001L
24500#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE_MASK 0xFFFFFFFEL
24501//RLC_SERDES_RD_MASTER_INDEX
24502#define RLC_SERDES_RD_MASTER_INDEX__CU_ID__SHIFT 0x0
24503#define RLC_SERDES_RD_MASTER_INDEX__SH_ID__SHIFT 0x4
24504#define RLC_SERDES_RD_MASTER_INDEX__SE_ID__SHIFT 0x6
24505#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID__SHIFT 0x9
24506#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU__SHIFT 0xc
24507#define RLC_SERDES_RD_MASTER_INDEX__NON_SE__SHIFT 0xd
24508#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID__SHIFT 0x11
24509#define RLC_SERDES_RD_MASTER_INDEX__SPARE__SHIFT 0x13
24510#define RLC_SERDES_RD_MASTER_INDEX__CU_ID_MASK 0x0000000FL
24511#define RLC_SERDES_RD_MASTER_INDEX__SH_ID_MASK 0x00000030L
24512#define RLC_SERDES_RD_MASTER_INDEX__SE_ID_MASK 0x000001C0L
24513#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID_MASK 0x00000E00L
24514#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_MASK 0x00001000L
24515#define RLC_SERDES_RD_MASTER_INDEX__NON_SE_MASK 0x0001E000L
24516#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID_MASK 0x00060000L
24517#define RLC_SERDES_RD_MASTER_INDEX__SPARE_MASK 0xFFF80000L
24518//RLC_SERDES_RD_DATA_0
24519#define RLC_SERDES_RD_DATA_0__DATA__SHIFT 0x0
24520#define RLC_SERDES_RD_DATA_0__DATA_MASK 0xFFFFFFFFL
24521//RLC_SERDES_RD_DATA_1
24522#define RLC_SERDES_RD_DATA_1__DATA__SHIFT 0x0
24523#define RLC_SERDES_RD_DATA_1__DATA_MASK 0xFFFFFFFFL
24524//RLC_SERDES_RD_DATA_2
24525#define RLC_SERDES_RD_DATA_2__DATA__SHIFT 0x0
24526#define RLC_SERDES_RD_DATA_2__DATA_MASK 0xFFFFFFFFL
24527//RLC_SERDES_WR_CU_MASTER_MASK
24528#define RLC_SERDES_WR_CU_MASTER_MASK__MASTER_MASK__SHIFT 0x0
24529#define RLC_SERDES_WR_CU_MASTER_MASK__MASTER_MASK_MASK 0xFFFFFFFFL
24530//RLC_SERDES_WR_NONCU_MASTER_MASK
24531#define RLC_SERDES_WR_NONCU_MASTER_MASK__SE_MASTER_MASK__SHIFT 0x0
24532#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_MASTER_MASK__SHIFT 0x10
24533#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_GFX_MASTER_MASK__SHIFT 0x11
24534#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC0_MASTER_MASK__SHIFT 0x12
24535#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC1_MASTER_MASK__SHIFT 0x13
24536#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE0_MASTER_MASK__SHIFT 0x14
24537#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE1_MASTER_MASK__SHIFT 0x15
24538#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE2_MASTER_MASK__SHIFT 0x16
24539#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE3_MASTER_MASK__SHIFT 0x17
24540#define RLC_SERDES_WR_NONCU_MASTER_MASK__EA_0_MASTER_MASK__SHIFT 0x18
24541#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC2_MASTER_MASK__SHIFT 0x19
24542#define RLC_SERDES_WR_NONCU_MASTER_MASK__RESERVED__SHIFT 0x1a
24543#define RLC_SERDES_WR_NONCU_MASTER_MASK__SE_MASTER_MASK_MASK 0x0000FFFFL
24544#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_MASTER_MASK_MASK 0x00010000L
24545#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_GFX_MASTER_MASK_MASK 0x00020000L
24546#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC0_MASTER_MASK_MASK 0x00040000L
24547#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC1_MASTER_MASK_MASK 0x00080000L
24548#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE0_MASTER_MASK_MASK 0x00100000L
24549#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE1_MASTER_MASK_MASK 0x00200000L
24550#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE2_MASTER_MASK_MASK 0x00400000L
24551#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE3_MASTER_MASK_MASK 0x00800000L
24552#define RLC_SERDES_WR_NONCU_MASTER_MASK__EA_0_MASTER_MASK_MASK 0x01000000L
24553#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC2_MASTER_MASK_MASK 0x02000000L
24554#define RLC_SERDES_WR_NONCU_MASTER_MASK__RESERVED_MASK 0xFC000000L
24555//RLC_SERDES_WR_CTRL
24556#define RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT 0x0
24557#define RLC_SERDES_WR_CTRL__POWER_DOWN__SHIFT 0x8
24558#define RLC_SERDES_WR_CTRL__POWER_UP__SHIFT 0x9
24559#define RLC_SERDES_WR_CTRL__P1_SELECT__SHIFT 0xa
24560#define RLC_SERDES_WR_CTRL__P2_SELECT__SHIFT 0xb
24561#define RLC_SERDES_WR_CTRL__WRITE_COMMAND__SHIFT 0xc
24562#define RLC_SERDES_WR_CTRL__READ_COMMAND__SHIFT 0xd
24563#define RLC_SERDES_WR_CTRL__RDDATA_RESET__SHIFT 0xe
24564#define RLC_SERDES_WR_CTRL__SHORT_FORMAT__SHIFT 0xf
24565#define RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT 0x10
24566#define RLC_SERDES_WR_CTRL__SRBM_OVERRIDE__SHIFT 0x1a
24567#define RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR__SHIFT 0x1b
24568#define RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT 0x1c
24569#define RLC_SERDES_WR_CTRL__BPM_ADDR_MASK 0x000000FFL
24570#define RLC_SERDES_WR_CTRL__POWER_DOWN_MASK 0x00000100L
24571#define RLC_SERDES_WR_CTRL__POWER_UP_MASK 0x00000200L
24572#define RLC_SERDES_WR_CTRL__P1_SELECT_MASK 0x00000400L
24573#define RLC_SERDES_WR_CTRL__P2_SELECT_MASK 0x00000800L
24574#define RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK 0x00001000L
24575#define RLC_SERDES_WR_CTRL__READ_COMMAND_MASK 0x00002000L
24576#define RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK 0x00004000L
24577#define RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK 0x00008000L
24578#define RLC_SERDES_WR_CTRL__BPM_DATA_MASK 0x03FF0000L
24579#define RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK 0x04000000L
24580#define RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK 0x08000000L
24581#define RLC_SERDES_WR_CTRL__REG_ADDR_MASK 0xF0000000L
24582//RLC_SERDES_WR_DATA
24583#define RLC_SERDES_WR_DATA__DATA__SHIFT 0x0
24584#define RLC_SERDES_WR_DATA__DATA_MASK 0xFFFFFFFFL
24585//RLC_SERDES_CU_MASTER_BUSY
24586#define RLC_SERDES_CU_MASTER_BUSY__BUSY_BUSY__SHIFT 0x0
24587#define RLC_SERDES_CU_MASTER_BUSY__BUSY_BUSY_MASK 0xFFFFFFFFL
24588//RLC_SERDES_NONCU_MASTER_BUSY
24589#define RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY__SHIFT 0x0
24590#define RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY__SHIFT 0x10
24591#define RLC_SERDES_NONCU_MASTER_BUSY__GC_GFX_MASTER_BUSY__SHIFT 0x11
24592#define RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY__SHIFT 0x12
24593#define RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY__SHIFT 0x13
24594#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE0_MASTER_BUSY__SHIFT 0x14
24595#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE1_MASTER_BUSY__SHIFT 0x15
24596#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE2_MASTER_BUSY__SHIFT 0x16
24597#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE3_MASTER_BUSY__SHIFT 0x17
24598#define RLC_SERDES_NONCU_MASTER_BUSY__EA_0_MASTER_BUSY__SHIFT 0x18
24599#define RLC_SERDES_NONCU_MASTER_BUSY__TC2_MASTER_BUSY__SHIFT 0x19
24600#define RLC_SERDES_NONCU_MASTER_BUSY__RESERVED__SHIFT 0x1a
24601#define RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK 0x0000FFFFL
24602#define RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK 0x00010000L
24603#define RLC_SERDES_NONCU_MASTER_BUSY__GC_GFX_MASTER_BUSY_MASK 0x00020000L
24604#define RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK 0x00040000L
24605#define RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK 0x00080000L
24606#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE0_MASTER_BUSY_MASK 0x00100000L
24607#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE1_MASTER_BUSY_MASK 0x00200000L
24608#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE2_MASTER_BUSY_MASK 0x00400000L
24609#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE3_MASTER_BUSY_MASK 0x00800000L
24610#define RLC_SERDES_NONCU_MASTER_BUSY__EA_0_MASTER_BUSY_MASK 0x01000000L
24611#define RLC_SERDES_NONCU_MASTER_BUSY__TC2_MASTER_BUSY_MASK 0x02000000L
24612#define RLC_SERDES_NONCU_MASTER_BUSY__RESERVED_MASK 0xFC000000L
24613//RLC_GPM_GENERAL_0
24614#define RLC_GPM_GENERAL_0__DATA__SHIFT 0x0
24615#define RLC_GPM_GENERAL_0__DATA_MASK 0xFFFFFFFFL
24616//RLC_GPM_GENERAL_1
24617#define RLC_GPM_GENERAL_1__DATA__SHIFT 0x0
24618#define RLC_GPM_GENERAL_1__DATA_MASK 0xFFFFFFFFL
24619//RLC_GPM_GENERAL_2
24620#define RLC_GPM_GENERAL_2__DATA__SHIFT 0x0
24621#define RLC_GPM_GENERAL_2__DATA_MASK 0xFFFFFFFFL
24622//RLC_GPM_GENERAL_3
24623#define RLC_GPM_GENERAL_3__DATA__SHIFT 0x0
24624#define RLC_GPM_GENERAL_3__DATA_MASK 0xFFFFFFFFL
24625//RLC_GPM_GENERAL_4
24626#define RLC_GPM_GENERAL_4__DATA__SHIFT 0x0
24627#define RLC_GPM_GENERAL_4__DATA_MASK 0xFFFFFFFFL
24628//RLC_GPM_GENERAL_5
24629#define RLC_GPM_GENERAL_5__DATA__SHIFT 0x0
24630#define RLC_GPM_GENERAL_5__DATA_MASK 0xFFFFFFFFL
24631//RLC_GPM_GENERAL_6
24632#define RLC_GPM_GENERAL_6__DATA__SHIFT 0x0
24633#define RLC_GPM_GENERAL_6__DATA_MASK 0xFFFFFFFFL
24634//RLC_GPM_GENERAL_7
24635#define RLC_GPM_GENERAL_7__DATA__SHIFT 0x0
24636#define RLC_GPM_GENERAL_7__DATA_MASK 0xFFFFFFFFL
24637//RLC_GPM_SCRATCH_ADDR
24638#define RLC_GPM_SCRATCH_ADDR__ADDR__SHIFT 0x0
24639#define RLC_GPM_SCRATCH_ADDR__RESERVED__SHIFT 0x9
24640#define RLC_GPM_SCRATCH_ADDR__ADDR_MASK 0x000001FFL
24641#define RLC_GPM_SCRATCH_ADDR__RESERVED_MASK 0xFFFFFE00L
24642//RLC_GPM_SCRATCH_DATA
24643#define RLC_GPM_SCRATCH_DATA__DATA__SHIFT 0x0
24644#define RLC_GPM_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
24645//RLC_STATIC_PG_STATUS
24646#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x0
24647#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xFFFFFFFFL
24648//RLC_SPM_MC_CNTL
24649#define RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT 0x0
24650#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY__SHIFT 0x4
24651#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR__SHIFT 0x5
24652#define RLC_SPM_MC_CNTL__RLC_SPM_FED__SHIFT 0x6
24653#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER__SHIFT 0x7
24654#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE__SHIFT 0x8
24655#define RLC_SPM_MC_CNTL__RESERVED__SHIFT 0xa
24656#define RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK 0x0000000FL
24657#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY_MASK 0x00000010L
24658#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR_MASK 0x00000020L
24659#define RLC_SPM_MC_CNTL__RLC_SPM_FED_MASK 0x00000040L
24660#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER_MASK 0x00000080L
24661#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_MASK 0x00000300L
24662#define RLC_SPM_MC_CNTL__RESERVED_MASK 0xFFFFFC00L
24663//RLC_SPM_INT_CNTL
24664#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL__SHIFT 0x0
24665#define RLC_SPM_INT_CNTL__RESERVED__SHIFT 0x1
24666#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL_MASK 0x00000001L
24667#define RLC_SPM_INT_CNTL__RESERVED_MASK 0xFFFFFFFEL
24668//RLC_SPM_INT_STATUS
24669#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS__SHIFT 0x0
24670#define RLC_SPM_INT_STATUS__RESERVED__SHIFT 0x1
24671#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS_MASK 0x00000001L
24672#define RLC_SPM_INT_STATUS__RESERVED_MASK 0xFFFFFFFEL
24673//RLC_SMU_MESSAGE
24674#define RLC_SMU_MESSAGE__CMD__SHIFT 0x0
24675#define RLC_SMU_MESSAGE__CMD_MASK 0xFFFFFFFFL
24676//RLC_GPM_LOG_SIZE
24677#define RLC_GPM_LOG_SIZE__SIZE__SHIFT 0x0
24678#define RLC_GPM_LOG_SIZE__SIZE_MASK 0xFFFFFFFFL
24679//RLC_PG_DELAY_3
24680#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
24681#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
24682#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0x000000FFL
24683#define RLC_PG_DELAY_3__RESERVED_MASK 0xFFFFFF00L
24684//RLC_GPR_REG1
24685#define RLC_GPR_REG1__DATA__SHIFT 0x0
24686#define RLC_GPR_REG1__DATA_MASK 0xFFFFFFFFL
24687//RLC_GPR_REG2
24688#define RLC_GPR_REG2__DATA__SHIFT 0x0
24689#define RLC_GPR_REG2__DATA_MASK 0xFFFFFFFFL
24690//RLC_GPM_LOG_CONT
24691#define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
24692#define RLC_GPM_LOG_CONT__CONT_MASK 0xFFFFFFFFL
24693//RLC_GPM_INT_DISABLE_TH0
24694#define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
24695#define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xFFFFFFFFL
24696//RLC_GPM_INT_DISABLE_TH1
24697#define RLC_GPM_INT_DISABLE_TH1__DISABLE__SHIFT 0x0
24698#define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xFFFFFFFFL
24699//RLC_GPM_INT_FORCE_TH0
24700#define RLC_GPM_INT_FORCE_TH0__FORCE__SHIFT 0x0
24701#define RLC_GPM_INT_FORCE_TH0__FORCE_MASK 0xFFFFFFFFL
24702//RLC_GPM_INT_FORCE_TH1
24703#define RLC_GPM_INT_FORCE_TH1__FORCE__SHIFT 0x0
24704#define RLC_GPM_INT_FORCE_TH1__FORCE_MASK 0xFFFFFFFFL
24705//RLC_SRM_CNTL
24706#define RLC_SRM_CNTL__SRM_ENABLE__SHIFT 0x0
24707#define RLC_SRM_CNTL__AUTO_INCR_ADDR__SHIFT 0x1
24708#define RLC_SRM_CNTL__RESERVED__SHIFT 0x2
24709#define RLC_SRM_CNTL__SRM_ENABLE_MASK 0x00000001L
24710#define RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK 0x00000002L
24711#define RLC_SRM_CNTL__RESERVED_MASK 0xFFFFFFFCL
24712//RLC_SRM_ARAM_ADDR
24713#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
24714#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xc
24715#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x00000FFFL
24716#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xFFFFF000L
24717//RLC_SRM_ARAM_DATA
24718#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
24719#define RLC_SRM_ARAM_DATA__DATA_MASK 0xFFFFFFFFL
24720//RLC_SRM_DRAM_ADDR
24721#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
24722#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xc
24723#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x00000FFFL
24724#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xFFFFF000L
24725//RLC_SRM_DRAM_DATA
24726#define RLC_SRM_DRAM_DATA__DATA__SHIFT 0x0
24727#define RLC_SRM_DRAM_DATA__DATA_MASK 0xFFFFFFFFL
24728//RLC_SRM_GPM_COMMAND
24729#define RLC_SRM_GPM_COMMAND__OP__SHIFT 0x0
24730#define RLC_SRM_GPM_COMMAND__INDEX_CNTL__SHIFT 0x1
24731#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM__SHIFT 0x2
24732#define RLC_SRM_GPM_COMMAND__SIZE__SHIFT 0x5
24733#define RLC_SRM_GPM_COMMAND__START_OFFSET__SHIFT 0x11
24734#define RLC_SRM_GPM_COMMAND__RESERVED1__SHIFT 0x1d
24735#define RLC_SRM_GPM_COMMAND__DEST_MEMORY__SHIFT 0x1f
24736#define RLC_SRM_GPM_COMMAND__OP_MASK 0x00000001L
24737#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_MASK 0x00000002L
24738#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM_MASK 0x0000001CL
24739#define RLC_SRM_GPM_COMMAND__SIZE_MASK 0x0001FFE0L
24740#define RLC_SRM_GPM_COMMAND__START_OFFSET_MASK 0x1FFE0000L
24741#define RLC_SRM_GPM_COMMAND__RESERVED1_MASK 0x60000000L
24742#define RLC_SRM_GPM_COMMAND__DEST_MEMORY_MASK 0x80000000L
24743//RLC_SRM_GPM_COMMAND_STATUS
24744#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
24745#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
24746#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED__SHIFT 0x2
24747#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
24748#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
24749#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
24750//RLC_SRM_RLCV_COMMAND
24751#define RLC_SRM_RLCV_COMMAND__OP__SHIFT 0x0
24752#define RLC_SRM_RLCV_COMMAND__RESERVED__SHIFT 0x1
24753#define RLC_SRM_RLCV_COMMAND__SIZE__SHIFT 0x4
24754#define RLC_SRM_RLCV_COMMAND__START_OFFSET__SHIFT 0x10
24755#define RLC_SRM_RLCV_COMMAND__RESERVED1__SHIFT 0x1c
24756#define RLC_SRM_RLCV_COMMAND__DEST_MEMORY__SHIFT 0x1f
24757#define RLC_SRM_RLCV_COMMAND__OP_MASK 0x00000001L
24758#define RLC_SRM_RLCV_COMMAND__RESERVED_MASK 0x0000000EL
24759#define RLC_SRM_RLCV_COMMAND__SIZE_MASK 0x0000FFF0L
24760#define RLC_SRM_RLCV_COMMAND__START_OFFSET_MASK 0x0FFF0000L
24761#define RLC_SRM_RLCV_COMMAND__RESERVED1_MASK 0x70000000L
24762#define RLC_SRM_RLCV_COMMAND__DEST_MEMORY_MASK 0x80000000L
24763//RLC_SRM_RLCV_COMMAND_STATUS
24764#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
24765#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
24766#define RLC_SRM_RLCV_COMMAND_STATUS__RESERVED__SHIFT 0x2
24767#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
24768#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
24769#define RLC_SRM_RLCV_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
24770//RLC_SRM_INDEX_CNTL_ADDR_0
24771#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS__SHIFT 0x0
24772#define RLC_SRM_INDEX_CNTL_ADDR_0__RESERVED__SHIFT 0x10
24773#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS_MASK 0x0000FFFFL
24774#define RLC_SRM_INDEX_CNTL_ADDR_0__RESERVED_MASK 0xFFFF0000L
24775//RLC_SRM_INDEX_CNTL_ADDR_1
24776#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS__SHIFT 0x0
24777#define RLC_SRM_INDEX_CNTL_ADDR_1__RESERVED__SHIFT 0x10
24778#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS_MASK 0x0000FFFFL
24779#define RLC_SRM_INDEX_CNTL_ADDR_1__RESERVED_MASK 0xFFFF0000L
24780//RLC_SRM_INDEX_CNTL_ADDR_2
24781#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS__SHIFT 0x0
24782#define RLC_SRM_INDEX_CNTL_ADDR_2__RESERVED__SHIFT 0x10
24783#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS_MASK 0x0000FFFFL
24784#define RLC_SRM_INDEX_CNTL_ADDR_2__RESERVED_MASK 0xFFFF0000L
24785//RLC_SRM_INDEX_CNTL_ADDR_3
24786#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS__SHIFT 0x0
24787#define RLC_SRM_INDEX_CNTL_ADDR_3__RESERVED__SHIFT 0x10
24788#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS_MASK 0x0000FFFFL
24789#define RLC_SRM_INDEX_CNTL_ADDR_3__RESERVED_MASK 0xFFFF0000L
24790//RLC_SRM_INDEX_CNTL_ADDR_4
24791#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS__SHIFT 0x0
24792#define RLC_SRM_INDEX_CNTL_ADDR_4__RESERVED__SHIFT 0x10
24793#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS_MASK 0x0000FFFFL
24794#define RLC_SRM_INDEX_CNTL_ADDR_4__RESERVED_MASK 0xFFFF0000L
24795//RLC_SRM_INDEX_CNTL_ADDR_5
24796#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS__SHIFT 0x0
24797#define RLC_SRM_INDEX_CNTL_ADDR_5__RESERVED__SHIFT 0x10
24798#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS_MASK 0x0000FFFFL
24799#define RLC_SRM_INDEX_CNTL_ADDR_5__RESERVED_MASK 0xFFFF0000L
24800//RLC_SRM_INDEX_CNTL_ADDR_6
24801#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS__SHIFT 0x0
24802#define RLC_SRM_INDEX_CNTL_ADDR_6__RESERVED__SHIFT 0x10
24803#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS_MASK 0x0000FFFFL
24804#define RLC_SRM_INDEX_CNTL_ADDR_6__RESERVED_MASK 0xFFFF0000L
24805//RLC_SRM_INDEX_CNTL_ADDR_7
24806#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS__SHIFT 0x0
24807#define RLC_SRM_INDEX_CNTL_ADDR_7__RESERVED__SHIFT 0x10
24808#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS_MASK 0x0000FFFFL
24809#define RLC_SRM_INDEX_CNTL_ADDR_7__RESERVED_MASK 0xFFFF0000L
24810//RLC_SRM_INDEX_CNTL_DATA_0
24811#define RLC_SRM_INDEX_CNTL_DATA_0__DATA__SHIFT 0x0
24812#define RLC_SRM_INDEX_CNTL_DATA_0__DATA_MASK 0xFFFFFFFFL
24813//RLC_SRM_INDEX_CNTL_DATA_1
24814#define RLC_SRM_INDEX_CNTL_DATA_1__DATA__SHIFT 0x0
24815#define RLC_SRM_INDEX_CNTL_DATA_1__DATA_MASK 0xFFFFFFFFL
24816//RLC_SRM_INDEX_CNTL_DATA_2
24817#define RLC_SRM_INDEX_CNTL_DATA_2__DATA__SHIFT 0x0
24818#define RLC_SRM_INDEX_CNTL_DATA_2__DATA_MASK 0xFFFFFFFFL
24819//RLC_SRM_INDEX_CNTL_DATA_3
24820#define RLC_SRM_INDEX_CNTL_DATA_3__DATA__SHIFT 0x0
24821#define RLC_SRM_INDEX_CNTL_DATA_3__DATA_MASK 0xFFFFFFFFL
24822//RLC_SRM_INDEX_CNTL_DATA_4
24823#define RLC_SRM_INDEX_CNTL_DATA_4__DATA__SHIFT 0x0
24824#define RLC_SRM_INDEX_CNTL_DATA_4__DATA_MASK 0xFFFFFFFFL
24825//RLC_SRM_INDEX_CNTL_DATA_5
24826#define RLC_SRM_INDEX_CNTL_DATA_5__DATA__SHIFT 0x0
24827#define RLC_SRM_INDEX_CNTL_DATA_5__DATA_MASK 0xFFFFFFFFL
24828//RLC_SRM_INDEX_CNTL_DATA_6
24829#define RLC_SRM_INDEX_CNTL_DATA_6__DATA__SHIFT 0x0
24830#define RLC_SRM_INDEX_CNTL_DATA_6__DATA_MASK 0xFFFFFFFFL
24831//RLC_SRM_INDEX_CNTL_DATA_7
24832#define RLC_SRM_INDEX_CNTL_DATA_7__DATA__SHIFT 0x0
24833#define RLC_SRM_INDEX_CNTL_DATA_7__DATA_MASK 0xFFFFFFFFL
24834//RLC_SRM_STAT
24835#define RLC_SRM_STAT__SRM_BUSY__SHIFT 0x0
24836#define RLC_SRM_STAT__SRM_BUSY_DELAY__SHIFT 0x1
24837#define RLC_SRM_STAT__RESERVED__SHIFT 0x2
24838#define RLC_SRM_STAT__SRM_BUSY_MASK 0x00000001L
24839#define RLC_SRM_STAT__SRM_BUSY_DELAY_MASK 0x00000002L
24840#define RLC_SRM_STAT__RESERVED_MASK 0xFFFFFFFCL
24841//RLC_SRM_GPM_ABORT
24842#define RLC_SRM_GPM_ABORT__ABORT__SHIFT 0x0
24843#define RLC_SRM_GPM_ABORT__RESERVED__SHIFT 0x1
24844#define RLC_SRM_GPM_ABORT__ABORT_MASK 0x00000001L
24845#define RLC_SRM_GPM_ABORT__RESERVED_MASK 0xFFFFFFFEL
24846//RLC_CSIB_ADDR_LO
24847#define RLC_CSIB_ADDR_LO__ADDRESS__SHIFT 0x0
24848#define RLC_CSIB_ADDR_LO__ADDRESS_MASK 0xFFFFFFFFL
24849//RLC_CSIB_ADDR_HI
24850#define RLC_CSIB_ADDR_HI__ADDRESS__SHIFT 0x0
24851#define RLC_CSIB_ADDR_HI__ADDRESS_MASK 0x0000FFFFL
24852//RLC_CSIB_LENGTH
24853#define RLC_CSIB_LENGTH__LENGTH__SHIFT 0x0
24854#define RLC_CSIB_LENGTH__LENGTH_MASK 0xFFFFFFFFL
24855//RLC_SMU_COMMAND
24856#define RLC_SMU_COMMAND__CMD__SHIFT 0x0
24857#define RLC_SMU_COMMAND__CMD_MASK 0xFFFFFFFFL
24858//RLC_CP_SCHEDULERS
24859#define RLC_CP_SCHEDULERS__scheduler0__SHIFT 0x0
24860#define RLC_CP_SCHEDULERS__scheduler1__SHIFT 0x8
24861#define RLC_CP_SCHEDULERS__scheduler2__SHIFT 0x10
24862#define RLC_CP_SCHEDULERS__scheduler3__SHIFT 0x18
24863#define RLC_CP_SCHEDULERS__scheduler0_MASK 0x000000FFL
24864#define RLC_CP_SCHEDULERS__scheduler1_MASK 0x0000FF00L
24865#define RLC_CP_SCHEDULERS__scheduler2_MASK 0x00FF0000L
24866#define RLC_CP_SCHEDULERS__scheduler3_MASK 0xFF000000L
24867//RLC_SMU_ARGUMENT_1
24868#define RLC_SMU_ARGUMENT_1__ARG__SHIFT 0x0
24869#define RLC_SMU_ARGUMENT_1__ARG_MASK 0xFFFFFFFFL
24870//RLC_SMU_ARGUMENT_2
24871#define RLC_SMU_ARGUMENT_2__ARG__SHIFT 0x0
24872#define RLC_SMU_ARGUMENT_2__ARG_MASK 0xFFFFFFFFL
24873//RLC_GPM_GENERAL_8
24874#define RLC_GPM_GENERAL_8__DATA__SHIFT 0x0
24875#define RLC_GPM_GENERAL_8__DATA_MASK 0xFFFFFFFFL
24876//RLC_GPM_GENERAL_9
24877#define RLC_GPM_GENERAL_9__DATA__SHIFT 0x0
24878#define RLC_GPM_GENERAL_9__DATA_MASK 0xFFFFFFFFL
24879//RLC_GPM_GENERAL_10
24880#define RLC_GPM_GENERAL_10__DATA__SHIFT 0x0
24881#define RLC_GPM_GENERAL_10__DATA_MASK 0xFFFFFFFFL
24882//RLC_GPM_GENERAL_11
24883#define RLC_GPM_GENERAL_11__DATA__SHIFT 0x0
24884#define RLC_GPM_GENERAL_11__DATA_MASK 0xFFFFFFFFL
24885//RLC_GPM_GENERAL_12
24886#define RLC_GPM_GENERAL_12__DATA__SHIFT 0x0
24887#define RLC_GPM_GENERAL_12__DATA_MASK 0xFFFFFFFFL
24888//RLC_GPM_UTCL1_CNTL_0
24889#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT__SHIFT 0x0
24890#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE__SHIFT 0x18
24891#define RLC_GPM_UTCL1_CNTL_0__BYPASS__SHIFT 0x19
24892#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE__SHIFT 0x1a
24893#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE__SHIFT 0x1b
24894#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP__SHIFT 0x1c
24895#define RLC_GPM_UTCL1_CNTL_0__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
24896#define RLC_GPM_UTCL1_CNTL_0__RESERVED__SHIFT 0x1e
24897#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
24898#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE_MASK 0x01000000L
24899#define RLC_GPM_UTCL1_CNTL_0__BYPASS_MASK 0x02000000L
24900#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE_MASK 0x04000000L
24901#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE_MASK 0x08000000L
24902#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP_MASK 0x10000000L
24903#define RLC_GPM_UTCL1_CNTL_0__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
24904#define RLC_GPM_UTCL1_CNTL_0__RESERVED_MASK 0xC0000000L
24905//RLC_GPM_UTCL1_CNTL_1
24906#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT__SHIFT 0x0
24907#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE__SHIFT 0x18
24908#define RLC_GPM_UTCL1_CNTL_1__BYPASS__SHIFT 0x19
24909#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE__SHIFT 0x1a
24910#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE__SHIFT 0x1b
24911#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP__SHIFT 0x1c
24912#define RLC_GPM_UTCL1_CNTL_1__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
24913#define RLC_GPM_UTCL1_CNTL_1__RESERVED__SHIFT 0x1e
24914#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
24915#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE_MASK 0x01000000L
24916#define RLC_GPM_UTCL1_CNTL_1__BYPASS_MASK 0x02000000L
24917#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE_MASK 0x04000000L
24918#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE_MASK 0x08000000L
24919#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP_MASK 0x10000000L
24920#define RLC_GPM_UTCL1_CNTL_1__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
24921#define RLC_GPM_UTCL1_CNTL_1__RESERVED_MASK 0xC0000000L
24922//RLC_GPM_UTCL1_CNTL_2
24923#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT__SHIFT 0x0
24924#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE__SHIFT 0x18
24925#define RLC_GPM_UTCL1_CNTL_2__BYPASS__SHIFT 0x19
24926#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE__SHIFT 0x1a
24927#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE__SHIFT 0x1b
24928#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP__SHIFT 0x1c
24929#define RLC_GPM_UTCL1_CNTL_2__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
24930#define RLC_GPM_UTCL1_CNTL_2__RESERVED__SHIFT 0x1e
24931#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
24932#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE_MASK 0x01000000L
24933#define RLC_GPM_UTCL1_CNTL_2__BYPASS_MASK 0x02000000L
24934#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE_MASK 0x04000000L
24935#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE_MASK 0x08000000L
24936#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP_MASK 0x10000000L
24937#define RLC_GPM_UTCL1_CNTL_2__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
24938#define RLC_GPM_UTCL1_CNTL_2__RESERVED_MASK 0xC0000000L
24939//RLC_SPM_UTCL1_CNTL
24940#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
24941#define RLC_SPM_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
24942#define RLC_SPM_UTCL1_CNTL__BYPASS__SHIFT 0x19
24943#define RLC_SPM_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
24944#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
24945#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
24946#define RLC_SPM_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
24947#define RLC_SPM_UTCL1_CNTL__RESERVED__SHIFT 0x1e
24948#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
24949#define RLC_SPM_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
24950#define RLC_SPM_UTCL1_CNTL__BYPASS_MASK 0x02000000L
24951#define RLC_SPM_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
24952#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
24953#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
24954#define RLC_SPM_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
24955#define RLC_SPM_UTCL1_CNTL__RESERVED_MASK 0xC0000000L
24956//RLC_UTCL1_STATUS_2
24957#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY__SHIFT 0x0
24958#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY__SHIFT 0x1
24959#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY__SHIFT 0x2
24960#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY__SHIFT 0x3
24961#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_BUSY__SHIFT 0x4
24962#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans__SHIFT 0x5
24963#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans__SHIFT 0x6
24964#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans__SHIFT 0x7
24965#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans__SHIFT 0x8
24966#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_StallOnTrans__SHIFT 0x9
24967#define RLC_UTCL1_STATUS_2__RESERVED__SHIFT 0xa
24968#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY_MASK 0x00000001L
24969#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY_MASK 0x00000002L
24970#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY_MASK 0x00000004L
24971#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY_MASK 0x00000008L
24972#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_BUSY_MASK 0x00000010L
24973#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans_MASK 0x00000020L
24974#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans_MASK 0x00000040L
24975#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans_MASK 0x00000080L
24976#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans_MASK 0x00000100L
24977#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_StallOnTrans_MASK 0x00000200L
24978#define RLC_UTCL1_STATUS_2__RESERVED_MASK 0xFFFFFC00L
24979//RLC_LB_THR_CONFIG_2
24980#define RLC_LB_THR_CONFIG_2__DATA__SHIFT 0x0
24981#define RLC_LB_THR_CONFIG_2__DATA_MASK 0xFFFFFFFFL
24982//RLC_LB_THR_CONFIG_3
24983#define RLC_LB_THR_CONFIG_3__DATA__SHIFT 0x0
24984#define RLC_LB_THR_CONFIG_3__DATA_MASK 0xFFFFFFFFL
24985//RLC_LB_THR_CONFIG_4
24986#define RLC_LB_THR_CONFIG_4__DATA__SHIFT 0x0
24987#define RLC_LB_THR_CONFIG_4__DATA_MASK 0xFFFFFFFFL
24988//RLC_SPM_UTCL1_ERROR_1
24989#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError__SHIFT 0x0
24990#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
24991#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
24992#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError_MASK 0x00000003L
24993#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
24994#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
24995//RLC_SPM_UTCL1_ERROR_2
24996#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
24997#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
24998//RLC_GPM_UTCL1_TH0_ERROR_1
24999#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError__SHIFT 0x0
25000#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
25001#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
25002#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError_MASK 0x00000003L
25003#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
25004#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
25005//RLC_LB_THR_CONFIG_1
25006#define RLC_LB_THR_CONFIG_1__DATA__SHIFT 0x0
25007#define RLC_LB_THR_CONFIG_1__DATA_MASK 0xFFFFFFFFL
25008//RLC_GPM_UTCL1_TH0_ERROR_2
25009#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
25010#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
25011//RLC_GPM_UTCL1_TH1_ERROR_1
25012#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError__SHIFT 0x0
25013#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
25014#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
25015#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError_MASK 0x00000003L
25016#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
25017#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
25018//RLC_GPM_UTCL1_TH1_ERROR_2
25019#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
25020#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
25021//RLC_GPM_UTCL1_TH2_ERROR_1
25022#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError__SHIFT 0x0
25023#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
25024#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
25025#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError_MASK 0x00000003L
25026#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
25027#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
25028//RLC_GPM_UTCL1_TH2_ERROR_2
25029#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
25030#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
25031//RLC_CGCG_CGLS_CTRL_3D
25032#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN__SHIFT 0x0
25033#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN__SHIFT 0x1
25034#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
25035#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
25036#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER__SHIFT 0x1b
25037#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL__SHIFT 0x1c
25038#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE__SHIFT 0x1d
25039#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN__SHIFT 0x1f
25040#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK 0x00000001L
25041#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK 0x00000002L
25042#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
25043#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
25044#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER_MASK 0x08000000L
25045#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL_MASK 0x10000000L
25046#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE_MASK 0x60000000L
25047#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN_MASK 0x80000000L
25048//RLC_CGCG_RAMP_CTRL_3D
25049#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT__SHIFT 0x0
25050#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT__SHIFT 0x4
25051#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT__SHIFT 0x8
25052#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT__SHIFT 0xc
25053#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT__SHIFT 0x10
25054#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT__SHIFT 0x1c
25055#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT_MASK 0x0000000FL
25056#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
25057#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT_MASK 0x00000F00L
25058#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT_MASK 0x0000F000L
25059#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT_MASK 0x0FFF0000L
25060#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT_MASK 0xF0000000L
25061//RLC_SEMAPHORE_0
25062#define RLC_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
25063#define RLC_SEMAPHORE_0__RESERVED__SHIFT 0x5
25064#define RLC_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
25065#define RLC_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
25066//RLC_SEMAPHORE_1
25067#define RLC_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
25068#define RLC_SEMAPHORE_1__RESERVED__SHIFT 0x5
25069#define RLC_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
25070#define RLC_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
25071//RLC_CP_EOF_INT
25072#define RLC_CP_EOF_INT__INTERRUPT__SHIFT 0x0
25073#define RLC_CP_EOF_INT__RESERVED__SHIFT 0x1
25074#define RLC_CP_EOF_INT__INTERRUPT_MASK 0x00000001L
25075#define RLC_CP_EOF_INT__RESERVED_MASK 0xFFFFFFFEL
25076//RLC_CP_EOF_INT_CNT
25077#define RLC_CP_EOF_INT_CNT__CNT__SHIFT 0x0
25078#define RLC_CP_EOF_INT_CNT__CNT_MASK 0xFFFFFFFFL
25079//RLC_SPARE_INT
25080#define RLC_SPARE_INT__INTERRUPT__SHIFT 0x0
25081#define RLC_SPARE_INT__RESERVED__SHIFT 0x1
25082#define RLC_SPARE_INT__INTERRUPT_MASK 0x00000001L
25083#define RLC_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
25084//RLC_PREWALKER_UTCL1_CNTL
25085#define RLC_PREWALKER_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
25086#define RLC_PREWALKER_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
25087#define RLC_PREWALKER_UTCL1_CNTL__BYPASS__SHIFT 0x19
25088#define RLC_PREWALKER_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
25089#define RLC_PREWALKER_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
25090#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
25091#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
25092#define RLC_PREWALKER_UTCL1_CNTL__RESERVED__SHIFT 0x1e
25093#define RLC_PREWALKER_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
25094#define RLC_PREWALKER_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
25095#define RLC_PREWALKER_UTCL1_CNTL__BYPASS_MASK 0x02000000L
25096#define RLC_PREWALKER_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
25097#define RLC_PREWALKER_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
25098#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
25099#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
25100#define RLC_PREWALKER_UTCL1_CNTL__RESERVED_MASK 0xC0000000L
25101//RLC_PREWALKER_UTCL1_TRIG
25102#define RLC_PREWALKER_UTCL1_TRIG__VALID__SHIFT 0x0
25103#define RLC_PREWALKER_UTCL1_TRIG__VMID__SHIFT 0x1
25104#define RLC_PREWALKER_UTCL1_TRIG__PRIME_MODE__SHIFT 0x5
25105#define RLC_PREWALKER_UTCL1_TRIG__READ_PERM__SHIFT 0x6
25106#define RLC_PREWALKER_UTCL1_TRIG__WRITE_PERM__SHIFT 0x7
25107#define RLC_PREWALKER_UTCL1_TRIG__EXEC_PERM__SHIFT 0x8
25108#define RLC_PREWALKER_UTCL1_TRIG__RESERVED__SHIFT 0x9
25109#define RLC_PREWALKER_UTCL1_TRIG__READY__SHIFT 0x1f
25110#define RLC_PREWALKER_UTCL1_TRIG__VALID_MASK 0x00000001L
25111#define RLC_PREWALKER_UTCL1_TRIG__VMID_MASK 0x0000001EL
25112#define RLC_PREWALKER_UTCL1_TRIG__PRIME_MODE_MASK 0x00000020L
25113#define RLC_PREWALKER_UTCL1_TRIG__READ_PERM_MASK 0x00000040L
25114#define RLC_PREWALKER_UTCL1_TRIG__WRITE_PERM_MASK 0x00000080L
25115#define RLC_PREWALKER_UTCL1_TRIG__EXEC_PERM_MASK 0x00000100L
25116#define RLC_PREWALKER_UTCL1_TRIG__RESERVED_MASK 0x7FFFFE00L
25117#define RLC_PREWALKER_UTCL1_TRIG__READY_MASK 0x80000000L
25118//RLC_PREWALKER_UTCL1_ADDR_LSB
25119#define RLC_PREWALKER_UTCL1_ADDR_LSB__ADDR_LSB__SHIFT 0x0
25120#define RLC_PREWALKER_UTCL1_ADDR_LSB__ADDR_LSB_MASK 0xFFFFFFFFL
25121//RLC_PREWALKER_UTCL1_ADDR_MSB
25122#define RLC_PREWALKER_UTCL1_ADDR_MSB__ADDR_MSB__SHIFT 0x0
25123#define RLC_PREWALKER_UTCL1_ADDR_MSB__ADDR_MSB_MASK 0x0000FFFFL
25124//RLC_PREWALKER_UTCL1_SIZE_LSB
25125#define RLC_PREWALKER_UTCL1_SIZE_LSB__SIZE_LSB__SHIFT 0x0
25126#define RLC_PREWALKER_UTCL1_SIZE_LSB__SIZE_LSB_MASK 0xFFFFFFFFL
25127//RLC_PREWALKER_UTCL1_SIZE_MSB
25128#define RLC_PREWALKER_UTCL1_SIZE_MSB__SIZE_MSB__SHIFT 0x0
25129#define RLC_PREWALKER_UTCL1_SIZE_MSB__SIZE_MSB_MASK 0x00000003L
25130//RLC_DSM_TRIG
25131//RLC_UTCL1_STATUS
25132#define RLC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
25133#define RLC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
25134#define RLC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
25135#define RLC_UTCL1_STATUS__RESERVED__SHIFT 0x3
25136#define RLC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
25137#define RLC_UTCL1_STATUS__RESERVED_1__SHIFT 0xe
25138#define RLC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
25139#define RLC_UTCL1_STATUS__RESERVED_2__SHIFT 0x16
25140#define RLC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
25141#define RLC_UTCL1_STATUS__RESERVED_3__SHIFT 0x1e
25142#define RLC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
25143#define RLC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
25144#define RLC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
25145#define RLC_UTCL1_STATUS__RESERVED_MASK 0x000000F8L
25146#define RLC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
25147#define RLC_UTCL1_STATUS__RESERVED_1_MASK 0x0000C000L
25148#define RLC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
25149#define RLC_UTCL1_STATUS__RESERVED_2_MASK 0x00C00000L
25150#define RLC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
25151#define RLC_UTCL1_STATUS__RESERVED_3_MASK 0xC0000000L
25152//RLC_R2I_CNTL_0
25153#define RLC_R2I_CNTL_0__Data__SHIFT 0x0
25154#define RLC_R2I_CNTL_0__Data_MASK 0xFFFFFFFFL
25155//RLC_R2I_CNTL_1
25156#define RLC_R2I_CNTL_1__Data__SHIFT 0x0
25157#define RLC_R2I_CNTL_1__Data_MASK 0xFFFFFFFFL
25158//RLC_R2I_CNTL_2
25159#define RLC_R2I_CNTL_2__Data__SHIFT 0x0
25160#define RLC_R2I_CNTL_2__Data_MASK 0xFFFFFFFFL
25161//RLC_R2I_CNTL_3
25162#define RLC_R2I_CNTL_3__Data__SHIFT 0x0
25163#define RLC_R2I_CNTL_3__Data_MASK 0xFFFFFFFFL
25164//RLC_UTCL2_CNTL
25165#define RLC_UTCL2_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x0
25166#define RLC_UTCL2_CNTL__RESERVED__SHIFT 0x1
25167#define RLC_UTCL2_CNTL__MTYPE_NO_PTE_MODE_MASK 0x00000001L
25168#define RLC_UTCL2_CNTL__RESERVED_MASK 0xFFFFFFFEL
25169//RLC_LBPW_CU_STAT
25170#define RLC_LBPW_CU_STAT__MAX_CU__SHIFT 0x0
25171#define RLC_LBPW_CU_STAT__ON_CU__SHIFT 0x10
25172#define RLC_LBPW_CU_STAT__MAX_CU_MASK 0x0000FFFFL
25173#define RLC_LBPW_CU_STAT__ON_CU_MASK 0xFFFF0000L
25174//RLC_DS_CNTL
25175#define RLC_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK__SHIFT 0x0
25176#define RLC_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK__SHIFT 0x1
25177#define RLC_DS_CNTL__RESRVED__SHIFT 0x2
25178#define RLC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK__SHIFT 0x10
25179#define RLC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK__SHIFT 0x11
25180#define RLC_DS_CNTL__RESRVED_1__SHIFT 0x12
25181#define RLC_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK_MASK 0x00000001L
25182#define RLC_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK_MASK 0x00000002L
25183#define RLC_DS_CNTL__RESRVED_MASK 0x0000FFFCL
25184#define RLC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK_MASK 0x00010000L
25185#define RLC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK_MASK 0x00020000L
25186#define RLC_DS_CNTL__RESRVED_1_MASK 0xFFFC0000L
25187//RLC_RLCV_SPARE_INT
25188#define RLC_RLCV_SPARE_INT__INTERRUPT__SHIFT 0x0
25189#define RLC_RLCV_SPARE_INT__RESERVED__SHIFT 0x1
25190#define RLC_RLCV_SPARE_INT__INTERRUPT_MASK 0x00000001L
25191#define RLC_RLCV_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
25192
25193
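/*
 * Illustrative aside (not part of the generated register header): every field
 * above is described by a ..._MASK / ...__SHIFT pair, and a caller reads or
 * updates a field by masking and shifting a raw 32-bit register value.  A
 * minimal sketch under that assumption follows; the helper names are
 * hypothetical, u32 is assumed from linux/types.h, and the register
 * read/write accessors and offset macros live in other amdgpu headers, so
 * only the bit manipulation is shown.
 */
#if 0	/* example only, excluded from compilation */
static inline u32 rlc_lbpw_cu_stat_get_on_cu(u32 reg_val)
{
	/* ON_CU occupies bits 31:16 of RLC_LBPW_CU_STAT; mask, then shift down */
	return (reg_val & RLC_LBPW_CU_STAT__ON_CU_MASK) >>
	       RLC_LBPW_CU_STAT__ON_CU__SHIFT;
}

static inline u32 rlc_lbpw_cu_stat_set_max_cu(u32 reg_val, u32 max_cu)
{
	/* clear MAX_CU (bits 15:0), then OR in the new value shifted into place */
	reg_val &= ~RLC_LBPW_CU_STAT__MAX_CU_MASK;
	reg_val |= (max_cu << RLC_LBPW_CU_STAT__MAX_CU__SHIFT) &
		   RLC_LBPW_CU_STAT__MAX_CU_MASK;
	return reg_val;
}
#endif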
25194// addressBlock: gc_pwrdec
25195//CGTS_SM_CTRL_REG
25196#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY__SHIFT 0x0
25197#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY__SHIFT 0x4
25198#define CGTS_SM_CTRL_REG__MGCG_ENABLED__SHIFT 0xc
25199#define CGTS_SM_CTRL_REG__BASE_MODE__SHIFT 0x10
25200#define CGTS_SM_CTRL_REG__SM_MODE__SHIFT 0x11
25201#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE__SHIFT 0x14
25202#define CGTS_SM_CTRL_REG__OVERRIDE__SHIFT 0x15
25203#define CGTS_SM_CTRL_REG__LS_OVERRIDE__SHIFT 0x16
25204#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN__SHIFT 0x17
25205#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT 0x18
25206#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY_MASK 0x0000000FL
25207#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY_MASK 0x00000FF0L
25208#define CGTS_SM_CTRL_REG__MGCG_ENABLED_MASK 0x00001000L
25209#define CGTS_SM_CTRL_REG__BASE_MODE_MASK 0x00010000L
25210#define CGTS_SM_CTRL_REG__SM_MODE_MASK 0x000E0000L
25211#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK 0x00100000L
25212#define CGTS_SM_CTRL_REG__OVERRIDE_MASK 0x00200000L
25213#define CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK 0x00400000L
25214#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK 0x00800000L
25215#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK 0xFF000000L
25216//CGTS_RD_CTRL_REG
25217#define CGTS_RD_CTRL_REG__ROW_MUX_SEL__SHIFT 0x0
25218#define CGTS_RD_CTRL_REG__REG_MUX_SEL__SHIFT 0x8
25219#define CGTS_RD_CTRL_REG__ROW_MUX_SEL_MASK 0x0000001FL
25220#define CGTS_RD_CTRL_REG__REG_MUX_SEL_MASK 0x00001F00L
25221//CGTS_RD_REG
25222#define CGTS_RD_REG__READ_DATA__SHIFT 0x0
25223#define CGTS_RD_REG__READ_DATA_MASK 0x00003FFFL
25224//CGTS_TCC_DISABLE
25225#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
25226#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
25227//CGTS_USER_TCC_DISABLE
25228#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
25229#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
25230//CGTS_CU0_SP0_CTRL_REG
25231#define CGTS_CU0_SP0_CTRL_REG__SP00__SHIFT 0x0
25232#define CGTS_CU0_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
25233#define CGTS_CU0_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
25234#define CGTS_CU0_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
25235#define CGTS_CU0_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
25236#define CGTS_CU0_SP0_CTRL_REG__SP01__SHIFT 0x10
25237#define CGTS_CU0_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
25238#define CGTS_CU0_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
25239#define CGTS_CU0_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
25240#define CGTS_CU0_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25241#define CGTS_CU0_SP0_CTRL_REG__SP00_MASK 0x0000007FL
25242#define CGTS_CU0_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
25243#define CGTS_CU0_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
25244#define CGTS_CU0_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
25245#define CGTS_CU0_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25246#define CGTS_CU0_SP0_CTRL_REG__SP01_MASK 0x007F0000L
25247#define CGTS_CU0_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
25248#define CGTS_CU0_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
25249#define CGTS_CU0_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
25250#define CGTS_CU0_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25251//CGTS_CU0_LDS_SQ_CTRL_REG
25252#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
25253#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
25254#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
25255#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
25256#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
25257#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
25258#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
25259#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
25260#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
25261#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25262#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
25263#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
25264#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
25265#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
25266#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25267#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
25268#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
25269#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
25270#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
25271#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25272//CGTS_CU0_TA_SQC_CTRL_REG
25273#define CGTS_CU0_TA_SQC_CTRL_REG__TA__SHIFT 0x0
25274#define CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
25275#define CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
25276#define CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
25277#define CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
25278#define CGTS_CU0_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
25279#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
25280#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
25281#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
25282#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25283#define CGTS_CU0_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
25284#define CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
25285#define CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
25286#define CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
25287#define CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25288#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
25289#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
25290#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
25291#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
25292#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25293//CGTS_CU0_SP1_CTRL_REG
25294#define CGTS_CU0_SP1_CTRL_REG__SP10__SHIFT 0x0
25295#define CGTS_CU0_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
25296#define CGTS_CU0_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
25297#define CGTS_CU0_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
25298#define CGTS_CU0_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
25299#define CGTS_CU0_SP1_CTRL_REG__SP11__SHIFT 0x10
25300#define CGTS_CU0_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
25301#define CGTS_CU0_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
25302#define CGTS_CU0_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
25303#define CGTS_CU0_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25304#define CGTS_CU0_SP1_CTRL_REG__SP10_MASK 0x0000007FL
25305#define CGTS_CU0_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
25306#define CGTS_CU0_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
25307#define CGTS_CU0_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
25308#define CGTS_CU0_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25309#define CGTS_CU0_SP1_CTRL_REG__SP11_MASK 0x007F0000L
25310#define CGTS_CU0_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
25311#define CGTS_CU0_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
25312#define CGTS_CU0_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
25313#define CGTS_CU0_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25314//CGTS_CU0_TD_TCP_CTRL_REG
25315#define CGTS_CU0_TD_TCP_CTRL_REG__TD__SHIFT 0x0
25316#define CGTS_CU0_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
25317#define CGTS_CU0_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
25318#define CGTS_CU0_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
25319#define CGTS_CU0_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
25320#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
25321#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
25322#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
25323#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
25324#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25325#define CGTS_CU0_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
25326#define CGTS_CU0_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
25327#define CGTS_CU0_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
25328#define CGTS_CU0_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
25329#define CGTS_CU0_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25330#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
25331#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
25332#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
25333#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
25334#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
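/*
 * Illustrative aside (not part of the generated register header): most of the
 * CGTS per-CU override fields above are single-bit flags, so a driver-side
 * helper can set them by OR-ing the ..._MASK constants directly, while the
 * 2-bit BUSY_OVERRIDE field still goes through shift+mask.  A hedged sketch
 * with a hypothetical helper name and an arbitrary example value, again
 * excluded from compilation:
 */
#if 0	/* example only */
static inline u32 cgts_cu0_ta_sqc_set_overrides(u32 reg_val)
{
	/* set the 1-bit TA override flags by OR-ing their masks directly */
	reg_val |= CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK |
		   CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK |
		   CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK;
	/* write an arbitrary example value (3) into the 2-bit BUSY_OVERRIDE
	 * field (mask 0x00000300) via clear, shift and mask */
	reg_val &= ~CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK;
	reg_val |= (3 << CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT) &
		   CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK;
	return reg_val;
}
#endif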
25335//CGTS_CU1_SP0_CTRL_REG
25336#define CGTS_CU1_SP0_CTRL_REG__SP00__SHIFT 0x0
25337#define CGTS_CU1_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
25338#define CGTS_CU1_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
25339#define CGTS_CU1_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
25340#define CGTS_CU1_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
25341#define CGTS_CU1_SP0_CTRL_REG__SP01__SHIFT 0x10
25342#define CGTS_CU1_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
25343#define CGTS_CU1_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
25344#define CGTS_CU1_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
25345#define CGTS_CU1_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25346#define CGTS_CU1_SP0_CTRL_REG__SP00_MASK 0x0000007FL
25347#define CGTS_CU1_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
25348#define CGTS_CU1_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
25349#define CGTS_CU1_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
25350#define CGTS_CU1_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25351#define CGTS_CU1_SP0_CTRL_REG__SP01_MASK 0x007F0000L
25352#define CGTS_CU1_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
25353#define CGTS_CU1_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
25354#define CGTS_CU1_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
25355#define CGTS_CU1_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25356//CGTS_CU1_LDS_SQ_CTRL_REG
25357#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
25358#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
25359#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
25360#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
25361#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
25362#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
25363#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
25364#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
25365#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
25366#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25367#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
25368#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
25369#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
25370#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
25371#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25372#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
25373#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
25374#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
25375#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
25376#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25377//CGTS_CU1_TA_SQC_CTRL_REG
25378#define CGTS_CU1_TA_SQC_CTRL_REG__TA__SHIFT 0x0
25379#define CGTS_CU1_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
25380#define CGTS_CU1_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
25381#define CGTS_CU1_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
25382#define CGTS_CU1_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
25383#define CGTS_CU1_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
25384#define CGTS_CU1_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
25385#define CGTS_CU1_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
25386#define CGTS_CU1_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
25387#define CGTS_CU1_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25388//CGTS_CU1_SP1_CTRL_REG
25389#define CGTS_CU1_SP1_CTRL_REG__SP10__SHIFT 0x0
25390#define CGTS_CU1_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
25391#define CGTS_CU1_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
25392#define CGTS_CU1_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
25393#define CGTS_CU1_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
25394#define CGTS_CU1_SP1_CTRL_REG__SP11__SHIFT 0x10
25395#define CGTS_CU1_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
25396#define CGTS_CU1_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
25397#define CGTS_CU1_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
25398#define CGTS_CU1_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25399#define CGTS_CU1_SP1_CTRL_REG__SP10_MASK 0x0000007FL
25400#define CGTS_CU1_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
25401#define CGTS_CU1_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
25402#define CGTS_CU1_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
25403#define CGTS_CU1_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25404#define CGTS_CU1_SP1_CTRL_REG__SP11_MASK 0x007F0000L
25405#define CGTS_CU1_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
25406#define CGTS_CU1_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
25407#define CGTS_CU1_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
25408#define CGTS_CU1_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25409//CGTS_CU1_TD_TCP_CTRL_REG
25410#define CGTS_CU1_TD_TCP_CTRL_REG__TD__SHIFT 0x0
25411#define CGTS_CU1_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
25412#define CGTS_CU1_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
25413#define CGTS_CU1_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
25414#define CGTS_CU1_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
25415#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
25416#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
25417#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
25418#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
25419#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25420#define CGTS_CU1_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
25421#define CGTS_CU1_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
25422#define CGTS_CU1_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
25423#define CGTS_CU1_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
25424#define CGTS_CU1_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25425#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
25426#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
25427#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
25428#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
25429#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25430//CGTS_CU2_SP0_CTRL_REG
25431#define CGTS_CU2_SP0_CTRL_REG__SP00__SHIFT 0x0
25432#define CGTS_CU2_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
25433#define CGTS_CU2_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
25434#define CGTS_CU2_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
25435#define CGTS_CU2_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
25436#define CGTS_CU2_SP0_CTRL_REG__SP01__SHIFT 0x10
25437#define CGTS_CU2_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
25438#define CGTS_CU2_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
25439#define CGTS_CU2_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
25440#define CGTS_CU2_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25441#define CGTS_CU2_SP0_CTRL_REG__SP00_MASK 0x0000007FL
25442#define CGTS_CU2_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
25443#define CGTS_CU2_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
25444#define CGTS_CU2_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
25445#define CGTS_CU2_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25446#define CGTS_CU2_SP0_CTRL_REG__SP01_MASK 0x007F0000L
25447#define CGTS_CU2_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
25448#define CGTS_CU2_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
25449#define CGTS_CU2_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
25450#define CGTS_CU2_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25451//CGTS_CU2_LDS_SQ_CTRL_REG
25452#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
25453#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
25454#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
25455#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
25456#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
25457#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
25458#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
25459#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
25460#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
25461#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25462#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
25463#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
25464#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
25465#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
25466#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25467#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
25468#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
25469#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
25470#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
25471#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25472//CGTS_CU2_TA_SQC_CTRL_REG
25473#define CGTS_CU2_TA_SQC_CTRL_REG__TA__SHIFT 0x0
25474#define CGTS_CU2_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
25475#define CGTS_CU2_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
25476#define CGTS_CU2_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
25477#define CGTS_CU2_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
25478#define CGTS_CU2_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
25479#define CGTS_CU2_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
25480#define CGTS_CU2_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
25481#define CGTS_CU2_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
25482#define CGTS_CU2_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25483//CGTS_CU2_SP1_CTRL_REG
25484#define CGTS_CU2_SP1_CTRL_REG__SP10__SHIFT 0x0
25485#define CGTS_CU2_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
25486#define CGTS_CU2_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
25487#define CGTS_CU2_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
25488#define CGTS_CU2_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
25489#define CGTS_CU2_SP1_CTRL_REG__SP11__SHIFT 0x10
25490#define CGTS_CU2_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
25491#define CGTS_CU2_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
25492#define CGTS_CU2_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
25493#define CGTS_CU2_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25494#define CGTS_CU2_SP1_CTRL_REG__SP10_MASK 0x0000007FL
25495#define CGTS_CU2_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
25496#define CGTS_CU2_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
25497#define CGTS_CU2_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
25498#define CGTS_CU2_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25499#define CGTS_CU2_SP1_CTRL_REG__SP11_MASK 0x007F0000L
25500#define CGTS_CU2_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
25501#define CGTS_CU2_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
25502#define CGTS_CU2_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
25503#define CGTS_CU2_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25504//CGTS_CU2_TD_TCP_CTRL_REG
25505#define CGTS_CU2_TD_TCP_CTRL_REG__TD__SHIFT 0x0
25506#define CGTS_CU2_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
25507#define CGTS_CU2_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
25508#define CGTS_CU2_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
25509#define CGTS_CU2_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
25510#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
25511#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
25512#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
25513#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
25514#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25515#define CGTS_CU2_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
25516#define CGTS_CU2_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
25517#define CGTS_CU2_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
25518#define CGTS_CU2_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
25519#define CGTS_CU2_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25520#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
25521#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
25522#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
25523#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
25524#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25525//CGTS_CU3_SP0_CTRL_REG
25526#define CGTS_CU3_SP0_CTRL_REG__SP00__SHIFT 0x0
25527#define CGTS_CU3_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
25528#define CGTS_CU3_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
25529#define CGTS_CU3_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
25530#define CGTS_CU3_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
25531#define CGTS_CU3_SP0_CTRL_REG__SP01__SHIFT 0x10
25532#define CGTS_CU3_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
25533#define CGTS_CU3_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
25534#define CGTS_CU3_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
25535#define CGTS_CU3_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25536#define CGTS_CU3_SP0_CTRL_REG__SP00_MASK 0x0000007FL
25537#define CGTS_CU3_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
25538#define CGTS_CU3_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
25539#define CGTS_CU3_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
25540#define CGTS_CU3_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25541#define CGTS_CU3_SP0_CTRL_REG__SP01_MASK 0x007F0000L
25542#define CGTS_CU3_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
25543#define CGTS_CU3_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
25544#define CGTS_CU3_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
25545#define CGTS_CU3_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25546//CGTS_CU3_LDS_SQ_CTRL_REG
25547#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
25548#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
25549#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
25550#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
25551#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
25552#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
25553#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
25554#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
25555#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
25556#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25557#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
25558#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
25559#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
25560#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
25561#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25562#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
25563#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
25564#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
25565#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
25566#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25567//CGTS_CU3_TA_SQC_CTRL_REG
25568#define CGTS_CU3_TA_SQC_CTRL_REG__TA__SHIFT 0x0
25569#define CGTS_CU3_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
25570#define CGTS_CU3_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
25571#define CGTS_CU3_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
25572#define CGTS_CU3_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
25573#define CGTS_CU3_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
25574#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
25575#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
25576#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
25577#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25578#define CGTS_CU3_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
25579#define CGTS_CU3_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
25580#define CGTS_CU3_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
25581#define CGTS_CU3_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
25582#define CGTS_CU3_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25583#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
25584#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
25585#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
25586#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
25587#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25588//CGTS_CU3_SP1_CTRL_REG
25589#define CGTS_CU3_SP1_CTRL_REG__SP10__SHIFT 0x0
25590#define CGTS_CU3_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
25591#define CGTS_CU3_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
25592#define CGTS_CU3_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
25593#define CGTS_CU3_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
25594#define CGTS_CU3_SP1_CTRL_REG__SP11__SHIFT 0x10
25595#define CGTS_CU3_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
25596#define CGTS_CU3_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
25597#define CGTS_CU3_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
25598#define CGTS_CU3_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25599#define CGTS_CU3_SP1_CTRL_REG__SP10_MASK 0x0000007FL
25600#define CGTS_CU3_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
25601#define CGTS_CU3_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
25602#define CGTS_CU3_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
25603#define CGTS_CU3_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25604#define CGTS_CU3_SP1_CTRL_REG__SP11_MASK 0x007F0000L
25605#define CGTS_CU3_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
25606#define CGTS_CU3_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
25607#define CGTS_CU3_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
25608#define CGTS_CU3_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25609//CGTS_CU3_TD_TCP_CTRL_REG
25610#define CGTS_CU3_TD_TCP_CTRL_REG__TD__SHIFT 0x0
25611#define CGTS_CU3_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
25612#define CGTS_CU3_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
25613#define CGTS_CU3_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
25614#define CGTS_CU3_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
25615#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
25616#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
25617#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
25618#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
25619#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25620#define CGTS_CU3_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
25621#define CGTS_CU3_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
25622#define CGTS_CU3_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
25623#define CGTS_CU3_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
25624#define CGTS_CU3_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25625#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
25626#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
25627#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
25628#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
25629#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25630//CGTS_CU4_SP0_CTRL_REG
25631#define CGTS_CU4_SP0_CTRL_REG__SP00__SHIFT 0x0
25632#define CGTS_CU4_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
25633#define CGTS_CU4_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
25634#define CGTS_CU4_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
25635#define CGTS_CU4_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
25636#define CGTS_CU4_SP0_CTRL_REG__SP01__SHIFT 0x10
25637#define CGTS_CU4_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
25638#define CGTS_CU4_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
25639#define CGTS_CU4_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
25640#define CGTS_CU4_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25641#define CGTS_CU4_SP0_CTRL_REG__SP00_MASK 0x0000007FL
25642#define CGTS_CU4_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
25643#define CGTS_CU4_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
25644#define CGTS_CU4_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
25645#define CGTS_CU4_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25646#define CGTS_CU4_SP0_CTRL_REG__SP01_MASK 0x007F0000L
25647#define CGTS_CU4_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
25648#define CGTS_CU4_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
25649#define CGTS_CU4_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
25650#define CGTS_CU4_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25651//CGTS_CU4_LDS_SQ_CTRL_REG
25652#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
25653#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
25654#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
25655#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
25656#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
25657#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
25658#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
25659#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
25660#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
25661#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25662#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
25663#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
25664#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
25665#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
25666#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25667#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
25668#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
25669#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
25670#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
25671#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25672//CGTS_CU4_TA_SQC_CTRL_REG
25673#define CGTS_CU4_TA_SQC_CTRL_REG__TA__SHIFT 0x0
25674#define CGTS_CU4_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
25675#define CGTS_CU4_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
25676#define CGTS_CU4_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
25677#define CGTS_CU4_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
25678#define CGTS_CU4_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
25679#define CGTS_CU4_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
25680#define CGTS_CU4_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
25681#define CGTS_CU4_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
25682#define CGTS_CU4_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25683//CGTS_CU4_SP1_CTRL_REG
25684#define CGTS_CU4_SP1_CTRL_REG__SP10__SHIFT 0x0
25685#define CGTS_CU4_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
25686#define CGTS_CU4_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
25687#define CGTS_CU4_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
25688#define CGTS_CU4_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
25689#define CGTS_CU4_SP1_CTRL_REG__SP11__SHIFT 0x10
25690#define CGTS_CU4_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
25691#define CGTS_CU4_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
25692#define CGTS_CU4_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
25693#define CGTS_CU4_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25694#define CGTS_CU4_SP1_CTRL_REG__SP10_MASK 0x0000007FL
25695#define CGTS_CU4_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
25696#define CGTS_CU4_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
25697#define CGTS_CU4_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
25698#define CGTS_CU4_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25699#define CGTS_CU4_SP1_CTRL_REG__SP11_MASK 0x007F0000L
25700#define CGTS_CU4_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
25701#define CGTS_CU4_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
25702#define CGTS_CU4_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
25703#define CGTS_CU4_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25704//CGTS_CU4_TD_TCP_CTRL_REG
25705#define CGTS_CU4_TD_TCP_CTRL_REG__TD__SHIFT 0x0
25706#define CGTS_CU4_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
25707#define CGTS_CU4_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
25708#define CGTS_CU4_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
25709#define CGTS_CU4_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
25710#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
25711#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
25712#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
25713#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
25714#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25715#define CGTS_CU4_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
25716#define CGTS_CU4_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
25717#define CGTS_CU4_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
25718#define CGTS_CU4_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
25719#define CGTS_CU4_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25720#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
25721#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
25722#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
25723#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
25724#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25725//CGTS_CU5_SP0_CTRL_REG
25726#define CGTS_CU5_SP0_CTRL_REG__SP00__SHIFT 0x0
25727#define CGTS_CU5_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
25728#define CGTS_CU5_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
25729#define CGTS_CU5_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
25730#define CGTS_CU5_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
25731#define CGTS_CU5_SP0_CTRL_REG__SP01__SHIFT 0x10
25732#define CGTS_CU5_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
25733#define CGTS_CU5_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
25734#define CGTS_CU5_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
25735#define CGTS_CU5_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25736#define CGTS_CU5_SP0_CTRL_REG__SP00_MASK 0x0000007FL
25737#define CGTS_CU5_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
25738#define CGTS_CU5_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
25739#define CGTS_CU5_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
25740#define CGTS_CU5_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25741#define CGTS_CU5_SP0_CTRL_REG__SP01_MASK 0x007F0000L
25742#define CGTS_CU5_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
25743#define CGTS_CU5_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
25744#define CGTS_CU5_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
25745#define CGTS_CU5_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25746//CGTS_CU5_LDS_SQ_CTRL_REG
25747#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
25748#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
25749#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
25750#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
25751#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
25752#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
25753#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
25754#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
25755#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
25756#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25757#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
25758#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
25759#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
25760#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
25761#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25762#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
25763#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
25764#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
25765#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
25766#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25767//CGTS_CU5_TA_SQC_CTRL_REG
25768#define CGTS_CU5_TA_SQC_CTRL_REG__TA__SHIFT 0x0
25769#define CGTS_CU5_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
25770#define CGTS_CU5_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
25771#define CGTS_CU5_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
25772#define CGTS_CU5_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
25773#define CGTS_CU5_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
25774#define CGTS_CU5_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
25775#define CGTS_CU5_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
25776#define CGTS_CU5_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
25777#define CGTS_CU5_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25778//CGTS_CU5_SP1_CTRL_REG
25779#define CGTS_CU5_SP1_CTRL_REG__SP10__SHIFT 0x0
25780#define CGTS_CU5_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
25781#define CGTS_CU5_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
25782#define CGTS_CU5_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
25783#define CGTS_CU5_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
25784#define CGTS_CU5_SP1_CTRL_REG__SP11__SHIFT 0x10
25785#define CGTS_CU5_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
25786#define CGTS_CU5_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
25787#define CGTS_CU5_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
25788#define CGTS_CU5_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25789#define CGTS_CU5_SP1_CTRL_REG__SP10_MASK 0x0000007FL
25790#define CGTS_CU5_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
25791#define CGTS_CU5_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
25792#define CGTS_CU5_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
25793#define CGTS_CU5_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25794#define CGTS_CU5_SP1_CTRL_REG__SP11_MASK 0x007F0000L
25795#define CGTS_CU5_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
25796#define CGTS_CU5_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
25797#define CGTS_CU5_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
25798#define CGTS_CU5_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25799//CGTS_CU5_TD_TCP_CTRL_REG
25800#define CGTS_CU5_TD_TCP_CTRL_REG__TD__SHIFT 0x0
25801#define CGTS_CU5_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
25802#define CGTS_CU5_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
25803#define CGTS_CU5_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
25804#define CGTS_CU5_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
25805#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
25806#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
25807#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
25808#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
25809#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25810#define CGTS_CU5_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
25811#define CGTS_CU5_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
25812#define CGTS_CU5_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
25813#define CGTS_CU5_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
25814#define CGTS_CU5_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25815#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
25816#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
25817#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
25818#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
25819#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25820//CGTS_CU6_SP0_CTRL_REG
25821#define CGTS_CU6_SP0_CTRL_REG__SP00__SHIFT 0x0
25822#define CGTS_CU6_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
25823#define CGTS_CU6_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
25824#define CGTS_CU6_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
25825#define CGTS_CU6_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
25826#define CGTS_CU6_SP0_CTRL_REG__SP01__SHIFT 0x10
25827#define CGTS_CU6_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
25828#define CGTS_CU6_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
25829#define CGTS_CU6_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
25830#define CGTS_CU6_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25831#define CGTS_CU6_SP0_CTRL_REG__SP00_MASK 0x0000007FL
25832#define CGTS_CU6_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
25833#define CGTS_CU6_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
25834#define CGTS_CU6_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
25835#define CGTS_CU6_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25836#define CGTS_CU6_SP0_CTRL_REG__SP01_MASK 0x007F0000L
25837#define CGTS_CU6_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
25838#define CGTS_CU6_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
25839#define CGTS_CU6_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
25840#define CGTS_CU6_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25841//CGTS_CU6_LDS_SQ_CTRL_REG
25842#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
25843#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
25844#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
25845#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
25846#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
25847#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
25848#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
25849#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
25850#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
25851#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25852#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
25853#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
25854#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
25855#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
25856#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25857#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
25858#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
25859#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
25860#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
25861#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
25862//CGTS_CU6_TA_SQC_CTRL_REG
25863#define CGTS_CU6_TA_SQC_CTRL_REG__TA__SHIFT 0x0
25864#define CGTS_CU6_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
25865#define CGTS_CU6_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
25866#define CGTS_CU6_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
25867#define CGTS_CU6_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
25868#define CGTS_CU6_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
25869#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
25870#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
25871#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
25872#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
25873#define CGTS_CU6_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
25874#define CGTS_CU6_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
25875#define CGTS_CU6_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
25876#define CGTS_CU6_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
25877#define CGTS_CU6_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
25878#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
25879#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
25880#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
25881#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU6_SP1_CTRL_REG
#define CGTS_CU6_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU6_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU6_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU6_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU6_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU6_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU6_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU6_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU6_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU6_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU6_SP1_CTRL_REG__SP10_MASK 0x0000007FL
#define CGTS_CU6_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
#define CGTS_CU6_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU6_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU6_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU6_SP1_CTRL_REG__SP11_MASK 0x007F0000L
#define CGTS_CU6_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
#define CGTS_CU6_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU6_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU6_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU6_TD_TCP_CTRL_REG
#define CGTS_CU6_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU6_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU6_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU6_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU6_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU6_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
#define CGTS_CU6_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
#define CGTS_CU6_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU6_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU6_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU7_SP0_CTRL_REG
#define CGTS_CU7_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU7_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU7_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU7_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU7_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU7_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU7_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU7_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU7_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU7_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU7_SP0_CTRL_REG__SP00_MASK 0x0000007FL
#define CGTS_CU7_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
#define CGTS_CU7_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU7_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU7_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU7_SP0_CTRL_REG__SP01_MASK 0x007F0000L
#define CGTS_CU7_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
#define CGTS_CU7_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU7_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU7_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU7_LDS_SQ_CTRL_REG
#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU7_TA_SQC_CTRL_REG
#define CGTS_CU7_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU7_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU7_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU7_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU7_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU7_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
#define CGTS_CU7_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
#define CGTS_CU7_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU7_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU7_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
//CGTS_CU7_SP1_CTRL_REG
#define CGTS_CU7_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU7_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU7_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU7_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU7_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU7_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU7_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU7_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU7_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU7_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU7_SP1_CTRL_REG__SP10_MASK 0x0000007FL
#define CGTS_CU7_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
#define CGTS_CU7_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU7_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU7_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU7_SP1_CTRL_REG__SP11_MASK 0x007F0000L
#define CGTS_CU7_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
#define CGTS_CU7_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU7_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU7_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU7_TD_TCP_CTRL_REG
#define CGTS_CU7_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU7_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU7_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU7_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU7_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU7_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
#define CGTS_CU7_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
#define CGTS_CU7_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU7_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU7_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU8_SP0_CTRL_REG
#define CGTS_CU8_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU8_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU8_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU8_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU8_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU8_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU8_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU8_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU8_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU8_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU8_SP0_CTRL_REG__SP00_MASK 0x0000007FL
#define CGTS_CU8_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
#define CGTS_CU8_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU8_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU8_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU8_SP0_CTRL_REG__SP01_MASK 0x007F0000L
#define CGTS_CU8_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
#define CGTS_CU8_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU8_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU8_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU8_LDS_SQ_CTRL_REG
#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU8_TA_SQC_CTRL_REG
#define CGTS_CU8_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU8_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU8_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU8_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU8_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU8_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
#define CGTS_CU8_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
#define CGTS_CU8_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU8_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU8_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
//CGTS_CU8_SP1_CTRL_REG
#define CGTS_CU8_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU8_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU8_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU8_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU8_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU8_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU8_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU8_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU8_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU8_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU8_SP1_CTRL_REG__SP10_MASK 0x0000007FL
#define CGTS_CU8_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
#define CGTS_CU8_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU8_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU8_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU8_SP1_CTRL_REG__SP11_MASK 0x007F0000L
#define CGTS_CU8_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
#define CGTS_CU8_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU8_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU8_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU8_TD_TCP_CTRL_REG
#define CGTS_CU8_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU9_SP0_CTRL_REG
#define CGTS_CU9_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU9_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU9_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU9_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU9_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU9_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU9_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU9_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU9_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU9_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU9_SP0_CTRL_REG__SP00_MASK 0x0000007FL
#define CGTS_CU9_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
#define CGTS_CU9_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU9_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU9_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU9_SP0_CTRL_REG__SP01_MASK 0x007F0000L
#define CGTS_CU9_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
#define CGTS_CU9_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU9_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU9_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU9_LDS_SQ_CTRL_REG
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU9_TA_SQC_CTRL_REG
#define CGTS_CU9_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU9_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU9_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU9_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU9_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU9_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU9_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
#define CGTS_CU9_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
#define CGTS_CU9_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU9_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU9_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU9_SP1_CTRL_REG
#define CGTS_CU9_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU9_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU9_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU9_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU9_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU9_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU9_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU9_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU9_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU9_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU9_SP1_CTRL_REG__SP10_MASK 0x0000007FL
#define CGTS_CU9_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
#define CGTS_CU9_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU9_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU9_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU9_SP1_CTRL_REG__SP11_MASK 0x007F0000L
#define CGTS_CU9_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
#define CGTS_CU9_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU9_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU9_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU9_TD_TCP_CTRL_REG
#define CGTS_CU9_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU10_SP0_CTRL_REG
#define CGTS_CU10_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU10_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU10_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU10_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU10_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU10_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU10_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU10_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU10_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU10_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU10_SP0_CTRL_REG__SP00_MASK 0x0000007FL
#define CGTS_CU10_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
#define CGTS_CU10_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU10_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU10_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU10_SP0_CTRL_REG__SP01_MASK 0x007F0000L
#define CGTS_CU10_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
#define CGTS_CU10_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU10_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU10_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU10_LDS_SQ_CTRL_REG
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU10_TA_SQC_CTRL_REG
#define CGTS_CU10_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU10_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU10_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU10_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU10_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU10_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
#define CGTS_CU10_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
#define CGTS_CU10_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU10_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU10_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
//CGTS_CU10_SP1_CTRL_REG
#define CGTS_CU10_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU10_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU10_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU10_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU10_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU10_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU10_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU10_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU10_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU10_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU10_SP1_CTRL_REG__SP10_MASK 0x0000007FL
#define CGTS_CU10_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
#define CGTS_CU10_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU10_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU10_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU10_SP1_CTRL_REG__SP11_MASK 0x007F0000L
#define CGTS_CU10_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
#define CGTS_CU10_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU10_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU10_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU10_TD_TCP_CTRL_REG
#define CGTS_CU10_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU11_SP0_CTRL_REG
#define CGTS_CU11_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU11_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU11_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU11_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU11_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU11_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU11_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU11_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU11_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU11_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU11_SP0_CTRL_REG__SP00_MASK 0x0000007FL
#define CGTS_CU11_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
#define CGTS_CU11_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU11_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU11_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU11_SP0_CTRL_REG__SP01_MASK 0x007F0000L
#define CGTS_CU11_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
#define CGTS_CU11_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU11_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU11_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU11_LDS_SQ_CTRL_REG
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU11_TA_SQC_CTRL_REG
#define CGTS_CU11_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU11_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU11_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU11_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU11_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU11_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
#define CGTS_CU11_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
#define CGTS_CU11_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU11_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU11_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
//CGTS_CU11_SP1_CTRL_REG
#define CGTS_CU11_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU11_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU11_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU11_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU11_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU11_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU11_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU11_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU11_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU11_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU11_SP1_CTRL_REG__SP10_MASK 0x0000007FL
#define CGTS_CU11_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
#define CGTS_CU11_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU11_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU11_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU11_SP1_CTRL_REG__SP11_MASK 0x007F0000L
#define CGTS_CU11_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
#define CGTS_CU11_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU11_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU11_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU11_TD_TCP_CTRL_REG
#define CGTS_CU11_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU12_SP0_CTRL_REG
#define CGTS_CU12_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU12_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU12_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU12_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU12_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU12_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU12_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU12_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU12_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU12_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU12_SP0_CTRL_REG__SP00_MASK 0x0000007FL
#define CGTS_CU12_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
#define CGTS_CU12_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU12_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU12_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU12_SP0_CTRL_REG__SP01_MASK 0x007F0000L
#define CGTS_CU12_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
#define CGTS_CU12_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU12_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU12_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU12_LDS_SQ_CTRL_REG
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU12_TA_SQC_CTRL_REG
#define CGTS_CU12_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU12_SP1_CTRL_REG
#define CGTS_CU12_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU12_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU12_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU12_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU12_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU12_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU12_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU12_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU12_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU12_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU12_SP1_CTRL_REG__SP10_MASK 0x0000007FL
#define CGTS_CU12_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
#define CGTS_CU12_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU12_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU12_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU12_SP1_CTRL_REG__SP11_MASK 0x007F0000L
#define CGTS_CU12_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
#define CGTS_CU12_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU12_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU12_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU12_TD_TCP_CTRL_REG
#define CGTS_CU12_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU13_SP0_CTRL_REG
#define CGTS_CU13_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU13_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU13_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU13_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU13_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU13_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU13_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU13_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU13_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU13_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU13_SP0_CTRL_REG__SP00_MASK 0x0000007FL
#define CGTS_CU13_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
#define CGTS_CU13_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU13_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU13_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU13_SP0_CTRL_REG__SP01_MASK 0x007F0000L
#define CGTS_CU13_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
#define CGTS_CU13_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU13_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU13_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU13_LDS_SQ_CTRL_REG
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU13_TA_SQC_CTRL_REG
#define CGTS_CU13_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU13_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU13_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU13_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU13_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU13_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
#define CGTS_CU13_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
#define CGTS_CU13_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU13_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU13_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
//CGTS_CU13_SP1_CTRL_REG
#define CGTS_CU13_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU13_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU13_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU13_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU13_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU13_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU13_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU13_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU13_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU13_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU13_SP1_CTRL_REG__SP10_MASK 0x0000007FL
#define CGTS_CU13_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
#define CGTS_CU13_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU13_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU13_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU13_SP1_CTRL_REG__SP11_MASK 0x007F0000L
#define CGTS_CU13_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
#define CGTS_CU13_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU13_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU13_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU13_TD_TCP_CTRL_REG
#define CGTS_CU13_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU14_SP0_CTRL_REG
#define CGTS_CU14_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU14_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU14_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU14_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU14_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU14_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU14_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU14_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU14_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU14_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU14_SP0_CTRL_REG__SP00_MASK 0x0000007FL
#define CGTS_CU14_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
#define CGTS_CU14_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU14_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU14_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU14_SP0_CTRL_REG__SP01_MASK 0x007F0000L
#define CGTS_CU14_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
#define CGTS_CU14_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU14_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU14_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU14_LDS_SQ_CTRL_REG
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU14_TA_SQC_CTRL_REG
#define CGTS_CU14_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU14_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU14_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU14_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU14_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU14_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
#define CGTS_CU14_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
#define CGTS_CU14_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU14_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU14_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
//CGTS_CU14_SP1_CTRL_REG
#define CGTS_CU14_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU14_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU14_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU14_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU14_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU14_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU14_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU14_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU14_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU14_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU14_SP1_CTRL_REG__SP10_MASK 0x0000007FL
#define CGTS_CU14_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
#define CGTS_CU14_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU14_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU14_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU14_SP1_CTRL_REG__SP11_MASK 0x007F0000L
#define CGTS_CU14_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
#define CGTS_CU14_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU14_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU14_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU14_TD_TCP_CTRL_REG
#define CGTS_CU14_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU14_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU14_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU14_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU14_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU14_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
#define CGTS_CU14_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
#define CGTS_CU14_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU14_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU14_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU15_SP0_CTRL_REG
#define CGTS_CU15_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU15_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU15_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU15_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU15_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU15_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU15_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU15_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU15_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU15_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU15_SP0_CTRL_REG__SP00_MASK 0x0000007FL
#define CGTS_CU15_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
#define CGTS_CU15_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU15_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU15_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU15_SP0_CTRL_REG__SP01_MASK 0x007F0000L
#define CGTS_CU15_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
#define CGTS_CU15_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU15_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU15_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU15_LDS_SQ_CTRL_REG
#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU15_TA_SQC_CTRL_REG
#define CGTS_CU15_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU15_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU15_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU15_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU15_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU15_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU15_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
#define CGTS_CU15_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
#define CGTS_CU15_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU15_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU15_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU15_SP1_CTRL_REG
#define CGTS_CU15_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU15_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU15_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU15_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU15_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU15_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU15_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU15_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU15_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU15_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU15_SP1_CTRL_REG__SP10_MASK 0x0000007FL
#define CGTS_CU15_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
#define CGTS_CU15_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU15_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU15_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU15_SP1_CTRL_REG__SP11_MASK 0x007F0000L
#define CGTS_CU15_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
#define CGTS_CU15_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU15_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU15_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU15_TD_TCP_CTRL_REG
#define CGTS_CU15_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU15_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU15_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU15_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU15_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU15_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
#define CGTS_CU15_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
#define CGTS_CU15_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU15_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU15_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
//CGTS_CU0_TCPI_CTRL_REG
#define CGTS_CU0_TCPI_CTRL_REG__TCPI__SHIFT 0x0
#define CGTS_CU0_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
#define CGTS_CU0_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU0_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU0_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU0_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
#define CGTS_CU0_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
#define CGTS_CU0_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
#define CGTS_CU0_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU0_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU0_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU0_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
//CGTS_CU1_TCPI_CTRL_REG
#define CGTS_CU1_TCPI_CTRL_REG__TCPI__SHIFT 0x0
#define CGTS_CU1_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
#define CGTS_CU1_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU1_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU1_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU1_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
#define CGTS_CU1_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
#define CGTS_CU1_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
#define CGTS_CU1_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU1_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU1_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU1_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
//CGTS_CU2_TCPI_CTRL_REG
#define CGTS_CU2_TCPI_CTRL_REG__TCPI__SHIFT 0x0
#define CGTS_CU2_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
#define CGTS_CU2_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU2_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU2_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU2_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
#define CGTS_CU2_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
#define CGTS_CU2_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
#define CGTS_CU2_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
#define CGTS_CU2_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
#define CGTS_CU2_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
#define CGTS_CU2_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26849//CGTS_CU3_TCPI_CTRL_REG
26850#define CGTS_CU3_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26851#define CGTS_CU3_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26852#define CGTS_CU3_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26853#define CGTS_CU3_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26854#define CGTS_CU3_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26855#define CGTS_CU3_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26856#define CGTS_CU3_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26857#define CGTS_CU3_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26858#define CGTS_CU3_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26859#define CGTS_CU3_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26860#define CGTS_CU3_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26861#define CGTS_CU3_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26862//CGTS_CU4_TCPI_CTRL_REG
26863#define CGTS_CU4_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26864#define CGTS_CU4_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26865#define CGTS_CU4_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26866#define CGTS_CU4_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26867#define CGTS_CU4_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26868#define CGTS_CU4_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26869#define CGTS_CU4_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26870#define CGTS_CU4_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26871#define CGTS_CU4_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26872#define CGTS_CU4_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26873#define CGTS_CU4_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26874#define CGTS_CU4_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26875//CGTS_CU5_TCPI_CTRL_REG
26876#define CGTS_CU5_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26877#define CGTS_CU5_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26878#define CGTS_CU5_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26879#define CGTS_CU5_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26880#define CGTS_CU5_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26881#define CGTS_CU5_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26882#define CGTS_CU5_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26883#define CGTS_CU5_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26884#define CGTS_CU5_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26885#define CGTS_CU5_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26886#define CGTS_CU5_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26887#define CGTS_CU5_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26888//CGTS_CU6_TCPI_CTRL_REG
26889#define CGTS_CU6_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26890#define CGTS_CU6_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26891#define CGTS_CU6_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26892#define CGTS_CU6_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26893#define CGTS_CU6_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26894#define CGTS_CU6_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26895#define CGTS_CU6_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26896#define CGTS_CU6_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26897#define CGTS_CU6_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26898#define CGTS_CU6_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26899#define CGTS_CU6_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26900#define CGTS_CU6_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26901//CGTS_CU7_TCPI_CTRL_REG
26902#define CGTS_CU7_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26903#define CGTS_CU7_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26904#define CGTS_CU7_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26905#define CGTS_CU7_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26906#define CGTS_CU7_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26907#define CGTS_CU7_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26908#define CGTS_CU7_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26909#define CGTS_CU7_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26910#define CGTS_CU7_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26911#define CGTS_CU7_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26912#define CGTS_CU7_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26913#define CGTS_CU7_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26914//CGTS_CU8_TCPI_CTRL_REG
26915#define CGTS_CU8_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26916#define CGTS_CU8_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26917#define CGTS_CU8_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26918#define CGTS_CU8_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26919#define CGTS_CU8_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26920#define CGTS_CU8_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26921#define CGTS_CU8_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26922#define CGTS_CU8_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26923#define CGTS_CU8_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26924#define CGTS_CU8_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26925#define CGTS_CU8_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26926#define CGTS_CU8_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26927//CGTS_CU9_TCPI_CTRL_REG
26928#define CGTS_CU9_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26929#define CGTS_CU9_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26930#define CGTS_CU9_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26931#define CGTS_CU9_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26932#define CGTS_CU9_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26933#define CGTS_CU9_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26934#define CGTS_CU9_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26935#define CGTS_CU9_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26936#define CGTS_CU9_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26937#define CGTS_CU9_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26938#define CGTS_CU9_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26939#define CGTS_CU9_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26940//CGTS_CU10_TCPI_CTRL_REG
26941#define CGTS_CU10_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26942#define CGTS_CU10_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26943#define CGTS_CU10_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26944#define CGTS_CU10_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26945#define CGTS_CU10_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26946#define CGTS_CU10_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26947#define CGTS_CU10_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26948#define CGTS_CU10_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26949#define CGTS_CU10_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26950#define CGTS_CU10_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26951#define CGTS_CU10_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26952#define CGTS_CU10_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26953//CGTS_CU11_TCPI_CTRL_REG
26954#define CGTS_CU11_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26955#define CGTS_CU11_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26956#define CGTS_CU11_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26957#define CGTS_CU11_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26958#define CGTS_CU11_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26959#define CGTS_CU11_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26960#define CGTS_CU11_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26961#define CGTS_CU11_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26962#define CGTS_CU11_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26963#define CGTS_CU11_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26964#define CGTS_CU11_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26965#define CGTS_CU11_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26966//CGTS_CU12_TCPI_CTRL_REG
26967#define CGTS_CU12_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26968#define CGTS_CU12_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26969#define CGTS_CU12_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26970#define CGTS_CU12_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26971#define CGTS_CU12_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26972#define CGTS_CU12_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26973#define CGTS_CU12_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26974#define CGTS_CU12_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26975#define CGTS_CU12_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26976#define CGTS_CU12_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26977#define CGTS_CU12_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26978#define CGTS_CU12_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26979//CGTS_CU13_TCPI_CTRL_REG
26980#define CGTS_CU13_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26981#define CGTS_CU13_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26982#define CGTS_CU13_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26983#define CGTS_CU13_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26984#define CGTS_CU13_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26985#define CGTS_CU13_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26986#define CGTS_CU13_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
26987#define CGTS_CU13_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
26988#define CGTS_CU13_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
26989#define CGTS_CU13_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
26990#define CGTS_CU13_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
26991#define CGTS_CU13_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
26992//CGTS_CU14_TCPI_CTRL_REG
26993#define CGTS_CU14_TCPI_CTRL_REG__TCPI__SHIFT 0x0
26994#define CGTS_CU14_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
26995#define CGTS_CU14_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
26996#define CGTS_CU14_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
26997#define CGTS_CU14_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
26998#define CGTS_CU14_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
26999#define CGTS_CU14_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
27000#define CGTS_CU14_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
27001#define CGTS_CU14_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
27002#define CGTS_CU14_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
27003#define CGTS_CU14_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
27004#define CGTS_CU14_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
27005//CGTS_CU15_TCPI_CTRL_REG
27006#define CGTS_CU15_TCPI_CTRL_REG__TCPI__SHIFT 0x0
27007#define CGTS_CU15_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
27008#define CGTS_CU15_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
27009#define CGTS_CU15_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
27010#define CGTS_CU15_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
27011#define CGTS_CU15_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
27012#define CGTS_CU15_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
27013#define CGTS_CU15_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
27014#define CGTS_CU15_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
27015#define CGTS_CU15_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
27016#define CGTS_CU15_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
27017#define CGTS_CU15_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
27018//CGTT_SPI_CLK_CTRL
27019#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
27020#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27021#define CGTT_SPI_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x12
27022#define CGTT_SPI_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x18
27023#define CGTT_SPI_CLK_CTRL__ALL_CLK_ON_OVERRIDE__SHIFT 0x1a
27024#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
27025#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
27026#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
27027#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
27028#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
27029#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27030#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27031#define CGTT_SPI_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x00FC0000L
27032#define CGTT_SPI_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x01000000L
27033#define CGTT_SPI_CLK_CTRL__ALL_CLK_ON_OVERRIDE_MASK 0x04000000L
27034#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
27035#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
27036#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
27037#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
27038#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
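/*
 * Illustrative note (not part of the original header): every field above is
 * described by a *__SHIFT / *_MASK pair, so a driver can read or update one
 * field of a register value with the usual mask-and-shift pattern. A minimal
 * sketch, assuming hypothetical helper names and a CGTT_SPI_CLK_CTRL value
 * already read from hardware:
 */
#include <stdint.h>

/* Extract the OFF_HYSTERESIS field from a CGTT_SPI_CLK_CTRL value. */
static inline uint32_t cgtt_spi_get_off_hysteresis(uint32_t reg)
{
	return (reg & CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK) >>
	       CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT;
}

/* Return 'reg' with its OFF_HYSTERESIS field replaced by 'hyst'. */
static inline uint32_t cgtt_spi_set_off_hysteresis(uint32_t reg, uint32_t hyst)
{
	reg &= ~CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK;
	reg |= (hyst << CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT) &
	       CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK;
	return reg;
}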
27039//CGTT_PC_CLK_CTRL
27040#define CGTT_PC_CLK_CTRL__ON_DELAY__SHIFT 0x0
27041#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27042#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x12
27043#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x18
27044#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE__SHIFT 0x19
27045#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE__SHIFT 0x1a
27046#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
27047#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
27048#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
27049#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
27050#define CGTT_PC_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
27051#define CGTT_PC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27052#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27053#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x00FC0000L
27054#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x01000000L
27055#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE_MASK 0x02000000L
27056#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE_MASK 0x04000000L
27057#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
27058#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
27059#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
27060#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
27061#define CGTT_PC_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
27062//CGTT_BCI_CLK_CTRL
27063#define CGTT_BCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
27064#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27065#define CGTT_BCI_CLK_CTRL__RESERVED__SHIFT 0xc
27066#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27067#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27068#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27069#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27070#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27071#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27072#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27073#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27074#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE__SHIFT 0x18
27075#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE__SHIFT 0x19
27076#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE__SHIFT 0x1a
27077#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
27078#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
27079#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
27080#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
27081#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
27082#define CGTT_BCI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27083#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27084#define CGTT_BCI_CLK_CTRL__RESERVED_MASK 0x0000F000L
27085#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27086#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27087#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27088#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27089#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27090#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27091#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27092#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27093#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE_MASK 0x01000000L
27094#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE_MASK 0x02000000L
27095#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE_MASK 0x04000000L
27096#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
27097#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
27098#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
27099#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
27100#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
27101//CGTT_VGT_CLK_CTRL
27102#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
27103#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27104#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
27105#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27106#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27107#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27108#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27109#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27110#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27111#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27112#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE9__SHIFT 0x18
27113#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE8__SHIFT 0x19
27114#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x1a
27115#define CGTT_VGT_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1b
27116#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
27117#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE__SHIFT 0x1d
27118#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
27119#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
27120#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27121#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27122#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
27123#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27124#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27125#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27126#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27127#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27128#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27129#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27130#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE9_MASK 0x01000000L
27131#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE8_MASK 0x02000000L
27132#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x04000000L
27133#define CGTT_VGT_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x08000000L
27134#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
27135#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE_MASK 0x20000000L
27136#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
27137#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
27138//CGTT_IA_CLK_CTRL
27139#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
27140#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27141#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27142#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27143#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27144#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27145#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27146#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27147#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27148#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27149#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
27150#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0x19
27151#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
27152#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
27153#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
27154#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
27155#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
27156#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27157#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27158#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27159#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27160#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27161#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27162#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27163#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27164#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27165#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27166#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
27167#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
27168#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
27169#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
27170#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
27171#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
27172#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
27173//CGTT_WD_CLK_CTRL
27174#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
27175#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27176#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
27177#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27178#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27179#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27180#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27181#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27182#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27183#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27184#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE8__SHIFT 0x19
27185#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x1a
27186#define CGTT_WD_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1b
27187#define CGTT_WD_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
27188#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
27189#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
27190#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
27191#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27192#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27193#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
27194#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27195#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27196#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27197#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27198#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27199#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27200#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27201#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE8_MASK 0x02000000L
27202#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x04000000L
27203#define CGTT_WD_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x08000000L
27204#define CGTT_WD_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
27205#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
27206#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
27207#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
27208//CGTT_PA_CLK_CTRL
27209#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x0
27210#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27211#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27212#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27213#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27214#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27215#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27216#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27217#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27218#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
27219#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
27220#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
27221#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
27222#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
27223#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
27224#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
27225#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE__SHIFT 0x1f
27226#define CGTT_PA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27227#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27228#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27229#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27230#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27231#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27232#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27233#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27234#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27235#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
27236#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
27237#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
27238#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
27239#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
27240#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
27241#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
27242#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE_MASK 0x80000000L
27243//CGTT_SC_CLK_CTRL0
27244#define CGTT_SC_CLK_CTRL0__ON_DELAY__SHIFT 0x0
27245#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
27246#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE__SHIFT 0x10
27247#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x11
27248#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x12
27249#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x13
27250#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x14
27251#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x15
27252#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x16
27253#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE__SHIFT 0x17
27254#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE__SHIFT 0x18
27255#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x19
27256#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1a
27257#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1b
27258#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1c
27259#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1d
27260#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1e
27261#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
27262#define CGTT_SC_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
27263#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
27264#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE_MASK 0x00010000L
27265#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
27266#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
27267#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
27268#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
27269#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
27270#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
27271#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE_MASK 0x00800000L
27272#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE_MASK 0x01000000L
27273#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x02000000L
27274#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x04000000L
27275#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x08000000L
27276#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x10000000L
27277#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x20000000L
27278#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x40000000L
27279#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
27280//CGTT_SC_CLK_CTRL1
27281#define CGTT_SC_CLK_CTRL1__ON_DELAY__SHIFT 0x0
27282#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
27283#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE__SHIFT 0x11
27284#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE__SHIFT 0x12
27285#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE__SHIFT 0x13
27286#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE__SHIFT 0x14
27287#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE__SHIFT 0x15
27288#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE__SHIFT 0x16
27289#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE__SHIFT 0x19
27290#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE__SHIFT 0x1a
27291#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE__SHIFT 0x1b
27292#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE__SHIFT 0x1c
27293#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE__SHIFT 0x1d
27294#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE__SHIFT 0x1e
27295#define CGTT_SC_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
27296#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
27297#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE_MASK 0x00020000L
27298#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE_MASK 0x00040000L
27299#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE_MASK 0x00080000L
27300#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE_MASK 0x00100000L
27301#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE_MASK 0x00200000L
27302#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE_MASK 0x00400000L
27303#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE_MASK 0x02000000L
27304#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE_MASK 0x04000000L
27305#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE_MASK 0x08000000L
27306#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE_MASK 0x10000000L
27307#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE_MASK 0x20000000L
27308#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE_MASK 0x40000000L
27309//CGTT_SQ_CLK_CTRL
27310#define CGTT_SQ_CLK_CTRL__ON_DELAY__SHIFT 0x0
27311#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27312#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27313#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27314#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27315#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27316#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27317#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27318#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27319#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27320#define CGTT_SQ_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
27321#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
27322#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
27323#define CGTT_SQ_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27324#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27325#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27326#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27327#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27328#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27329#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27330#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27331#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27332#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27333#define CGTT_SQ_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
27334#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
27335#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
27336//CGTT_SQG_CLK_CTRL
27337#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
27338#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27339#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27340#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27341#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27342#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27343#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27344#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27345#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27346#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27347#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
27348#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
27349#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
27350#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
27351#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27352#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27353#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27354#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27355#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27356#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27357#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27358#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27359#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27360#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27361#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000L
27362#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
27363#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
27364#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
27365//SQ_ALU_CLK_CTRL
27366#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
27367#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
27368#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
27369#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
27370//SQ_TEX_CLK_CTRL
27371#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
27372#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
27373#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
27374#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
27375//SQ_LDS_CLK_CTRL
27376#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
27377#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
27378#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
27379#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
27380//SQ_POWER_THROTTLE
27381#define SQ_POWER_THROTTLE__MIN_POWER__SHIFT 0x0
27382#define SQ_POWER_THROTTLE__MAX_POWER__SHIFT 0x10
27383#define SQ_POWER_THROTTLE__PHASE_OFFSET__SHIFT 0x1e
27384#define SQ_POWER_THROTTLE__MIN_POWER_MASK 0x00003FFFL
27385#define SQ_POWER_THROTTLE__MAX_POWER_MASK 0x3FFF0000L
27386#define SQ_POWER_THROTTLE__PHASE_OFFSET_MASK 0xC0000000L
27387//SQ_POWER_THROTTLE2
27388#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT 0x0
27389#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
27390#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
27391#define SQ_POWER_THROTTLE2__USE_REF_CLOCK__SHIFT 0x1f
27392#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK 0x00003FFFL
27393#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
27394#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
27395#define SQ_POWER_THROTTLE2__USE_REF_CLOCK_MASK 0x80000000L
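/*
 * Illustrative note (not part of the original header): the SQ_POWER_THROTTLE
 * fields above pack into one 32-bit register value the same way. A minimal
 * sketch with a hypothetical helper name, assuming the caller has already
 * range-checked the inputs against the field widths implied by the masks:
 */
static inline uint32_t sq_power_throttle_pack(uint32_t min_power,
					      uint32_t max_power,
					      uint32_t phase_offset)
{
	uint32_t reg = 0;

	reg |= (min_power << SQ_POWER_THROTTLE__MIN_POWER__SHIFT) &
	       SQ_POWER_THROTTLE__MIN_POWER_MASK;
	reg |= (max_power << SQ_POWER_THROTTLE__MAX_POWER__SHIFT) &
	       SQ_POWER_THROTTLE__MAX_POWER_MASK;
	reg |= (phase_offset << SQ_POWER_THROTTLE__PHASE_OFFSET__SHIFT) &
	       SQ_POWER_THROTTLE__PHASE_OFFSET_MASK;
	return reg;
}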
27396//CGTT_SX_CLK_CTRL0
27397#define CGTT_SX_CLK_CTRL0__ON_DELAY__SHIFT 0x0
27398#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
27399#define CGTT_SX_CLK_CTRL0__RESERVED__SHIFT 0xc
27400#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27401#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27402#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27403#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27404#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27405#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27406#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27407#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27408#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
27409#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
27410#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
27411#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
27412#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
27413#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
27414#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
27415#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
27416#define CGTT_SX_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
27417#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
27418#define CGTT_SX_CLK_CTRL0__RESERVED_MASK 0x0000F000L
27419#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27420#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27421#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27422#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27423#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27424#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27425#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27426#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27427#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
27428#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
27429#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
27430#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
27431#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
27432#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
27433#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
27434#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
27435//CGTT_SX_CLK_CTRL1
27436#define CGTT_SX_CLK_CTRL1__ON_DELAY__SHIFT 0x0
27437#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
27438#define CGTT_SX_CLK_CTRL1__RESERVED__SHIFT 0xc
27439#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27440#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27441#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27442#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27443#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27444#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27445#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27446#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27447#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
27448#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
27449#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
27450#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
27451#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
27452#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
27453#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0__SHIFT 0x1f
27454#define CGTT_SX_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
27455#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
27456#define CGTT_SX_CLK_CTRL1__RESERVED_MASK 0x0000F000L
27457#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27458#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27459#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27460#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27461#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27462#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27463#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27464#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27465#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
27466#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
27467#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
27468#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
27469#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
27470#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
27471#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0_MASK 0x80000000L
27472//CGTT_SX_CLK_CTRL2
27473#define CGTT_SX_CLK_CTRL2__ON_DELAY__SHIFT 0x0
27474#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
27475#define CGTT_SX_CLK_CTRL2__RESERVED__SHIFT 0xd
27476#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27477#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27478#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27479#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27480#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27481#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27482#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27483#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27484#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
27485#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
27486#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
27487#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
27488#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
27489#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
27490#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0__SHIFT 0x1f
27491#define CGTT_SX_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
27492#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
27493#define CGTT_SX_CLK_CTRL2__RESERVED_MASK 0x0000E000L
27494#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27495#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27496#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27497#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27498#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27499#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27500#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27501#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27502#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
27503#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
27504#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
27505#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
27506#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
27507#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
27508#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0_MASK 0x80000000L
27509//CGTT_SX_CLK_CTRL3
27510#define CGTT_SX_CLK_CTRL3__ON_DELAY__SHIFT 0x0
27511#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
27512#define CGTT_SX_CLK_CTRL3__RESERVED__SHIFT 0xd
27513#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27514#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27515#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27516#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27517#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27518#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27519#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27520#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27521#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
27522#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
27523#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
27524#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
27525#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
27526#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
27527#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0__SHIFT 0x1f
27528#define CGTT_SX_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
27529#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
27530#define CGTT_SX_CLK_CTRL3__RESERVED_MASK 0x0000E000L
27531#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27532#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27533#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27534#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27535#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27536#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27537#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27538#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27539#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
27540#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
27541#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
27542#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
27543#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
27544#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
27545#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0_MASK 0x80000000L
27546//CGTT_SX_CLK_CTRL4
27547#define CGTT_SX_CLK_CTRL4__ON_DELAY__SHIFT 0x0
27548#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS__SHIFT 0x4
27549#define CGTT_SX_CLK_CTRL4__RESERVED__SHIFT 0xc
27550#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27551#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27552#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27553#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27554#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27555#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27556#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27557#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27558#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6__SHIFT 0x19
27559#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5__SHIFT 0x1a
27560#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4__SHIFT 0x1b
27561#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3__SHIFT 0x1c
27562#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2__SHIFT 0x1d
27563#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1__SHIFT 0x1e
27564#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0__SHIFT 0x1f
27565#define CGTT_SX_CLK_CTRL4__ON_DELAY_MASK 0x0000000FL
27566#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS_MASK 0x00000FF0L
27567#define CGTT_SX_CLK_CTRL4__RESERVED_MASK 0x0000F000L
27568#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27569#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27570#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27571#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27572#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27573#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27574#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27575#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27576#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6_MASK 0x02000000L
27577#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5_MASK 0x04000000L
27578#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4_MASK 0x08000000L
27579#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3_MASK 0x10000000L
27580#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2_MASK 0x20000000L
27581#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1_MASK 0x40000000L
27582#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0_MASK 0x80000000L
27583//TD_CGTT_CTRL
27584#define TD_CGTT_CTRL__ON_DELAY__SHIFT 0x0
27585#define TD_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27586#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27587#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27588#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27589#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27590#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27591#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27592#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27593#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27594#define TD_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
27595#define TD_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
27596#define TD_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
27597#define TD_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
27598#define TD_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
27599#define TD_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
27600#define TD_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
27601#define TD_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
27602#define TD_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
27603#define TD_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27604#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27605#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27606#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27607#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27608#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27609#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27610#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27611#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27612#define TD_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
27613#define TD_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
27614#define TD_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
27615#define TD_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
27616#define TD_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
27617#define TD_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
27618#define TD_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
27619#define TD_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
27620//TA_CGTT_CTRL
27621#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
27622#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27623#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27624#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27625#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27626#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27627#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27628#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27629#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27630#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27631#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
27632#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
27633#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
27634#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
27635#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
27636#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
27637#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
27638#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
27639#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
27640#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27641#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27642#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27643#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27644#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27645#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27646#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27647#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27648#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27649#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
27650#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
27651#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
27652#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
27653#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
27654#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
27655#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
27656#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
27657//CGTT_TCPI_CLK_CTRL
27658#define CGTT_TCPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
27659#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27660#define CGTT_TCPI_CLK_CTRL__SPARE__SHIFT 0xc
27661#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27662#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27663#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27664#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27665#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27666#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27667#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27668#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27669#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
27670#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
27671#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
27672#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
27673#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
27674#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
27675#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
27676#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
27677#define CGTT_TCPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27678#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27679#define CGTT_TCPI_CLK_CTRL__SPARE_MASK 0x0000F000L
27680#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27681#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27682#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27683#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27684#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27685#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27686#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27687#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27688#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
27689#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
27690#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
27691#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
27692#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
27693#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
27694#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
27695#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
27696//CGTT_TCI_CLK_CTRL
27697#define CGTT_TCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
27698#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27699#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27700#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27701#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27702#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27703#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27704#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27705#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27706#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27707#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
27708#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
27709#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
27710#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
27711#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
27712#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
27713#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
27714#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
27715#define CGTT_TCI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27716#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27717#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27718#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27719#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27720#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27721#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27722#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27723#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27724#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27725#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
27726#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
27727#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
27728#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
27729#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
27730#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
27731#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
27732#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
27733//CGTT_GDS_CLK_CTRL
27734#define CGTT_GDS_CLK_CTRL__ON_DELAY__SHIFT 0x0
27735#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27736#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27737#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27738#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27739#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27740#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27741#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27742#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27743#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27744#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
27745#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
27746#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
27747#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
27748#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
27749#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
27750#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
27751#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
27752#define CGTT_GDS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27753#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27754#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27755#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27756#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27757#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27758#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27759#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27760#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27761#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27762#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
27763#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
27764#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
27765#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
27766#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
27767#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
27768#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
27769#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
27770//DB_CGTT_CLK_CTRL_0
27771#define DB_CGTT_CLK_CTRL_0__ON_DELAY__SHIFT 0x0
27772#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS__SHIFT 0x4
27773#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0xc
27774#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27775#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27776#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27777#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27778#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27779#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27780#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27781#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27782#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x18
27783#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x19
27784#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x1a
27785#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x1b
27786#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x1c
27787#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x1d
27788#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1e
27789#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x1f
27790#define DB_CGTT_CLK_CTRL_0__ON_DELAY_MASK 0x0000000FL
27791#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS_MASK 0x00000FF0L
27792#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0x0000F000L
27793#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27794#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27795#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27796#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27797#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27798#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27799#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27800#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27801#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x01000000L
27802#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x02000000L
27803#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x04000000L
27804#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x08000000L
27805#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x10000000L
27806#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x20000000L
27807#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x40000000L
27808#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x80000000L
27809//CB_CGTT_SCLK_CTRL
27810#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
27811#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27812#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27813#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27814#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27815#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27816#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27817#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27818#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27819#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27820#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
27821#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
27822#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
27823#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
27824#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
27825#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
27826#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
27827#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
27828#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
27829#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27830#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27831#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27832#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27833#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27834#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27835#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27836#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27837#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27838#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
27839#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
27840#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
27841#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
27842#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
27843#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
27844#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
27845#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
27846//TCC_CGTT_SCLK_CTRL
27847#define TCC_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
27848#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27849#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27850#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27851#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27852#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27853#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27854#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27855#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27856#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27857#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
27858#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
27859#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
27860#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
27861#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
27862#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
27863#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
27864#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
27865#define TCC_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
27866#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27867#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27868#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27869#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27870#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27871#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27872#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27873#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27874#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27875#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
27876#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
27877#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
27878#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
27879#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
27880#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
27881#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
27882#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
27883//TCA_CGTT_SCLK_CTRL
27884#define TCA_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
27885#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27886#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27887#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27888#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27889#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27890#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27891#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27892#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27893#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27894#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
27895#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
27896#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
27897#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
27898#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
27899#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
27900#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
27901#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
27902#define TCA_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
27903#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27904#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27905#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27906#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27907#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27908#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27909#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27910#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27911#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27912#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
27913#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
27914#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
27915#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
27916#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
27917#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
27918#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
27919#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
27920//CGTT_CP_CLK_CTRL
27921#define CGTT_CP_CLK_CTRL__ON_DELAY__SHIFT 0x0
27922#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27923#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
27924#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27925#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27926#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27927#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27928#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27929#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27930#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27931#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27932#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
27933#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
27934#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
27935#define CGTT_CP_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27936#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27937#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
27938#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27939#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27940#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27941#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27942#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27943#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27944#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27945#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27946#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
27947#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
27948#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
27949//CGTT_CPF_CLK_CTRL
27950#define CGTT_CPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
27951#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27952#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
27953#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27954#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27955#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27956#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27957#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27958#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27959#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27960#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27961#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
27962#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
27963#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
27964#define CGTT_CPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27965#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27966#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
27967#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27968#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27969#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27970#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
27971#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
27972#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
27973#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
27974#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
27975#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
27976#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
27977#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
27978//CGTT_CPC_CLK_CTRL
27979#define CGTT_CPC_CLK_CTRL__ON_DELAY__SHIFT 0x0
27980#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
27981#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
27982#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
27983#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
27984#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
27985#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
27986#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
27987#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
27988#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
27989#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
27990#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
27991#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
27992#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
27993#define CGTT_CPC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
27994#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
27995#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
27996#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
27997#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
27998#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
27999#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
28000#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
28001#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
28002#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
28003#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
28004#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
28005#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
28006#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
28007//RLC_PWR_CTRL
28008#define RLC_PWR_CTRL__MON_CGPG_RTN_EN__SHIFT 0x0
28009#define RLC_PWR_CTRL__RESERVED__SHIFT 0x1
28010#define RLC_PWR_CTRL__DLDO_STATUS__SHIFT 0x8
28011#define RLC_PWR_CTRL__MON_CGPG_RTN_EN_MASK 0x00000001L
28012#define RLC_PWR_CTRL__RESERVED_MASK 0x000000FEL
28013#define RLC_PWR_CTRL__DLDO_STATUS_MASK 0x00000100L
28014//CGTT_RLC_CLK_CTRL
28015#define CGTT_RLC_CLK_CTRL__ON_DELAY__SHIFT 0x0
28016#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
28017#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
28018#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
28019#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
28020#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
28021#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
28022#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
28023#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
28024#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
28025#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
28026#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
28027#define CGTT_RLC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
28028#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
28029#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
28030#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
28031#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
28032#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
28033#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
28034#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
28035#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
28036#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
28037#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
28038#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
28039//RLC_GFX_RM_CNTL
28040#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID__SHIFT 0x0
28041#define RLC_GFX_RM_CNTL__RESERVED__SHIFT 0x1
28042#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID_MASK 0x00000001L
28043#define RLC_GFX_RM_CNTL__RESERVED_MASK 0xFFFFFFFEL
28044//RMI_CGTT_SCLK_CTRL
28045#define RMI_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
28046#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
28047#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
28048#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
28049#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
28050#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
28051#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
28052#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
28053#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
28054#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
28055#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
28056#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
28057#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
28058#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
28059#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
28060#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
28061#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
28062#define RMI_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
28063#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
28064#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
28065#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
28066#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
28067#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
28068#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
28069#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
28070#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
28071#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
28072#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
28073#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
28074#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
28075#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
28076#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
28077#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
28078#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
28079//CGTT_TCPF_CLK_CTRL
28080#define CGTT_TCPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
28081#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
28082#define CGTT_TCPF_CLK_CTRL__SPARE__SHIFT 0xc
28083#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
28084#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
28085#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
28086#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
28087#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
28088#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
28089#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
28090#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
28091#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
28092#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
28093#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
28094#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
28095#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
28096#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
28097#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
28098#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
28099#define CGTT_TCPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
28100#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
28101#define CGTT_TCPF_CLK_CTRL__SPARE_MASK 0x0000F000L
28102#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
28103#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
28104#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
28105#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
28106#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
28107#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
28108#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
28109#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
28110#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
28111#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
28112#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
28113#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
28114#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
28115#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
28116#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
28117#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
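// The *__SHIFT / *_MASK pairs above are meant to be consumed together: mask a
// field out of a register readback, or shift a value into place before OR-ing
// it back in.  The sketch below is illustrative only and is not part of the
// generated header; the GET_FIELD()/SET_FIELD() helper names and the
// cgtt_tcpf_force_soft_override0() wrapper are assumptions for this example
// (the amdgpu driver has its own equivalents), while the CGTT_TCPF_CLK_CTRL
// defines themselves come from this file.
#define GET_FIELD(val, reg, field)					\
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, fval)				\
	(((val) & ~reg##__##field##_MASK) |				\
	 (((fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

// Example: read-modify-write style update that forces SOFT_OVERRIDE0 on in a
// previously read CGTT_TCPF_CLK_CTRL value.
static inline unsigned int cgtt_tcpf_force_soft_override0(unsigned int reg_val)
{
	return SET_FIELD(reg_val, CGTT_TCPF_CLK_CTRL, SOFT_OVERRIDE0, 1u);
}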
28118
28119
28120// addressBlock: gc_ea_pwrdec
28121//GCEA_CGTT_CLK_CTRL
28122#define GCEA_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
28123#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
28124#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
28125#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
28126#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
28127#define GCEA_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
28128#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
28129#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
28130#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
28131#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
28132
28133
28134// addressBlock: gc_utcl2_vmsharedhvdec
28135//MC_VM_FB_SIZE_OFFSET_VF0
28136#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
28137#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
28138#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
28139#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
28140//MC_VM_FB_SIZE_OFFSET_VF1
28141#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
28142#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
28143#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
28144#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
28145//MC_VM_FB_SIZE_OFFSET_VF2
28146#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
28147#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
28148#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
28149#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
28150//MC_VM_FB_SIZE_OFFSET_VF3
28151#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
28152#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
28153#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
28154#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
28155//MC_VM_FB_SIZE_OFFSET_VF4
28156#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
28157#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
28158#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
28159#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
28160//MC_VM_FB_SIZE_OFFSET_VF5
28161#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
28162#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
28163#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
28164#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
28165//MC_VM_FB_SIZE_OFFSET_VF6
28166#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
28167#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
28168#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
28169#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
28170//MC_VM_FB_SIZE_OFFSET_VF7
28171#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
28172#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
28173#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
28174#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
28175//MC_VM_FB_SIZE_OFFSET_VF8
28176#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
28177#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
28178#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
28179#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
28180//MC_VM_FB_SIZE_OFFSET_VF9
28181#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
28182#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
28183#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
28184#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
28185//MC_VM_FB_SIZE_OFFSET_VF10
28186#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
28187#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
28188#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
28189#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
28190//MC_VM_FB_SIZE_OFFSET_VF11
28191#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
28192#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
28193#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
28194#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
28195//MC_VM_FB_SIZE_OFFSET_VF12
28196#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
28197#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
28198#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
28199#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
28200//MC_VM_FB_SIZE_OFFSET_VF13
28201#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
28202#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
28203#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
28204#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
28205//MC_VM_FB_SIZE_OFFSET_VF14
28206#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
28207#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
28208#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
28209#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
28210//MC_VM_FB_SIZE_OFFSET_VF15
28211#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
28212#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
28213#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
28214#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
28215//VM_IOMMU_MMIO_CNTRL_1
28216#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
28217#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
28218//MC_VM_MARC_BASE_LO_0
28219#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
28220#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
28221//MC_VM_MARC_BASE_LO_1
28222#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
28223#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
28224//MC_VM_MARC_BASE_LO_2
28225#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
28226#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
28227//MC_VM_MARC_BASE_LO_3
28228#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
28229#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
28230//MC_VM_MARC_BASE_HI_0
28231#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
28232#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
28233//MC_VM_MARC_BASE_HI_1
28234#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
28235#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
28236//MC_VM_MARC_BASE_HI_2
28237#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
28238#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
28239//MC_VM_MARC_BASE_HI_3
28240#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
28241#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
28242//MC_VM_MARC_RELOC_LO_0
28243#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
28244#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
28245#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
28246#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
28247#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
28248#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
28249//MC_VM_MARC_RELOC_LO_1
28250#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
28251#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
28252#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
28253#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
28254#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
28255#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
28256//MC_VM_MARC_RELOC_LO_2
28257#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
28258#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
28259#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
28260#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
28261#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
28262#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
28263//MC_VM_MARC_RELOC_LO_3
28264#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
28265#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
28266#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
28267#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
28268#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
28269#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
28270//MC_VM_MARC_RELOC_HI_0
28271#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
28272#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
28273//MC_VM_MARC_RELOC_HI_1
28274#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
28275#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
28276//MC_VM_MARC_RELOC_HI_2
28277#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
28278#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
28279//MC_VM_MARC_RELOC_HI_3
28280#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
28281#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
28282//MC_VM_MARC_LEN_LO_0
28283#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
28284#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
28285//MC_VM_MARC_LEN_LO_1
28286#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
28287#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
28288//MC_VM_MARC_LEN_LO_2
28289#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
28290#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
28291//MC_VM_MARC_LEN_LO_3
28292#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
28293#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
28294//MC_VM_MARC_LEN_HI_0
28295#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
28296#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
28297//MC_VM_MARC_LEN_HI_1
28298#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
28299#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
28300//MC_VM_MARC_LEN_HI_2
28301#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
28302#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
28303//MC_VM_MARC_LEN_HI_3
28304#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
28305#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
28306//VM_IOMMU_CONTROL_REGISTER
28307#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
28308#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
28309//VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
28310#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
28311#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
28312//VM_PCIE_ATS_CNTL
28313#define VM_PCIE_ATS_CNTL__STU__SHIFT 0x10
28314#define VM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
28315#define VM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
28316#define VM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
28317//VM_PCIE_ATS_CNTL_VF_0
28318#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
28319#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
28320//VM_PCIE_ATS_CNTL_VF_1
28321#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
28322#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
28323//VM_PCIE_ATS_CNTL_VF_2
28324#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
28325#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
28326//VM_PCIE_ATS_CNTL_VF_3
28327#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
28328#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
28329//VM_PCIE_ATS_CNTL_VF_4
28330#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
28331#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
28332//VM_PCIE_ATS_CNTL_VF_5
28333#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
28334#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
28335//VM_PCIE_ATS_CNTL_VF_6
28336#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
28337#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
28338//VM_PCIE_ATS_CNTL_VF_7
28339#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
28340#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
28341//VM_PCIE_ATS_CNTL_VF_8
28342#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
28343#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
28344//VM_PCIE_ATS_CNTL_VF_9
28345#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
28346#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
28347//VM_PCIE_ATS_CNTL_VF_10
28348#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
28349#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
28350//VM_PCIE_ATS_CNTL_VF_11
28351#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
28352#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
28353//VM_PCIE_ATS_CNTL_VF_12
28354#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
28355#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
28356//VM_PCIE_ATS_CNTL_VF_13
28357#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
28358#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
28359//VM_PCIE_ATS_CNTL_VF_14
28360#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
28361#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
28362//VM_PCIE_ATS_CNTL_VF_15
28363#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
28364#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
28365//UTCL2_CGTT_CLK_CTRL
28366#define UTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
28367#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
28368#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc
28369#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
28370#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
28371#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
28372#define UTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
28373#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
28374#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L
28375#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
28376#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
28377#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
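// Illustrative sketch, not part of the generated header: unpacking one of the
// MC_VM_FB_SIZE_OFFSET_VFn registers from this block into its two 16-bit
// fields.  The decode_vf0_fb_size_offset() helper, the vf_fb_info struct and
// the raw 'val' argument (a register readback obtained elsewhere) are
// assumptions for this example; no particular unit for the size/offset
// values is asserted here.
struct vf_fb_info {
	unsigned int fb_size;	/* VF_FB_SIZE field, bits 15:0   */
	unsigned int fb_offset;	/* VF_FB_OFFSET field, bits 31:16 */
};

static inline struct vf_fb_info decode_vf0_fb_size_offset(unsigned int val)
{
	struct vf_fb_info info;

	info.fb_size = (val & MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK) >>
			MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT;
	info.fb_offset = (val & MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK) >>
			  MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT;
	return info;
}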
28378
28379
28380// addressBlock: gc_hypdec
28381//CP_HYP_PFP_UCODE_ADDR
28382#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28383#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
28384//CP_PFP_UCODE_ADDR
28385#define CP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28386#define CP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
28387//CP_HYP_PFP_UCODE_DATA
28388#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28389#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28390//CP_PFP_UCODE_DATA
28391#define CP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28392#define CP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28393//CP_HYP_ME_UCODE_ADDR
28394#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28395#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR_MASK 0x00001FFFL
28396//CP_ME_RAM_RADDR
28397#define CP_ME_RAM_RADDR__ME_RAM_RADDR__SHIFT 0x0
28398#define CP_ME_RAM_RADDR__ME_RAM_RADDR_MASK 0x00001FFFL
28399//CP_ME_RAM_WADDR
28400#define CP_ME_RAM_WADDR__ME_RAM_WADDR__SHIFT 0x0
28401#define CP_ME_RAM_WADDR__ME_RAM_WADDR_MASK 0x00001FFFL
28402//CP_HYP_ME_UCODE_DATA
28403#define CP_HYP_ME_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28404#define CP_HYP_ME_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28405//CP_ME_RAM_DATA
28406#define CP_ME_RAM_DATA__ME_RAM_DATA__SHIFT 0x0
28407#define CP_ME_RAM_DATA__ME_RAM_DATA_MASK 0xFFFFFFFFL
28408//CP_CE_UCODE_ADDR
28409#define CP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28410#define CP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
28411//CP_HYP_CE_UCODE_ADDR
28412#define CP_HYP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28413#define CP_HYP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
28414//CP_CE_UCODE_DATA
28415#define CP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28416#define CP_CE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28417//CP_HYP_CE_UCODE_DATA
28418#define CP_HYP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28419#define CP_HYP_CE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28420//CP_HYP_MEC1_UCODE_ADDR
28421#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28422#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
28423//CP_MEC_ME1_UCODE_ADDR
28424#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28425#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
28426//CP_HYP_MEC1_UCODE_DATA
28427#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28428#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28429//CP_MEC_ME1_UCODE_DATA
28430#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28431#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28432//CP_HYP_MEC2_UCODE_ADDR
28433#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28434#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
28435//CP_MEC_ME2_UCODE_ADDR
28436#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28437#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
28438//CP_HYP_MEC2_UCODE_DATA
28439#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28440#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28441//CP_MEC_ME2_UCODE_DATA
28442#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28443#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28444//RLC_GPM_UCODE_ADDR
28445#define RLC_GPM_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28446#define RLC_GPM_UCODE_ADDR__RESERVED__SHIFT 0xe
28447#define RLC_GPM_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
28448#define RLC_GPM_UCODE_ADDR__RESERVED_MASK 0xFFFFC000L
28449//RLC_GPM_UCODE_DATA
28450#define RLC_GPM_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28451#define RLC_GPM_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28452//GRBM_GFX_INDEX_SR_SELECT
28453#define GRBM_GFX_INDEX_SR_SELECT__INDEX__SHIFT 0x0
28454#define GRBM_GFX_INDEX_SR_SELECT__INDEX_MASK 0x00000007L
28455//GRBM_GFX_INDEX_SR_DATA
28456#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX__SHIFT 0x0
28457#define GRBM_GFX_INDEX_SR_DATA__SH_INDEX__SHIFT 0x8
28458#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX__SHIFT 0x10
28459#define GRBM_GFX_INDEX_SR_DATA__SH_BROADCAST_WRITES__SHIFT 0x1d
28460#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
28461#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES__SHIFT 0x1f
28462#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX_MASK 0x000000FFL
28463#define GRBM_GFX_INDEX_SR_DATA__SH_INDEX_MASK 0x0000FF00L
28464#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX_MASK 0x00FF0000L
28465#define GRBM_GFX_INDEX_SR_DATA__SH_BROADCAST_WRITES_MASK 0x20000000L
28466#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
28467#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES_MASK 0x80000000L
28468//GRBM_GFX_CNTL_SR_SELECT
28469#define GRBM_GFX_CNTL_SR_SELECT__INDEX__SHIFT 0x0
28470#define GRBM_GFX_CNTL_SR_SELECT__INDEX_MASK 0x00000007L
28471//GRBM_GFX_CNTL_SR_DATA
28472#define GRBM_GFX_CNTL_SR_DATA__PIPEID__SHIFT 0x0
28473#define GRBM_GFX_CNTL_SR_DATA__MEID__SHIFT 0x2
28474#define GRBM_GFX_CNTL_SR_DATA__VMID__SHIFT 0x4
28475#define GRBM_GFX_CNTL_SR_DATA__QUEUEID__SHIFT 0x8
28476#define GRBM_GFX_CNTL_SR_DATA__PIPEID_MASK 0x00000003L
28477#define GRBM_GFX_CNTL_SR_DATA__MEID_MASK 0x0000000CL
28478#define GRBM_GFX_CNTL_SR_DATA__VMID_MASK 0x000000F0L
28479#define GRBM_GFX_CNTL_SR_DATA__QUEUEID_MASK 0x00000700L
28480//GRBM_CAM_INDEX
28481#define GRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x0
28482#define GRBM_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
28483//GRBM_HYP_CAM_INDEX
28484#define GRBM_HYP_CAM_INDEX__CAM_INDEX__SHIFT 0x0
28485#define GRBM_HYP_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
28486//GRBM_CAM_DATA
28487#define GRBM_CAM_DATA__CAM_ADDR__SHIFT 0x0
28488#define GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
28489#define GRBM_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
28490#define GRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
28491//GRBM_HYP_CAM_DATA
28492#define GRBM_HYP_CAM_DATA__CAM_ADDR__SHIFT 0x0
28493#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
28494#define GRBM_HYP_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
28495#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
28496//RLC_GPU_IOV_VF_ENABLE
28497#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE__SHIFT 0x0
28498#define RLC_GPU_IOV_VF_ENABLE__RESERVED__SHIFT 0x1
28499#define RLC_GPU_IOV_VF_ENABLE__VF_NUM__SHIFT 0x10
28500#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
28501#define RLC_GPU_IOV_VF_ENABLE__RESERVED_MASK 0x0000FFFEL
28502#define RLC_GPU_IOV_VF_ENABLE__VF_NUM_MASK 0xFFFF0000L
28503//RLC_GFX_RM_CNTL_ADJ
28504#define RLC_GFX_RM_CNTL_ADJ__RLC_GFX_RM_VALID__SHIFT 0x0
28505#define RLC_GFX_RM_CNTL_ADJ__RESERVED__SHIFT 0x1
28506#define RLC_GFX_RM_CNTL_ADJ__RLC_GFX_RM_VALID_MASK 0x00000001L
28507#define RLC_GFX_RM_CNTL_ADJ__RESERVED_MASK 0xFFFFFFFEL
28508//RLC_GPU_IOV_CFG_REG6
28509#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE__SHIFT 0x0
28510#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION__SHIFT 0x7
28511#define RLC_GPU_IOV_CFG_REG6__RESERVED__SHIFT 0x8
28512#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET__SHIFT 0xa
28513#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE_MASK 0x0000007FL
28514#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION_MASK 0x00000080L
28515#define RLC_GPU_IOV_CFG_REG6__RESERVED_MASK 0x00000300L
28516#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET_MASK 0xFFFFFC00L
28517//RLC_GPU_IOV_CFG_REG8
28518#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS__SHIFT 0x0
28519#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
28520//RLC_RLCV_TIMER_INT_0
28521#define RLC_RLCV_TIMER_INT_0__TIMER__SHIFT 0x0
28522#define RLC_RLCV_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
28523//RLC_RLCV_TIMER_CTRL
28524#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
28525#define RLC_RLCV_TIMER_CTRL__RESERVED__SHIFT 0x1
28526#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
28527#define RLC_RLCV_TIMER_CTRL__RESERVED_MASK 0xFFFFFFFEL
28528//RLC_RLCV_TIMER_STAT
28529#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
28530#define RLC_RLCV_TIMER_STAT__RESERVED__SHIFT 0x1
28531#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
28532#define RLC_RLCV_TIMER_STAT__RESERVED_MASK 0xFFFFFFFEL
28533//RLC_GPU_IOV_VF_DOORBELL_STATUS
28534#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS__SHIFT 0x0
28535#define RLC_GPU_IOV_VF_DOORBELL_STATUS__RESERVED__SHIFT 0x10
28536#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS__SHIFT 0x1f
28537#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_MASK 0x0000FFFFL
28538#define RLC_GPU_IOV_VF_DOORBELL_STATUS__RESERVED_MASK 0x7FFF0000L
28539#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS_MASK 0x80000000L
28540//RLC_GPU_IOV_VF_DOORBELL_STATUS_SET
28541#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET__SHIFT 0x0
28542#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__RESERVED__SHIFT 0x10
28543#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET__SHIFT 0x1f
28544#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET_MASK 0x0000FFFFL
28545#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__RESERVED_MASK 0x7FFF0000L
28546#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET_MASK 0x80000000L
28547//RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR
28548#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR__SHIFT 0x0
28549#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__RESERVED__SHIFT 0x10
28550#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR__SHIFT 0x1f
28551#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR_MASK 0x0000FFFFL
28552#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__RESERVED_MASK 0x7FFF0000L
28553#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR_MASK 0x80000000L
28554//RLC_GPU_IOV_VF_MASK
28555#define RLC_GPU_IOV_VF_MASK__VF_MASK__SHIFT 0x0
28556#define RLC_GPU_IOV_VF_MASK__RESERVED__SHIFT 0x10
28557#define RLC_GPU_IOV_VF_MASK__VF_MASK_MASK 0x0000FFFFL
28558#define RLC_GPU_IOV_VF_MASK__RESERVED_MASK 0xFFFF0000L
28559//RLC_HYP_SEMAPHORE_2
28560#define RLC_HYP_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
28561#define RLC_HYP_SEMAPHORE_2__RESERVED__SHIFT 0x5
28562#define RLC_HYP_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
28563#define RLC_HYP_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
28564//RLC_HYP_SEMAPHORE_3
28565#define RLC_HYP_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
28566#define RLC_HYP_SEMAPHORE_3__RESERVED__SHIFT 0x5
28567#define RLC_HYP_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
28568#define RLC_HYP_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
28569//RLC_CLK_CNTL
28570#define RLC_CLK_CNTL__RLC_SRM_CLK_CNTL__SHIFT 0x0
28571#define RLC_CLK_CNTL__RLC_SPM_CLK_CNTL__SHIFT 0x1
28572#define RLC_CLK_CNTL__RESERVED__SHIFT 0x2
28573#define RLC_CLK_CNTL__RLC_SRM_CLK_CNTL_MASK 0x00000001L
28574#define RLC_CLK_CNTL__RLC_SPM_CLK_CNTL_MASK 0x00000002L
28575#define RLC_CLK_CNTL__RESERVED_MASK 0xFFFFFFFCL
28576//RLC_GPU_IOV_SCH_BLOCK
28577#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID__SHIFT 0x0
28578#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver__SHIFT 0x4
28579#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size__SHIFT 0x8
28580#define RLC_GPU_IOV_SCH_BLOCK__RESERVED__SHIFT 0x10
28581#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID_MASK 0x0000000FL
28582#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver_MASK 0x000000F0L
28583#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size_MASK 0x00007F00L
28584#define RLC_GPU_IOV_SCH_BLOCK__RESERVED_MASK 0x7FFF0000L
28585//RLC_GPU_IOV_CFG_REG1
28586#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE__SHIFT 0x0
28587#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE__SHIFT 0x4
28588#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN__SHIFT 0x5
28589#define RLC_GPU_IOV_CFG_REG1__RESERVED__SHIFT 0x6
28590#define RLC_GPU_IOV_CFG_REG1__FCN_ID__SHIFT 0x8
28591#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID__SHIFT 0x10
28592#define RLC_GPU_IOV_CFG_REG1__RESERVED1__SHIFT 0x18
28593#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE_MASK 0x0000000FL
28594#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_MASK 0x00000010L
28595#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
28596#define RLC_GPU_IOV_CFG_REG1__RESERVED_MASK 0x000000C0L
28597#define RLC_GPU_IOV_CFG_REG1__FCN_ID_MASK 0x0000FF00L
28598#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID_MASK 0x00FF0000L
28599#define RLC_GPU_IOV_CFG_REG1__RESERVED1_MASK 0xFF000000L
28600//RLC_GPU_IOV_CFG_REG2
28601#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS__SHIFT 0x0
28602#define RLC_GPU_IOV_CFG_REG2__RESERVED__SHIFT 0x4
28603#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS_MASK 0x0000000FL
28604#define RLC_GPU_IOV_CFG_REG2__RESERVED_MASK 0xFFFFFFF0L
28605//RLC_GPU_IOV_VM_BUSY_STATUS
28606#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
28607#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
28608//RLC_GPU_IOV_SCH_0
28609#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS__SHIFT 0x0
28610#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS_MASK 0xFFFFFFFFL
28611//RLC_GPU_IOV_ACTIVE_FCN_ID
28612#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
28613#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
28614#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
28615#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
28616#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
28617#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
28618//RLC_GPU_IOV_SCH_3
28619#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def__SHIFT 0x0
28620#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def_MASK 0xFFFFFFFFL
28621//RLC_GPU_IOV_SCH_1
28622#define RLC_GPU_IOV_SCH_1__DATA__SHIFT 0x0
28623#define RLC_GPU_IOV_SCH_1__DATA_MASK 0xFFFFFFFFL
28624//RLC_GPU_IOV_SCH_2
28625#define RLC_GPU_IOV_SCH_2__DATA__SHIFT 0x0
28626#define RLC_GPU_IOV_SCH_2__DATA_MASK 0xFFFFFFFFL
28627//RLC_GPU_IOV_UCODE_ADDR
28628#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
28629#define RLC_GPU_IOV_UCODE_ADDR__RESERVED__SHIFT 0xc
28630#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
28631#define RLC_GPU_IOV_UCODE_ADDR__RESERVED_MASK 0xFFFFF000L
28632//RLC_GPU_IOV_UCODE_DATA
28633#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA__SHIFT 0x0
28634#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
28635//RLC_GPU_IOV_SCRATCH_ADDR
28636#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR__SHIFT 0x0
28637#define RLC_GPU_IOV_SCRATCH_ADDR__RESERVED__SHIFT 0x9
28638#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR_MASK 0x000001FFL
28639#define RLC_GPU_IOV_SCRATCH_ADDR__RESERVED_MASK 0xFFFFFE00L
28640//RLC_GPU_IOV_SCRATCH_DATA
28641#define RLC_GPU_IOV_SCRATCH_DATA__DATA__SHIFT 0x0
28642#define RLC_GPU_IOV_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
28643//RLC_GPU_IOV_F32_CNTL
28644#define RLC_GPU_IOV_F32_CNTL__ENABLE__SHIFT 0x0
28645#define RLC_GPU_IOV_F32_CNTL__RESERVED__SHIFT 0x1
28646#define RLC_GPU_IOV_F32_CNTL__ENABLE_MASK 0x00000001L
28647#define RLC_GPU_IOV_F32_CNTL__RESERVED_MASK 0xFFFFFFFEL
28648//RLC_GPU_IOV_F32_RESET
28649#define RLC_GPU_IOV_F32_RESET__RESET__SHIFT 0x0
28650#define RLC_GPU_IOV_F32_RESET__RESERVED__SHIFT 0x1
28651#define RLC_GPU_IOV_F32_RESET__RESET_MASK 0x00000001L
28652#define RLC_GPU_IOV_F32_RESET__RESERVED_MASK 0xFFFFFFFEL
28653//RLC_GPU_IOV_SDMA0_STATUS
28654#define RLC_GPU_IOV_SDMA0_STATUS__PREEMPTED__SHIFT 0x0
28655#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED__SHIFT 0x1
28656#define RLC_GPU_IOV_SDMA0_STATUS__SAVED__SHIFT 0x8
28657#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED1__SHIFT 0x9
28658#define RLC_GPU_IOV_SDMA0_STATUS__RESTORED__SHIFT 0xc
28659#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED2__SHIFT 0xd
28660#define RLC_GPU_IOV_SDMA0_STATUS__PREEMPTED_MASK 0x00000001L
28661#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED_MASK 0x000000FEL
28662#define RLC_GPU_IOV_SDMA0_STATUS__SAVED_MASK 0x00000100L
28663#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED1_MASK 0x00000E00L
28664#define RLC_GPU_IOV_SDMA0_STATUS__RESTORED_MASK 0x00001000L
28665#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED2_MASK 0xFFFFE000L
28666//RLC_GPU_IOV_SDMA1_STATUS
28667#define RLC_GPU_IOV_SDMA1_STATUS__PREEMPTED__SHIFT 0x0
28668#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED__SHIFT 0x1
28669#define RLC_GPU_IOV_SDMA1_STATUS__SAVED__SHIFT 0x8
28670#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED1__SHIFT 0x9
28671#define RLC_GPU_IOV_SDMA1_STATUS__RESTORED__SHIFT 0xc
28672#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED2__SHIFT 0xd
28673#define RLC_GPU_IOV_SDMA1_STATUS__PREEMPTED_MASK 0x00000001L
28674#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED_MASK 0x000000FEL
28675#define RLC_GPU_IOV_SDMA1_STATUS__SAVED_MASK 0x00000100L
28676#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED1_MASK 0x00000E00L
28677#define RLC_GPU_IOV_SDMA1_STATUS__RESTORED_MASK 0x00001000L
28678#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED2_MASK 0xFFFFE000L
28679//RLC_GPU_IOV_SMU_RESPONSE
28680#define RLC_GPU_IOV_SMU_RESPONSE__RESP__SHIFT 0x0
28681#define RLC_GPU_IOV_SMU_RESPONSE__RESP_MASK 0xFFFFFFFFL
28682//RLC_GPU_IOV_VIRT_RESET_REQ
28683#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
28684#define RLC_GPU_IOV_VIRT_RESET_REQ__RESERVED__SHIFT 0x10
28685#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR__SHIFT 0x1f
28686#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR_MASK 0x0000FFFFL
28687#define RLC_GPU_IOV_VIRT_RESET_REQ__RESERVED_MASK 0x7FFF0000L
28688#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR_MASK 0x80000000L
28689//RLC_GPU_IOV_RLC_RESPONSE
28690#define RLC_GPU_IOV_RLC_RESPONSE__RESP__SHIFT 0x0
28691#define RLC_GPU_IOV_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
28692//RLC_GPU_IOV_INT_DISABLE
28693#define RLC_GPU_IOV_INT_DISABLE__DISABLE__SHIFT 0x0
28694#define RLC_GPU_IOV_INT_DISABLE__DISABLE_MASK 0xFFFFFFFFL
28695//RLC_GPU_IOV_INT_FORCE
28696#define RLC_GPU_IOV_INT_FORCE__FORCE__SHIFT 0x0
28697#define RLC_GPU_IOV_INT_FORCE__FORCE_MASK 0xFFFFFFFFL
28698//RLC_GPU_IOV_SDMA0_BUSY_STATUS
28699#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
28700#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
28701//RLC_GPU_IOV_SDMA1_BUSY_STATUS
28702#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
28703#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
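// Illustrative sketch, not part of the generated header: pulling the VF_ID
// and PF_VF fields out of an RLC_GPU_IOV_ACTIVE_FCN_ID readback using the
// defines above.  The helper names and the raw 'val' parameter are
// assumptions for this example, and no meaning or polarity is asserted for
// the PF_VF bit beyond what the field name suggests.
static inline unsigned int active_fcn_vf_id(unsigned int val)
{
	return (val & RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID_MASK) >>
		RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT;
}

static inline unsigned int active_fcn_pf_vf_bit(unsigned int val)
{
	return (val & RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF_MASK) >>
		RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT;
}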
28704
28705
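/*
 * Editorial sketch, not part of the generated header: each field above is
 * described by a <FIELD>__SHIFT / <FIELD>_MASK pair, and a write updates a
 * field by clearing the mask and OR-ing in the shifted value.  Hypothetical
 * helper using only the RLC_GPU_IOV_F32_CNTL definitions above:
 */
static inline unsigned int
rlc_gpu_iov_f32_cntl_set_enable(unsigned int reg_val, unsigned int enable)
{
	reg_val &= ~RLC_GPU_IOV_F32_CNTL__ENABLE_MASK;
	reg_val |= (enable << RLC_GPU_IOV_F32_CNTL__ENABLE__SHIFT) &
		   RLC_GPU_IOV_F32_CNTL__ENABLE_MASK;
	return reg_val;
}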
28706// addressBlock: gccacind
28707//GC_CAC_CNTL
28708#define GC_CAC_CNTL__CAC_ENABLE__SHIFT 0x0
28709#define GC_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x1
28710#define GC_CAC_CNTL__CAC_BLOCK_ID__SHIFT 0x11
28711#define GC_CAC_CNTL__CAC_SIGNAL_ID__SHIFT 0x17
28712#define GC_CAC_CNTL__UNUSED_0__SHIFT 0x1f
28713#define GC_CAC_CNTL__CAC_ENABLE_MASK 0x00000001L
28714#define GC_CAC_CNTL__CAC_THRESHOLD_MASK 0x0001FFFEL
28715#define GC_CAC_CNTL__CAC_BLOCK_ID_MASK 0x007E0000L
28716#define GC_CAC_CNTL__CAC_SIGNAL_ID_MASK 0x7F800000L
28717#define GC_CAC_CNTL__UNUSED_0_MASK 0x80000000L
28718//GC_CAC_OVR_SEL
28719#define GC_CAC_OVR_SEL__CAC_OVR_SEL__SHIFT 0x0
28720#define GC_CAC_OVR_SEL__CAC_OVR_SEL_MASK 0xFFFFFFFFL
28721//GC_CAC_OVR_VAL
28722#define GC_CAC_OVR_VAL__CAC_OVR_VAL__SHIFT 0x0
28723#define GC_CAC_OVR_VAL__CAC_OVR_VAL_MASK 0xFFFFFFFFL
28724//GC_CAC_WEIGHT_BCI_0
28725#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0__SHIFT 0x0
28726#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1__SHIFT 0x10
28727#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0_MASK 0x0000FFFFL
28728#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1_MASK 0xFFFF0000L
28729//GC_CAC_WEIGHT_CB_0
28730#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0__SHIFT 0x0
28731#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1__SHIFT 0x10
28732#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0_MASK 0x0000FFFFL
28733#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1_MASK 0xFFFF0000L
28734//GC_CAC_WEIGHT_CB_1
28735#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2__SHIFT 0x0
28736#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3__SHIFT 0x10
28737#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2_MASK 0x0000FFFFL
28738#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3_MASK 0xFFFF0000L
28739//GC_CAC_WEIGHT_CP_0
28740#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0__SHIFT 0x0
28741#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1__SHIFT 0x10
28742#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0_MASK 0x0000FFFFL
28743#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1_MASK 0xFFFF0000L
28744//GC_CAC_WEIGHT_CP_1
28745#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2__SHIFT 0x0
28746#define GC_CAC_WEIGHT_CP_1__UNUSED_0__SHIFT 0x10
28747#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2_MASK 0x0000FFFFL
28748#define GC_CAC_WEIGHT_CP_1__UNUSED_0_MASK 0xFFFF0000L
28749//GC_CAC_WEIGHT_DB_0
28750#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0__SHIFT 0x0
28751#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1__SHIFT 0x10
28752#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0_MASK 0x0000FFFFL
28753#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1_MASK 0xFFFF0000L
28754//GC_CAC_WEIGHT_DB_1
28755#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2__SHIFT 0x0
28756#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3__SHIFT 0x10
28757#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2_MASK 0x0000FFFFL
28758#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3_MASK 0xFFFF0000L
28759//GC_CAC_WEIGHT_GDS_0
28760#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0__SHIFT 0x0
28761#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1__SHIFT 0x10
28762#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0_MASK 0x0000FFFFL
28763#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1_MASK 0xFFFF0000L
28764//GC_CAC_WEIGHT_GDS_1
28765#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2__SHIFT 0x0
28766#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3__SHIFT 0x10
28767#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2_MASK 0x0000FFFFL
28768#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3_MASK 0xFFFF0000L
28769//GC_CAC_WEIGHT_IA_0
28770#define GC_CAC_WEIGHT_IA_0__WEIGHT_IA_SIG0__SHIFT 0x0
28771#define GC_CAC_WEIGHT_IA_0__UNUSED_0__SHIFT 0x10
28772#define GC_CAC_WEIGHT_IA_0__WEIGHT_IA_SIG0_MASK 0x0000FFFFL
28773#define GC_CAC_WEIGHT_IA_0__UNUSED_0_MASK 0xFFFF0000L
28774//GC_CAC_WEIGHT_LDS_0
28775#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0__SHIFT 0x0
28776#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1__SHIFT 0x10
28777#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0_MASK 0x0000FFFFL
28778#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1_MASK 0xFFFF0000L
28779//GC_CAC_WEIGHT_LDS_1
28780#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2__SHIFT 0x0
28781#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3__SHIFT 0x10
28782#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2_MASK 0x0000FFFFL
28783#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3_MASK 0xFFFF0000L
28784//GC_CAC_WEIGHT_PA_0
28785#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0__SHIFT 0x0
28786#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1__SHIFT 0x10
28787#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0_MASK 0x0000FFFFL
28788#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1_MASK 0xFFFF0000L
28789//GC_CAC_WEIGHT_PC_0
28790#define GC_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0__SHIFT 0x0
28791#define GC_CAC_WEIGHT_PC_0__UNUSED_0__SHIFT 0x10
28792#define GC_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0_MASK 0x0000FFFFL
28793#define GC_CAC_WEIGHT_PC_0__UNUSED_0_MASK 0xFFFF0000L
28794//GC_CAC_WEIGHT_SC_0
28795#define GC_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0__SHIFT 0x0
28796#define GC_CAC_WEIGHT_SC_0__UNUSED_0__SHIFT 0x10
28797#define GC_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0_MASK 0x0000FFFFL
28798#define GC_CAC_WEIGHT_SC_0__UNUSED_0_MASK 0xFFFF0000L
28799//GC_CAC_WEIGHT_SPI_0
28800#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0__SHIFT 0x0
28801#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1__SHIFT 0x10
28802#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0_MASK 0x0000FFFFL
28803#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1_MASK 0xFFFF0000L
28804//GC_CAC_WEIGHT_SPI_1
28805#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2__SHIFT 0x0
28806#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3__SHIFT 0x10
28807#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2_MASK 0x0000FFFFL
28808#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3_MASK 0xFFFF0000L
28809//GC_CAC_WEIGHT_SPI_2
28810#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4__SHIFT 0x0
28811#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG5__SHIFT 0x10
28812#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4_MASK 0x0000FFFFL
28813#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG5_MASK 0xFFFF0000L
28814//GC_CAC_WEIGHT_SQ_0
28815#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0__SHIFT 0x0
28816#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1__SHIFT 0x10
28817#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0_MASK 0x0000FFFFL
28818#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1_MASK 0xFFFF0000L
28819//GC_CAC_WEIGHT_SQ_1
28820#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2__SHIFT 0x0
28821#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3__SHIFT 0x10
28822#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2_MASK 0x0000FFFFL
28823#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3_MASK 0xFFFF0000L
28824//GC_CAC_WEIGHT_SQ_2
28825#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4__SHIFT 0x0
28826#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG5__SHIFT 0x10
28827#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4_MASK 0x0000FFFFL
28828#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG5_MASK 0xFFFF0000L
28829//GC_CAC_WEIGHT_SQ_3
28830#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG6__SHIFT 0x0
28831#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG7__SHIFT 0x10
28832#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG6_MASK 0x0000FFFFL
28833#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG7_MASK 0xFFFF0000L
28834//GC_CAC_WEIGHT_SQ_4
28835#define GC_CAC_WEIGHT_SQ_4__WEIGHT_SQ_SIG8__SHIFT 0x0
28836#define GC_CAC_WEIGHT_SQ_4__UNUSED_0__SHIFT 0x10
28837#define GC_CAC_WEIGHT_SQ_4__WEIGHT_SQ_SIG8_MASK 0x0000FFFFL
28838#define GC_CAC_WEIGHT_SQ_4__UNUSED_0_MASK 0xFFFF0000L
28839//GC_CAC_WEIGHT_SX_0
28840#define GC_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0__SHIFT 0x0
28841#define GC_CAC_WEIGHT_SX_0__UNUSED_0__SHIFT 0x10
28842#define GC_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0_MASK 0x0000FFFFL
28843#define GC_CAC_WEIGHT_SX_0__UNUSED_0_MASK 0xFFFF0000L
28844//GC_CAC_WEIGHT_SXRB_0
28845#define GC_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0__SHIFT 0x0
28846#define GC_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG1__SHIFT 0x10
28847#define GC_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0_MASK 0x0000FFFFL
28848#define GC_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG1_MASK 0xFFFF0000L
28849//GC_CAC_WEIGHT_TA_0
28850#define GC_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0__SHIFT 0x0
28851#define GC_CAC_WEIGHT_TA_0__UNUSED_0__SHIFT 0x10
28852#define GC_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0_MASK 0x0000FFFFL
28853#define GC_CAC_WEIGHT_TA_0__UNUSED_0_MASK 0xFFFF0000L
28854//GC_CAC_WEIGHT_TCC_0
28855#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG0__SHIFT 0x0
28856#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG1__SHIFT 0x10
28857#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG0_MASK 0x0000FFFFL
28858#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG1_MASK 0xFFFF0000L
28859//GC_CAC_WEIGHT_TCC_1
28860#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG2__SHIFT 0x0
28861#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG3__SHIFT 0x10
28862#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG2_MASK 0x0000FFFFL
28863#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG3_MASK 0xFFFF0000L
28864//GC_CAC_WEIGHT_TCC_2
28865#define GC_CAC_WEIGHT_TCC_2__WEIGHT_TCC_SIG4__SHIFT 0x0
28866#define GC_CAC_WEIGHT_TCC_2__UNUSED_0__SHIFT 0x10
28867#define GC_CAC_WEIGHT_TCC_2__WEIGHT_TCC_SIG4_MASK 0x0000FFFFL
28868#define GC_CAC_WEIGHT_TCC_2__UNUSED_0_MASK 0xFFFF0000L
28869//GC_CAC_WEIGHT_TCP_0
28870#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0__SHIFT 0x0
28871#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1__SHIFT 0x10
28872#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0_MASK 0x0000FFFFL
28873#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1_MASK 0xFFFF0000L
28874//GC_CAC_WEIGHT_TCP_1
28875#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2__SHIFT 0x0
28876#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3__SHIFT 0x10
28877#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2_MASK 0x0000FFFFL
28878#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3_MASK 0xFFFF0000L
28879//GC_CAC_WEIGHT_TCP_2
28880#define GC_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4__SHIFT 0x0
28881#define GC_CAC_WEIGHT_TCP_2__UNUSED_0__SHIFT 0x10
28882#define GC_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4_MASK 0x0000FFFFL
28883#define GC_CAC_WEIGHT_TCP_2__UNUSED_0_MASK 0xFFFF0000L
28884//GC_CAC_WEIGHT_TD_0
28885#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0__SHIFT 0x0
28886#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1__SHIFT 0x10
28887#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0_MASK 0x0000FFFFL
28888#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1_MASK 0xFFFF0000L
28889//GC_CAC_WEIGHT_TD_1
28890#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2__SHIFT 0x0
28891#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3__SHIFT 0x10
28892#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2_MASK 0x0000FFFFL
28893#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3_MASK 0xFFFF0000L
28894//GC_CAC_WEIGHT_TD_2
28895#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4__SHIFT 0x0
28896#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5__SHIFT 0x10
28897#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4_MASK 0x0000FFFFL
28898#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5_MASK 0xFFFF0000L
28899//GC_CAC_WEIGHT_VGT_0
28900#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG0__SHIFT 0x0
28901#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG1__SHIFT 0x10
28902#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG0_MASK 0x0000FFFFL
28903#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG1_MASK 0xFFFF0000L
28904//GC_CAC_WEIGHT_VGT_1
28905#define GC_CAC_WEIGHT_VGT_1__WEIGHT_VGT_SIG2__SHIFT 0x0
28906#define GC_CAC_WEIGHT_VGT_1__UNUSED_0__SHIFT 0x10
28907#define GC_CAC_WEIGHT_VGT_1__WEIGHT_VGT_SIG2_MASK 0x0000FFFFL
28908#define GC_CAC_WEIGHT_VGT_1__UNUSED_0_MASK 0xFFFF0000L
28909//GC_CAC_WEIGHT_WD_0
28910#define GC_CAC_WEIGHT_WD_0__WEIGHT_WD_SIG0__SHIFT 0x0
28911#define GC_CAC_WEIGHT_WD_0__UNUSED_0__SHIFT 0x10
28912#define GC_CAC_WEIGHT_WD_0__WEIGHT_WD_SIG0_MASK 0x0000FFFFL
28913#define GC_CAC_WEIGHT_WD_0__UNUSED_0_MASK 0xFFFF0000L
28914//GC_CAC_WEIGHT_CU_0
28915#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
28916#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1__SHIFT 0x10
28917#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0x0000FFFFL
28918#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1_MASK 0xFFFF0000L
28919//GC_CAC_WEIGHT_CU_1
28920#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2__SHIFT 0x0
28921#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3__SHIFT 0x10
28922#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2_MASK 0x0000FFFFL
28923#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3_MASK 0xFFFF0000L
28924//GC_CAC_WEIGHT_CU_2
28925#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4__SHIFT 0x0
28926#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5__SHIFT 0x10
28927#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4_MASK 0x0000FFFFL
28928#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5_MASK 0xFFFF0000L
28929//GC_CAC_WEIGHT_CU_3
28930#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6__SHIFT 0x0
28931#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7__SHIFT 0x10
28932#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6_MASK 0x0000FFFFL
28933#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7_MASK 0xFFFF0000L
28934//GC_CAC_WEIGHT_CU_4
28935#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8__SHIFT 0x0
28936#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9__SHIFT 0x10
28937#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8_MASK 0x0000FFFFL
28938#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9_MASK 0xFFFF0000L
28939//GC_CAC_WEIGHT_CU_5
28940#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10__SHIFT 0x0
28941#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11__SHIFT 0x10
28942#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10_MASK 0x0000FFFFL
28943#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11_MASK 0xFFFF0000L
28944//GC_CAC_ACC_BCI0
28945#define GC_CAC_ACC_BCI0__ACCUMULATOR_31_0__SHIFT 0x0
28946#define GC_CAC_ACC_BCI0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28947//GC_CAC_ACC_CB0
28948#define GC_CAC_ACC_CB0__ACCUMULATOR_31_0__SHIFT 0x0
28949#define GC_CAC_ACC_CB0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28950//GC_CAC_ACC_CB1
28951#define GC_CAC_ACC_CB1__ACCUMULATOR_31_0__SHIFT 0x0
28952#define GC_CAC_ACC_CB1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28953//GC_CAC_ACC_CB2
28954#define GC_CAC_ACC_CB2__ACCUMULATOR_31_0__SHIFT 0x0
28955#define GC_CAC_ACC_CB2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28956//GC_CAC_ACC_CB3
28957#define GC_CAC_ACC_CB3__ACCUMULATOR_31_0__SHIFT 0x0
28958#define GC_CAC_ACC_CB3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28959//GC_CAC_ACC_CP0
28960#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0__SHIFT 0x0
28961#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28962//GC_CAC_ACC_CP1
28963#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0__SHIFT 0x0
28964#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28965//GC_CAC_ACC_CP2
28966#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0__SHIFT 0x0
28967#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28968//GC_CAC_ACC_DB0
28969#define GC_CAC_ACC_DB0__ACCUMULATOR_31_0__SHIFT 0x0
28970#define GC_CAC_ACC_DB0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28971//GC_CAC_ACC_DB1
28972#define GC_CAC_ACC_DB1__ACCUMULATOR_31_0__SHIFT 0x0
28973#define GC_CAC_ACC_DB1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28974//GC_CAC_ACC_DB2
28975#define GC_CAC_ACC_DB2__ACCUMULATOR_31_0__SHIFT 0x0
28976#define GC_CAC_ACC_DB2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28977//GC_CAC_ACC_DB3
28978#define GC_CAC_ACC_DB3__ACCUMULATOR_31_0__SHIFT 0x0
28979#define GC_CAC_ACC_DB3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28980//GC_CAC_ACC_GDS0
28981#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0__SHIFT 0x0
28982#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28983//GC_CAC_ACC_GDS1
28984#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0__SHIFT 0x0
28985#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28986//GC_CAC_ACC_GDS2
28987#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0__SHIFT 0x0
28988#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28989//GC_CAC_ACC_GDS3
28990#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0__SHIFT 0x0
28991#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28992//GC_CAC_ACC_IA0
28993#define GC_CAC_ACC_IA0__ACCUMULATOR_31_0__SHIFT 0x0
28994#define GC_CAC_ACC_IA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28995//GC_CAC_ACC_LDS0
28996#define GC_CAC_ACC_LDS0__ACCUMULATOR_31_0__SHIFT 0x0
28997#define GC_CAC_ACC_LDS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
28998//GC_CAC_ACC_LDS1
28999#define GC_CAC_ACC_LDS1__ACCUMULATOR_31_0__SHIFT 0x0
29000#define GC_CAC_ACC_LDS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29001//GC_CAC_ACC_LDS2
29002#define GC_CAC_ACC_LDS2__ACCUMULATOR_31_0__SHIFT 0x0
29003#define GC_CAC_ACC_LDS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29004//GC_CAC_ACC_LDS3
29005#define GC_CAC_ACC_LDS3__ACCUMULATOR_31_0__SHIFT 0x0
29006#define GC_CAC_ACC_LDS3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29007//GC_CAC_ACC_PA0
29008#define GC_CAC_ACC_PA0__ACCUMULATOR_31_0__SHIFT 0x0
29009#define GC_CAC_ACC_PA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29010//GC_CAC_ACC_PA1
29011#define GC_CAC_ACC_PA1__ACCUMULATOR_31_0__SHIFT 0x0
29012#define GC_CAC_ACC_PA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29013//GC_CAC_ACC_PC0
29014#define GC_CAC_ACC_PC0__ACCUMULATOR_31_0__SHIFT 0x0
29015#define GC_CAC_ACC_PC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29016//GC_CAC_ACC_SC0
29017#define GC_CAC_ACC_SC0__ACCUMULATOR_31_0__SHIFT 0x0
29018#define GC_CAC_ACC_SC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29019//GC_CAC_ACC_SPI0
29020#define GC_CAC_ACC_SPI0__ACCUMULATOR_31_0__SHIFT 0x0
29021#define GC_CAC_ACC_SPI0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29022//GC_CAC_ACC_SPI1
29023#define GC_CAC_ACC_SPI1__ACCUMULATOR_31_0__SHIFT 0x0
29024#define GC_CAC_ACC_SPI1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29025//GC_CAC_ACC_SPI2
29026#define GC_CAC_ACC_SPI2__ACCUMULATOR_31_0__SHIFT 0x0
29027#define GC_CAC_ACC_SPI2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29028//GC_CAC_ACC_SPI3
29029#define GC_CAC_ACC_SPI3__ACCUMULATOR_31_0__SHIFT 0x0
29030#define GC_CAC_ACC_SPI3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29031//GC_CAC_ACC_SPI4
29032#define GC_CAC_ACC_SPI4__ACCUMULATOR_31_0__SHIFT 0x0
29033#define GC_CAC_ACC_SPI4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29034//GC_CAC_ACC_SPI5
29035#define GC_CAC_ACC_SPI5__ACCUMULATOR_31_0__SHIFT 0x0
29036#define GC_CAC_ACC_SPI5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29037//GC_CAC_WEIGHT_PG_0
29038#define GC_CAC_WEIGHT_PG_0__WEIGHT_PG_SIG0__SHIFT 0x0
29039#define GC_CAC_WEIGHT_PG_0__unused__SHIFT 0x10
29040#define GC_CAC_WEIGHT_PG_0__WEIGHT_PG_SIG0_MASK 0x0000FFFFL
29041#define GC_CAC_WEIGHT_PG_0__unused_MASK 0xFFFF0000L
29042//GC_CAC_ACC_PG0
29043#define GC_CAC_ACC_PG0__ACCUMULATOR_31_0__SHIFT 0x0
29044#define GC_CAC_ACC_PG0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29045//GC_CAC_OVRD_PG
29046#define GC_CAC_OVRD_PG__OVRRD_SELECT__SHIFT 0x0
29047#define GC_CAC_OVRD_PG__OVRRD_VALUE__SHIFT 0x10
29048#define GC_CAC_OVRD_PG__OVRRD_SELECT_MASK 0x0000FFFFL
29049#define GC_CAC_OVRD_PG__OVRRD_VALUE_MASK 0xFFFF0000L
29050//GC_CAC_WEIGHT_UTCL2_ATCL2_0
29051#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG0__SHIFT 0x0
29052#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG1__SHIFT 0x10
29053#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG0_MASK 0x0000FFFFL
29054#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG1_MASK 0xFFFF0000L
29055//GC_CAC_ACC_EA0
29056#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0__SHIFT 0x0
29057#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29058//GC_CAC_ACC_EA1
29059#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0__SHIFT 0x0
29060#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29061//GC_CAC_ACC_EA2
29062#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0__SHIFT 0x0
29063#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29064//GC_CAC_ACC_EA3
29065#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0__SHIFT 0x0
29066#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29067//GC_CAC_ACC_UTCL2_ATCL20
29068#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0__SHIFT 0x0
29069#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29070//GC_CAC_OVRD_EA
29071#define GC_CAC_OVRD_EA__OVRRD_SELECT__SHIFT 0x0
29072#define GC_CAC_OVRD_EA__OVRRD_VALUE__SHIFT 0x6
29073#define GC_CAC_OVRD_EA__OVRRD_SELECT_MASK 0x0000003FL
29074#define GC_CAC_OVRD_EA__OVRRD_VALUE_MASK 0x00000FC0L
29075//GC_CAC_OVRD_UTCL2_ATCL2
29076#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_SELECT__SHIFT 0x0
29077#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_VALUE__SHIFT 0x5
29078#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_SELECT_MASK 0x0000001FL
29079#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_VALUE_MASK 0x000003E0L
29080//GC_CAC_WEIGHT_EA_0
29081#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0__SHIFT 0x0
29082#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1__SHIFT 0x10
29083#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0_MASK 0x0000FFFFL
29084#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1_MASK 0xFFFF0000L
29085//GC_CAC_WEIGHT_EA_1
29086#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2__SHIFT 0x0
29087#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3__SHIFT 0x10
29088#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2_MASK 0x0000FFFFL
29089#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3_MASK 0xFFFF0000L
29090//GC_CAC_WEIGHT_RMI_0
29091#define GC_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0__SHIFT 0x0
29092#define GC_CAC_WEIGHT_RMI_0__UNUSED__SHIFT 0x10
29093#define GC_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0_MASK 0x0000FFFFL
29094#define GC_CAC_WEIGHT_RMI_0__UNUSED_MASK 0xFFFF0000L
29095//GC_CAC_ACC_RMI0
29096#define GC_CAC_ACC_RMI0__ACCUMULATOR_31_0__SHIFT 0x0
29097#define GC_CAC_ACC_RMI0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29098//GC_CAC_OVRD_RMI
29099#define GC_CAC_OVRD_RMI__OVRRD_SELECT__SHIFT 0x0
29100#define GC_CAC_OVRD_RMI__OVRRD_VALUE__SHIFT 0x1
29101#define GC_CAC_OVRD_RMI__OVRRD_SELECT_MASK 0x00000001L
29102#define GC_CAC_OVRD_RMI__OVRRD_VALUE_MASK 0x00000002L
29103//GC_CAC_WEIGHT_UTCL2_ATCL2_1
29104#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG2__SHIFT 0x0
29105#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG3__SHIFT 0x10
29106#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG2_MASK 0x0000FFFFL
29107#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG3_MASK 0xFFFF0000L
29108//GC_CAC_ACC_UTCL2_ATCL21
29109#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0__SHIFT 0x0
29110#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29111//GC_CAC_ACC_UTCL2_ATCL22
29112#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0__SHIFT 0x0
29113#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29114//GC_CAC_ACC_UTCL2_ATCL23
29115#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0__SHIFT 0x0
29116#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29117//GC_CAC_ACC_EA4
29118#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0__SHIFT 0x0
29119#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29120//GC_CAC_ACC_EA5
29121#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0__SHIFT 0x0
29122#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29123//GC_CAC_WEIGHT_EA_2
29124#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4__SHIFT 0x0
29125#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5__SHIFT 0x10
29126#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4_MASK 0x0000FFFFL
29127#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5_MASK 0xFFFF0000L
29128//GC_CAC_ACC_SQ0_LOWER
29129#define GC_CAC_ACC_SQ0_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
29130#define GC_CAC_ACC_SQ0_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29131//GC_CAC_ACC_SQ0_UPPER
29132#define GC_CAC_ACC_SQ0_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
29133#define GC_CAC_ACC_SQ0_UPPER__UNUSED_0__SHIFT 0x8
29134#define GC_CAC_ACC_SQ0_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
29135#define GC_CAC_ACC_SQ0_UPPER__UNUSED_0_MASK 0xFFFFFF00L
29136//GC_CAC_ACC_SQ1_LOWER
29137#define GC_CAC_ACC_SQ1_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
29138#define GC_CAC_ACC_SQ1_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29139//GC_CAC_ACC_SQ1_UPPER
29140#define GC_CAC_ACC_SQ1_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
29141#define GC_CAC_ACC_SQ1_UPPER__UNUSED_0__SHIFT 0x8
29142#define GC_CAC_ACC_SQ1_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
29143#define GC_CAC_ACC_SQ1_UPPER__UNUSED_0_MASK 0xFFFFFF00L
29144//GC_CAC_ACC_SQ2_LOWER
29145#define GC_CAC_ACC_SQ2_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
29146#define GC_CAC_ACC_SQ2_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29147//GC_CAC_ACC_SQ2_UPPER
29148#define GC_CAC_ACC_SQ2_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
29149#define GC_CAC_ACC_SQ2_UPPER__UNUSED_0__SHIFT 0x8
29150#define GC_CAC_ACC_SQ2_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
29151#define GC_CAC_ACC_SQ2_UPPER__UNUSED_0_MASK 0xFFFFFF00L
29152//GC_CAC_ACC_SQ3_LOWER
29153#define GC_CAC_ACC_SQ3_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
29154#define GC_CAC_ACC_SQ3_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29155//GC_CAC_ACC_SQ3_UPPER
29156#define GC_CAC_ACC_SQ3_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
29157#define GC_CAC_ACC_SQ3_UPPER__UNUSED_0__SHIFT 0x8
29158#define GC_CAC_ACC_SQ3_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
29159#define GC_CAC_ACC_SQ3_UPPER__UNUSED_0_MASK 0xFFFFFF00L
29160//GC_CAC_ACC_SQ4_LOWER
29161#define GC_CAC_ACC_SQ4_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
29162#define GC_CAC_ACC_SQ4_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29163//GC_CAC_ACC_SQ4_UPPER
29164#define GC_CAC_ACC_SQ4_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
29165#define GC_CAC_ACC_SQ4_UPPER__UNUSED_0__SHIFT 0x8
29166#define GC_CAC_ACC_SQ4_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
29167#define GC_CAC_ACC_SQ4_UPPER__UNUSED_0_MASK 0xFFFFFF00L
29168//GC_CAC_ACC_SQ5_LOWER
29169#define GC_CAC_ACC_SQ5_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
29170#define GC_CAC_ACC_SQ5_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29171//GC_CAC_ACC_SQ5_UPPER
29172#define GC_CAC_ACC_SQ5_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
29173#define GC_CAC_ACC_SQ5_UPPER__UNUSED_0__SHIFT 0x8
29174#define GC_CAC_ACC_SQ5_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
29175#define GC_CAC_ACC_SQ5_UPPER__UNUSED_0_MASK 0xFFFFFF00L
29176//GC_CAC_ACC_SQ6_LOWER
29177#define GC_CAC_ACC_SQ6_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
29178#define GC_CAC_ACC_SQ6_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29179//GC_CAC_ACC_SQ6_UPPER
29180#define GC_CAC_ACC_SQ6_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
29181#define GC_CAC_ACC_SQ6_UPPER__UNUSED_0__SHIFT 0x8
29182#define GC_CAC_ACC_SQ6_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
29183#define GC_CAC_ACC_SQ6_UPPER__UNUSED_0_MASK 0xFFFFFF00L
29184//GC_CAC_ACC_SQ7_LOWER
29185#define GC_CAC_ACC_SQ7_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
29186#define GC_CAC_ACC_SQ7_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29187//GC_CAC_ACC_SQ7_UPPER
29188#define GC_CAC_ACC_SQ7_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
29189#define GC_CAC_ACC_SQ7_UPPER__UNUSED_0__SHIFT 0x8
29190#define GC_CAC_ACC_SQ7_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
29191#define GC_CAC_ACC_SQ7_UPPER__UNUSED_0_MASK 0xFFFFFF00L
29192//GC_CAC_ACC_SQ8_LOWER
29193#define GC_CAC_ACC_SQ8_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
29194#define GC_CAC_ACC_SQ8_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29195//GC_CAC_ACC_SQ8_UPPER
29196#define GC_CAC_ACC_SQ8_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
29197#define GC_CAC_ACC_SQ8_UPPER__UNUSED_0__SHIFT 0x8
29198#define GC_CAC_ACC_SQ8_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
29199#define GC_CAC_ACC_SQ8_UPPER__UNUSED_0_MASK 0xFFFFFF00L
29200//GC_CAC_ACC_SX0
29201#define GC_CAC_ACC_SX0__ACCUMULATOR_31_0__SHIFT 0x0
29202#define GC_CAC_ACC_SX0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29203//GC_CAC_ACC_SXRB0
29204#define GC_CAC_ACC_SXRB0__ACCUMULATOR_31_0__SHIFT 0x0
29205#define GC_CAC_ACC_SXRB0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29206//GC_CAC_ACC_SXRB1
29207#define GC_CAC_ACC_SXRB1__ACCUMULATOR_31_0__SHIFT 0x0
29208#define GC_CAC_ACC_SXRB1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29209//GC_CAC_ACC_TA0
29210#define GC_CAC_ACC_TA0__ACCUMULATOR_31_0__SHIFT 0x0
29211#define GC_CAC_ACC_TA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29212//GC_CAC_ACC_TCC0
29213#define GC_CAC_ACC_TCC0__ACCUMULATOR_31_0__SHIFT 0x0
29214#define GC_CAC_ACC_TCC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29215//GC_CAC_ACC_TCC1
29216#define GC_CAC_ACC_TCC1__ACCUMULATOR_31_0__SHIFT 0x0
29217#define GC_CAC_ACC_TCC1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29218//GC_CAC_ACC_TCC2
29219#define GC_CAC_ACC_TCC2__ACCUMULATOR_31_0__SHIFT 0x0
29220#define GC_CAC_ACC_TCC2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29221//GC_CAC_ACC_TCC3
29222#define GC_CAC_ACC_TCC3__ACCUMULATOR_31_0__SHIFT 0x0
29223#define GC_CAC_ACC_TCC3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29224//GC_CAC_ACC_TCC4
29225#define GC_CAC_ACC_TCC4__ACCUMULATOR_31_0__SHIFT 0x0
29226#define GC_CAC_ACC_TCC4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29227//GC_CAC_ACC_TCP0
29228#define GC_CAC_ACC_TCP0__ACCUMULATOR_31_0__SHIFT 0x0
29229#define GC_CAC_ACC_TCP0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29230//GC_CAC_ACC_TCP1
29231#define GC_CAC_ACC_TCP1__ACCUMULATOR_31_0__SHIFT 0x0
29232#define GC_CAC_ACC_TCP1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29233//GC_CAC_ACC_TCP2
29234#define GC_CAC_ACC_TCP2__ACCUMULATOR_31_0__SHIFT 0x0
29235#define GC_CAC_ACC_TCP2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29236//GC_CAC_ACC_TCP3
29237#define GC_CAC_ACC_TCP3__ACCUMULATOR_31_0__SHIFT 0x0
29238#define GC_CAC_ACC_TCP3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29239//GC_CAC_ACC_TCP4
29240#define GC_CAC_ACC_TCP4__ACCUMULATOR_31_0__SHIFT 0x0
29241#define GC_CAC_ACC_TCP4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29242//GC_CAC_ACC_TD0
29243#define GC_CAC_ACC_TD0__ACCUMULATOR_31_0__SHIFT 0x0
29244#define GC_CAC_ACC_TD0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29245//GC_CAC_ACC_TD1
29246#define GC_CAC_ACC_TD1__ACCUMULATOR_31_0__SHIFT 0x0
29247#define GC_CAC_ACC_TD1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29248//GC_CAC_ACC_TD2
29249#define GC_CAC_ACC_TD2__ACCUMULATOR_31_0__SHIFT 0x0
29250#define GC_CAC_ACC_TD2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29251//GC_CAC_ACC_TD3
29252#define GC_CAC_ACC_TD3__ACCUMULATOR_31_0__SHIFT 0x0
29253#define GC_CAC_ACC_TD3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29254//GC_CAC_ACC_TD4
29255#define GC_CAC_ACC_TD4__ACCUMULATOR_31_0__SHIFT 0x0
29256#define GC_CAC_ACC_TD4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29257//GC_CAC_ACC_TD5
29258#define GC_CAC_ACC_TD5__ACCUMULATOR_31_0__SHIFT 0x0
29259#define GC_CAC_ACC_TD5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29260//GC_CAC_ACC_VGT0
29261#define GC_CAC_ACC_VGT0__ACCUMULATOR_31_0__SHIFT 0x0
29262#define GC_CAC_ACC_VGT0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29263//GC_CAC_ACC_VGT1
29264#define GC_CAC_ACC_VGT1__ACCUMULATOR_31_0__SHIFT 0x0
29265#define GC_CAC_ACC_VGT1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29266//GC_CAC_ACC_VGT2
29267#define GC_CAC_ACC_VGT2__ACCUMULATOR_31_0__SHIFT 0x0
29268#define GC_CAC_ACC_VGT2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29269//GC_CAC_ACC_WD0
29270#define GC_CAC_ACC_WD0__ACCUMULATOR_31_0__SHIFT 0x0
29271#define GC_CAC_ACC_WD0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29272//GC_CAC_ACC_CU0
29273#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0__SHIFT 0x0
29274#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29275//GC_CAC_ACC_CU1
29276#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0__SHIFT 0x0
29277#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29278//GC_CAC_ACC_CU2
29279#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0__SHIFT 0x0
29280#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29281//GC_CAC_ACC_CU3
29282#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0__SHIFT 0x0
29283#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29284//GC_CAC_ACC_CU4
29285#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0__SHIFT 0x0
29286#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29287//GC_CAC_ACC_CU5
29288#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0__SHIFT 0x0
29289#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29290//GC_CAC_ACC_CU6
29291#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0__SHIFT 0x0
29292#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29293//GC_CAC_ACC_CU7
29294#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0__SHIFT 0x0
29295#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29296//GC_CAC_ACC_CU8
29297#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0__SHIFT 0x0
29298#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29299//GC_CAC_ACC_CU9
29300#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0__SHIFT 0x0
29301#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29302//GC_CAC_ACC_CU10
29303#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0__SHIFT 0x0
29304#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29305//GC_CAC_OVRD_BCI
29306#define GC_CAC_OVRD_BCI__OVRRD_SELECT__SHIFT 0x0
29307#define GC_CAC_OVRD_BCI__OVRRD_VALUE__SHIFT 0x2
29308#define GC_CAC_OVRD_BCI__OVRRD_SELECT_MASK 0x00000003L
29309#define GC_CAC_OVRD_BCI__OVRRD_VALUE_MASK 0x0000000CL
29310//GC_CAC_OVRD_CB
29311#define GC_CAC_OVRD_CB__OVRRD_SELECT__SHIFT 0x0
29312#define GC_CAC_OVRD_CB__OVRRD_VALUE__SHIFT 0x4
29313#define GC_CAC_OVRD_CB__OVRRD_SELECT_MASK 0x0000000FL
29314#define GC_CAC_OVRD_CB__OVRRD_VALUE_MASK 0x000000F0L
29315//GC_CAC_OVRD_CP
29316#define GC_CAC_OVRD_CP__OVRRD_SELECT__SHIFT 0x0
29317#define GC_CAC_OVRD_CP__OVRRD_VALUE__SHIFT 0x3
29318#define GC_CAC_OVRD_CP__OVRRD_SELECT_MASK 0x00000007L
29319#define GC_CAC_OVRD_CP__OVRRD_VALUE_MASK 0x00000038L
29320//GC_CAC_OVRD_DB
29321#define GC_CAC_OVRD_DB__OVRRD_SELECT__SHIFT 0x0
29322#define GC_CAC_OVRD_DB__OVRRD_VALUE__SHIFT 0x4
29323#define GC_CAC_OVRD_DB__OVRRD_SELECT_MASK 0x0000000FL
29324#define GC_CAC_OVRD_DB__OVRRD_VALUE_MASK 0x000000F0L
29325//GC_CAC_OVRD_GDS
29326#define GC_CAC_OVRD_GDS__OVRRD_SELECT__SHIFT 0x0
29327#define GC_CAC_OVRD_GDS__OVRRD_VALUE__SHIFT 0x4
29328#define GC_CAC_OVRD_GDS__OVRRD_SELECT_MASK 0x0000000FL
29329#define GC_CAC_OVRD_GDS__OVRRD_VALUE_MASK 0x000000F0L
29330//GC_CAC_OVRD_IA
29331#define GC_CAC_OVRD_IA__OVRRD_SELECT__SHIFT 0x0
29332#define GC_CAC_OVRD_IA__OVRRD_VALUE__SHIFT 0x1
29333#define GC_CAC_OVRD_IA__OVRRD_SELECT_MASK 0x00000001L
29334#define GC_CAC_OVRD_IA__OVRRD_VALUE_MASK 0x00000002L
29335//GC_CAC_OVRD_LDS
29336#define GC_CAC_OVRD_LDS__OVRRD_SELECT__SHIFT 0x0
29337#define GC_CAC_OVRD_LDS__OVRRD_VALUE__SHIFT 0x4
29338#define GC_CAC_OVRD_LDS__OVRRD_SELECT_MASK 0x0000000FL
29339#define GC_CAC_OVRD_LDS__OVRRD_VALUE_MASK 0x000000F0L
29340//GC_CAC_OVRD_PA
29341#define GC_CAC_OVRD_PA__OVRRD_SELECT__SHIFT 0x0
29342#define GC_CAC_OVRD_PA__OVRRD_VALUE__SHIFT 0x2
29343#define GC_CAC_OVRD_PA__OVRRD_SELECT_MASK 0x00000003L
29344#define GC_CAC_OVRD_PA__OVRRD_VALUE_MASK 0x0000000CL
29345//GC_CAC_OVRD_PC
29346#define GC_CAC_OVRD_PC__OVRRD_SELECT__SHIFT 0x0
29347#define GC_CAC_OVRD_PC__OVRRD_VALUE__SHIFT 0x1
29348#define GC_CAC_OVRD_PC__OVRRD_SELECT_MASK 0x00000001L
29349#define GC_CAC_OVRD_PC__OVRRD_VALUE_MASK 0x00000002L
29350//GC_CAC_OVRD_SC
29351#define GC_CAC_OVRD_SC__OVRRD_SELECT__SHIFT 0x0
29352#define GC_CAC_OVRD_SC__OVRRD_VALUE__SHIFT 0x1
29353#define GC_CAC_OVRD_SC__OVRRD_SELECT_MASK 0x00000001L
29354#define GC_CAC_OVRD_SC__OVRRD_VALUE_MASK 0x00000002L
29355//GC_CAC_OVRD_SPI
29356#define GC_CAC_OVRD_SPI__OVRRD_SELECT__SHIFT 0x0
29357#define GC_CAC_OVRD_SPI__OVRRD_VALUE__SHIFT 0x6
29358#define GC_CAC_OVRD_SPI__OVRRD_SELECT_MASK 0x0000003FL
29359#define GC_CAC_OVRD_SPI__OVRRD_VALUE_MASK 0x00000FC0L
29360//GC_CAC_OVRD_CU
29361#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0
29362#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x1
29363#define GC_CAC_OVRD_CU__OVRRD_SELECT_MASK 0x00000001L
29364#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0x00000002L
29365//GC_CAC_OVRD_SQ
29366#define GC_CAC_OVRD_SQ__OVRRD_SELECT__SHIFT 0x0
29367#define GC_CAC_OVRD_SQ__OVRRD_VALUE__SHIFT 0x9
29368#define GC_CAC_OVRD_SQ__OVRRD_SELECT_MASK 0x000001FFL
29369#define GC_CAC_OVRD_SQ__OVRRD_VALUE_MASK 0x0003FE00L
29370//GC_CAC_OVRD_SX
29371#define GC_CAC_OVRD_SX__OVRRD_SELECT__SHIFT 0x0
29372#define GC_CAC_OVRD_SX__OVRRD_VALUE__SHIFT 0x1
29373#define GC_CAC_OVRD_SX__OVRRD_SELECT_MASK 0x00000001L
29374#define GC_CAC_OVRD_SX__OVRRD_VALUE_MASK 0x00000002L
29375//GC_CAC_OVRD_SXRB
29376#define GC_CAC_OVRD_SXRB__OVRRD_SELECT__SHIFT 0x0
29377#define GC_CAC_OVRD_SXRB__OVRRD_VALUE__SHIFT 0x1
29378#define GC_CAC_OVRD_SXRB__OVRRD_SELECT_MASK 0x00000001L
29379#define GC_CAC_OVRD_SXRB__OVRRD_VALUE_MASK 0x00000002L
29380//GC_CAC_OVRD_TA
29381#define GC_CAC_OVRD_TA__OVRRD_SELECT__SHIFT 0x0
29382#define GC_CAC_OVRD_TA__OVRRD_VALUE__SHIFT 0x1
29383#define GC_CAC_OVRD_TA__OVRRD_SELECT_MASK 0x00000001L
29384#define GC_CAC_OVRD_TA__OVRRD_VALUE_MASK 0x00000002L
29385//GC_CAC_OVRD_TCC
29386#define GC_CAC_OVRD_TCC__OVRRD_SELECT__SHIFT 0x0
29387#define GC_CAC_OVRD_TCC__OVRRD_VALUE__SHIFT 0x5
29388#define GC_CAC_OVRD_TCC__OVRRD_SELECT_MASK 0x0000001FL
29389#define GC_CAC_OVRD_TCC__OVRRD_VALUE_MASK 0x000003E0L
29390//GC_CAC_OVRD_TCP
29391#define GC_CAC_OVRD_TCP__OVRRD_SELECT__SHIFT 0x0
29392#define GC_CAC_OVRD_TCP__OVRRD_VALUE__SHIFT 0x5
29393#define GC_CAC_OVRD_TCP__OVRRD_SELECT_MASK 0x0000001FL
29394#define GC_CAC_OVRD_TCP__OVRRD_VALUE_MASK 0x000003E0L
29395//GC_CAC_OVRD_TD
29396#define GC_CAC_OVRD_TD__OVRRD_SELECT__SHIFT 0x0
29397#define GC_CAC_OVRD_TD__OVRRD_VALUE__SHIFT 0x6
29398#define GC_CAC_OVRD_TD__OVRRD_SELECT_MASK 0x0000003FL
29399#define GC_CAC_OVRD_TD__OVRRD_VALUE_MASK 0x00000FC0L
29400//GC_CAC_OVRD_VGT
29401#define GC_CAC_OVRD_VGT__OVRRD_SELECT__SHIFT 0x0
29402#define GC_CAC_OVRD_VGT__OVRRD_VALUE__SHIFT 0x3
29403#define GC_CAC_OVRD_VGT__OVRRD_SELECT_MASK 0x00000007L
29404#define GC_CAC_OVRD_VGT__OVRRD_VALUE_MASK 0x00000038L
29405//GC_CAC_OVRD_WD
29406#define GC_CAC_OVRD_WD__OVRRD_SELECT__SHIFT 0x0
29407#define GC_CAC_OVRD_WD__OVRRD_VALUE__SHIFT 0x1
29408#define GC_CAC_OVRD_WD__OVRRD_SELECT_MASK 0x00000001L
29409#define GC_CAC_OVRD_WD__OVRRD_VALUE_MASK 0x00000002L
29410//GC_CAC_ACC_BCI1
29411#define GC_CAC_ACC_BCI1__ACCUMULATOR_31_0__SHIFT 0x0
29412#define GC_CAC_ACC_BCI1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29413//GC_CAC_WEIGHT_UTCL2_ATCL2_2
29414#define GC_CAC_WEIGHT_UTCL2_ATCL2_2__WEIGHT_UTCL2_ATCL2_SIG4__SHIFT 0x0
29415#define GC_CAC_WEIGHT_UTCL2_ATCL2_2__WEIGHT_UTCL2_ATCL2_SIG5__SHIFT 0x10
29416#define GC_CAC_WEIGHT_UTCL2_ATCL2_2__WEIGHT_UTCL2_ATCL2_SIG4_MASK 0x0000FFFFL
29417#define GC_CAC_WEIGHT_UTCL2_ATCL2_2__WEIGHT_UTCL2_ATCL2_SIG5_MASK 0xFFFF0000L
29418//GC_CAC_WEIGHT_UTCL2_ROUTER_0
29419#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0__SHIFT 0x0
29420#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1__SHIFT 0x10
29421#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0_MASK 0x0000FFFFL
29422#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1_MASK 0xFFFF0000L
29423//GC_CAC_WEIGHT_UTCL2_ROUTER_1
29424#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2__SHIFT 0x0
29425#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3__SHIFT 0x10
29426#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2_MASK 0x0000FFFFL
29427#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3_MASK 0xFFFF0000L
29428//GC_CAC_WEIGHT_UTCL2_ROUTER_2
29429#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4__SHIFT 0x0
29430#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5__SHIFT 0x10
29431#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4_MASK 0x0000FFFFL
29432#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5_MASK 0xFFFF0000L
29433//GC_CAC_WEIGHT_UTCL2_ROUTER_3
29434#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6__SHIFT 0x0
29435#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7__SHIFT 0x10
29436#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6_MASK 0x0000FFFFL
29437#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7_MASK 0xFFFF0000L
29438//GC_CAC_WEIGHT_UTCL2_ROUTER_4
29439#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8__SHIFT 0x0
29440#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9__SHIFT 0x10
29441#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8_MASK 0x0000FFFFL
29442#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9_MASK 0xFFFF0000L
29443//GC_CAC_WEIGHT_UTCL2_VML2_0
29444#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0__SHIFT 0x0
29445#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1__SHIFT 0x10
29446#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0_MASK 0x0000FFFFL
29447#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1_MASK 0xFFFF0000L
29448//GC_CAC_WEIGHT_UTCL2_VML2_1
29449#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2__SHIFT 0x0
29450#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3__SHIFT 0x10
29451#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2_MASK 0x0000FFFFL
29452#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3_MASK 0xFFFF0000L
29453//GC_CAC_WEIGHT_UTCL2_VML2_2
29454#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4__SHIFT 0x0
29455#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG5__SHIFT 0x10
29456#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4_MASK 0x0000FFFFL
29457#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG5_MASK 0xFFFF0000L
29458//GC_CAC_ACC_UTCL2_ATCL24
29459#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0__SHIFT 0x0
29460#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29461//GC_CAC_ACC_UTCL2_ROUTER0
29462#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0__SHIFT 0x0
29463#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29464//GC_CAC_ACC_UTCL2_ROUTER1
29465#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0__SHIFT 0x0
29466#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29467//GC_CAC_ACC_UTCL2_ROUTER2
29468#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0__SHIFT 0x0
29469#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29470//GC_CAC_ACC_UTCL2_ROUTER3
29471#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0__SHIFT 0x0
29472#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29473//GC_CAC_ACC_UTCL2_ROUTER4
29474#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0__SHIFT 0x0
29475#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29476//GC_CAC_ACC_UTCL2_ROUTER5
29477#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0__SHIFT 0x0
29478#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29479//GC_CAC_ACC_UTCL2_ROUTER6
29480#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0__SHIFT 0x0
29481#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29482//GC_CAC_ACC_UTCL2_ROUTER7
29483#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0__SHIFT 0x0
29484#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29485//GC_CAC_ACC_UTCL2_ROUTER8
29486#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0__SHIFT 0x0
29487#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29488//GC_CAC_ACC_UTCL2_ROUTER9
29489#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0__SHIFT 0x0
29490#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29491//GC_CAC_ACC_UTCL2_VML20
29492#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0__SHIFT 0x0
29493#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29494//GC_CAC_ACC_UTCL2_VML21
29495#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0__SHIFT 0x0
29496#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29497//GC_CAC_ACC_UTCL2_VML22
29498#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0__SHIFT 0x0
29499#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29500//GC_CAC_ACC_UTCL2_VML23
29501#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0__SHIFT 0x0
29502#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29503//GC_CAC_ACC_UTCL2_VML24
29504#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0__SHIFT 0x0
29505#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29506//GC_CAC_OVRD_UTCL2_ROUTER
29507#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_SELECT__SHIFT 0x0
29508#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_VALUE__SHIFT 0xa
29509#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_SELECT_MASK 0x000003FFL
29510#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_VALUE_MASK 0x000FFC00L
29511//GC_CAC_OVRD_UTCL2_VML2
29512#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_SELECT__SHIFT 0x0
29513#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_VALUE__SHIFT 0x5
29514#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_SELECT_MASK 0x0000001FL
29515#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_VALUE_MASK 0x000003E0L
29516//GC_CAC_WEIGHT_UTCL2_WALKER_0
29517#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0__SHIFT 0x0
29518#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1__SHIFT 0x10
29519#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0_MASK 0x0000FFFFL
29520#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1_MASK 0xFFFF0000L
29521//GC_CAC_WEIGHT_UTCL2_WALKER_1
29522#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2__SHIFT 0x0
29523#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3__SHIFT 0x10
29524#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2_MASK 0x0000FFFFL
29525#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3_MASK 0xFFFF0000L
29526//GC_CAC_WEIGHT_UTCL2_WALKER_2
29527#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4__SHIFT 0x0
29528#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG5__SHIFT 0x10
29529#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4_MASK 0x0000FFFFL
29530#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG5_MASK 0xFFFF0000L
29531//GC_CAC_ACC_UTCL2_WALKER0
29532#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0__SHIFT 0x0
29533#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29534//GC_CAC_ACC_UTCL2_WALKER1
29535#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0__SHIFT 0x0
29536#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29537//GC_CAC_ACC_UTCL2_WALKER2
29538#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0__SHIFT 0x0
29539#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29540//GC_CAC_ACC_UTCL2_WALKER3
29541#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0__SHIFT 0x0
29542#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29543//GC_CAC_ACC_UTCL2_WALKER4
29544#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0__SHIFT 0x0
29545#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
29546//GC_CAC_OVRD_UTCL2_WALKER
29547#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_SELECT__SHIFT 0x0
29548#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_VALUE__SHIFT 0x5
29549#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_SELECT_MASK 0x0000001FL
29550#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_VALUE_MASK 0x000003E0L
29551
29552
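/*
 * Editorial sketch, not part of the generated header: reads are the mirror
 * of the write pattern shown earlier -- mask the register word, then shift
 * the field down to bit 0.  Hypothetical helper using the GC_CAC_CNTL
 * definitions above; amdgpu's REG_GET_FIELD()/REG_SET_FIELD() macros
 * generate the same expressions by token-pasting these identifiers.
 */
static inline unsigned int gc_cac_cntl_get_signal_id(unsigned int reg_val)
{
	return (reg_val & GC_CAC_CNTL__CAC_SIGNAL_ID_MASK) >>
	       GC_CAC_CNTL__CAC_SIGNAL_ID__SHIFT;
}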
29553// addressBlock: secacind
29554//SE_CAC_CNTL
29555#define SE_CAC_CNTL__CAC_ENABLE__SHIFT 0x0
29556#define SE_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x1
29557#define SE_CAC_CNTL__CAC_BLOCK_ID__SHIFT 0x11
29558#define SE_CAC_CNTL__CAC_SIGNAL_ID__SHIFT 0x17
29559#define SE_CAC_CNTL__UNUSED_0__SHIFT 0x1f
29560#define SE_CAC_CNTL__CAC_ENABLE_MASK 0x00000001L
29561#define SE_CAC_CNTL__CAC_THRESHOLD_MASK 0x0001FFFEL
29562#define SE_CAC_CNTL__CAC_BLOCK_ID_MASK 0x007E0000L
29563#define SE_CAC_CNTL__CAC_SIGNAL_ID_MASK 0x7F800000L
29564#define SE_CAC_CNTL__UNUSED_0_MASK 0x80000000L
29565//SE_CAC_OVR_SEL
29566#define SE_CAC_OVR_SEL__CAC_OVR_SEL__SHIFT 0x0
29567#define SE_CAC_OVR_SEL__CAC_OVR_SEL_MASK 0xFFFFFFFFL
29568//SE_CAC_OVR_VAL
29569#define SE_CAC_OVR_VAL__CAC_OVR_VAL__SHIFT 0x0
29570#define SE_CAC_OVR_VAL__CAC_OVR_VAL_MASK 0xFFFFFFFFL
29571
29572
29573// addressBlock: sqind
29574//SQ_WAVE_MODE
29575#define SQ_WAVE_MODE__FP_ROUND__SHIFT 0x0
29576#define SQ_WAVE_MODE__FP_DENORM__SHIFT 0x4
29577#define SQ_WAVE_MODE__DX10_CLAMP__SHIFT 0x8
29578#define SQ_WAVE_MODE__IEEE__SHIFT 0x9
29579#define SQ_WAVE_MODE__LOD_CLAMPED__SHIFT 0xa
29580#define SQ_WAVE_MODE__EXCP_EN__SHIFT 0xc
29581#define SQ_WAVE_MODE__FP16_OVFL__SHIFT 0x17
29582#define SQ_WAVE_MODE__POPS_PACKER0__SHIFT 0x18
29583#define SQ_WAVE_MODE__POPS_PACKER1__SHIFT 0x19
29584#define SQ_WAVE_MODE__DISABLE_PERF__SHIFT 0x1a
29585#define SQ_WAVE_MODE__GPR_IDX_EN__SHIFT 0x1b
29586#define SQ_WAVE_MODE__VSKIP__SHIFT 0x1c
29587#define SQ_WAVE_MODE__CSP__SHIFT 0x1d
29588#define SQ_WAVE_MODE__FP_ROUND_MASK 0x0000000FL
29589#define SQ_WAVE_MODE__FP_DENORM_MASK 0x000000F0L
29590#define SQ_WAVE_MODE__DX10_CLAMP_MASK 0x00000100L
29591#define SQ_WAVE_MODE__IEEE_MASK 0x00000200L
29592#define SQ_WAVE_MODE__LOD_CLAMPED_MASK 0x00000400L
29593#define SQ_WAVE_MODE__EXCP_EN_MASK 0x001FF000L
29594#define SQ_WAVE_MODE__FP16_OVFL_MASK 0x00800000L
29595#define SQ_WAVE_MODE__POPS_PACKER0_MASK 0x01000000L
29596#define SQ_WAVE_MODE__POPS_PACKER1_MASK 0x02000000L
29597#define SQ_WAVE_MODE__DISABLE_PERF_MASK 0x04000000L
29598#define SQ_WAVE_MODE__GPR_IDX_EN_MASK 0x08000000L
29599#define SQ_WAVE_MODE__VSKIP_MASK 0x10000000L
29600#define SQ_WAVE_MODE__CSP_MASK 0xE0000000L
29601//SQ_WAVE_STATUS
29602#define SQ_WAVE_STATUS__SCC__SHIFT 0x0
29603#define SQ_WAVE_STATUS__SPI_PRIO__SHIFT 0x1
29604#define SQ_WAVE_STATUS__USER_PRIO__SHIFT 0x3
29605#define SQ_WAVE_STATUS__PRIV__SHIFT 0x5
29606#define SQ_WAVE_STATUS__TRAP_EN__SHIFT 0x6
29607#define SQ_WAVE_STATUS__TTRACE_EN__SHIFT 0x7
29608#define SQ_WAVE_STATUS__EXPORT_RDY__SHIFT 0x8
29609#define SQ_WAVE_STATUS__EXECZ__SHIFT 0x9
29610#define SQ_WAVE_STATUS__VCCZ__SHIFT 0xa
29611#define SQ_WAVE_STATUS__IN_TG__SHIFT 0xb
29612#define SQ_WAVE_STATUS__IN_BARRIER__SHIFT 0xc
29613#define SQ_WAVE_STATUS__HALT__SHIFT 0xd
29614#define SQ_WAVE_STATUS__TRAP__SHIFT 0xe
29615#define SQ_WAVE_STATUS__TTRACE_CU_EN__SHIFT 0xf
29616#define SQ_WAVE_STATUS__VALID__SHIFT 0x10
29617#define SQ_WAVE_STATUS__ECC_ERR__SHIFT 0x11
29618#define SQ_WAVE_STATUS__SKIP_EXPORT__SHIFT 0x12
29619#define SQ_WAVE_STATUS__PERF_EN__SHIFT 0x13
29620#define SQ_WAVE_STATUS__ALLOW_REPLAY__SHIFT 0x16
29621#define SQ_WAVE_STATUS__FATAL_HALT__SHIFT 0x17
29622#define SQ_WAVE_STATUS__MUST_EXPORT__SHIFT 0x1b
29623#define SQ_WAVE_STATUS__SCC_MASK 0x00000001L
29624#define SQ_WAVE_STATUS__SPI_PRIO_MASK 0x00000006L
29625#define SQ_WAVE_STATUS__USER_PRIO_MASK 0x00000018L
29626#define SQ_WAVE_STATUS__PRIV_MASK 0x00000020L
29627#define SQ_WAVE_STATUS__TRAP_EN_MASK 0x00000040L
29628#define SQ_WAVE_STATUS__TTRACE_EN_MASK 0x00000080L
29629#define SQ_WAVE_STATUS__EXPORT_RDY_MASK 0x00000100L
29630#define SQ_WAVE_STATUS__EXECZ_MASK 0x00000200L
29631#define SQ_WAVE_STATUS__VCCZ_MASK 0x00000400L
29632#define SQ_WAVE_STATUS__IN_TG_MASK 0x00000800L
29633#define SQ_WAVE_STATUS__IN_BARRIER_MASK 0x00001000L
29634#define SQ_WAVE_STATUS__HALT_MASK 0x00002000L
29635#define SQ_WAVE_STATUS__TRAP_MASK 0x00004000L
29636#define SQ_WAVE_STATUS__TTRACE_CU_EN_MASK 0x00008000L
29637#define SQ_WAVE_STATUS__VALID_MASK 0x00010000L
29638#define SQ_WAVE_STATUS__ECC_ERR_MASK 0x00020000L
29639#define SQ_WAVE_STATUS__SKIP_EXPORT_MASK 0x00040000L
29640#define SQ_WAVE_STATUS__PERF_EN_MASK 0x00080000L
29641#define SQ_WAVE_STATUS__ALLOW_REPLAY_MASK 0x00400000L
29642#define SQ_WAVE_STATUS__FATAL_HALT_MASK 0x00800000L
29643#define SQ_WAVE_STATUS__MUST_EXPORT_MASK 0x08000000L
29644//SQ_WAVE_TRAPSTS
29645#define SQ_WAVE_TRAPSTS__EXCP__SHIFT 0x0
29646#define SQ_WAVE_TRAPSTS__SAVECTX__SHIFT 0xa
29647#define SQ_WAVE_TRAPSTS__ILLEGAL_INST__SHIFT 0xb
29648#define SQ_WAVE_TRAPSTS__EXCP_HI__SHIFT 0xc
29649#define SQ_WAVE_TRAPSTS__EXCP_CYCLE__SHIFT 0x10
29650#define SQ_WAVE_TRAPSTS__XNACK_ERROR__SHIFT 0x1c
29651#define SQ_WAVE_TRAPSTS__DP_RATE__SHIFT 0x1d
29652#define SQ_WAVE_TRAPSTS__EXCP_MASK 0x000001FFL
29653#define SQ_WAVE_TRAPSTS__SAVECTX_MASK 0x00000400L
29654#define SQ_WAVE_TRAPSTS__ILLEGAL_INST_MASK 0x00000800L
29655#define SQ_WAVE_TRAPSTS__EXCP_HI_MASK 0x00007000L
29656#define SQ_WAVE_TRAPSTS__EXCP_CYCLE_MASK 0x003F0000L
29657#define SQ_WAVE_TRAPSTS__XNACK_ERROR_MASK 0x10000000L
29658#define SQ_WAVE_TRAPSTS__DP_RATE_MASK 0xE0000000L
29659//SQ_WAVE_HW_ID
29660#define SQ_WAVE_HW_ID__WAVE_ID__SHIFT 0x0
29661#define SQ_WAVE_HW_ID__SIMD_ID__SHIFT 0x4
29662#define SQ_WAVE_HW_ID__PIPE_ID__SHIFT 0x6
29663#define SQ_WAVE_HW_ID__CU_ID__SHIFT 0x8
29664#define SQ_WAVE_HW_ID__SH_ID__SHIFT 0xc
29665#define SQ_WAVE_HW_ID__SE_ID__SHIFT 0xd
29666#define SQ_WAVE_HW_ID__TG_ID__SHIFT 0x10
29667#define SQ_WAVE_HW_ID__VM_ID__SHIFT 0x14
29668#define SQ_WAVE_HW_ID__QUEUE_ID__SHIFT 0x18
29669#define SQ_WAVE_HW_ID__STATE_ID__SHIFT 0x1b
29670#define SQ_WAVE_HW_ID__ME_ID__SHIFT 0x1e
29671#define SQ_WAVE_HW_ID__WAVE_ID_MASK 0x0000000FL
29672#define SQ_WAVE_HW_ID__SIMD_ID_MASK 0x00000030L
29673#define SQ_WAVE_HW_ID__PIPE_ID_MASK 0x000000C0L
29674#define SQ_WAVE_HW_ID__CU_ID_MASK 0x00000F00L
29675#define SQ_WAVE_HW_ID__SH_ID_MASK 0x00001000L
29676#define SQ_WAVE_HW_ID__SE_ID_MASK 0x00006000L
29677#define SQ_WAVE_HW_ID__TG_ID_MASK 0x000F0000L
29678#define SQ_WAVE_HW_ID__VM_ID_MASK 0x00F00000L
29679#define SQ_WAVE_HW_ID__QUEUE_ID_MASK 0x07000000L
29680#define SQ_WAVE_HW_ID__STATE_ID_MASK 0x38000000L
29681#define SQ_WAVE_HW_ID__ME_ID_MASK 0xC0000000L
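/*
 * Editorial sketch, not part of the generated header: SQ_WAVE_HW_ID packs a
 * wave's physical location (shader engine, CU, SIMD, wave slot) into one
 * word.  Hypothetical decode using the definitions directly above:
 */
static inline void sq_wave_hw_id_decode(unsigned int hw_id, unsigned int *se,
					unsigned int *cu, unsigned int *simd,
					unsigned int *wave)
{
	*se   = (hw_id & SQ_WAVE_HW_ID__SE_ID_MASK) >> SQ_WAVE_HW_ID__SE_ID__SHIFT;
	*cu   = (hw_id & SQ_WAVE_HW_ID__CU_ID_MASK) >> SQ_WAVE_HW_ID__CU_ID__SHIFT;
	*simd = (hw_id & SQ_WAVE_HW_ID__SIMD_ID_MASK) >> SQ_WAVE_HW_ID__SIMD_ID__SHIFT;
	*wave = (hw_id & SQ_WAVE_HW_ID__WAVE_ID_MASK) >> SQ_WAVE_HW_ID__WAVE_ID__SHIFT;
}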
29682//SQ_WAVE_GPR_ALLOC
29683#define SQ_WAVE_GPR_ALLOC__VGPR_BASE__SHIFT 0x0
29684#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE__SHIFT 0x8
29685#define SQ_WAVE_GPR_ALLOC__SGPR_BASE__SHIFT 0x10
29686#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE__SHIFT 0x18
29687#define SQ_WAVE_GPR_ALLOC__VGPR_BASE_MASK 0x0000003FL
29688#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE_MASK 0x00003F00L
29689#define SQ_WAVE_GPR_ALLOC__SGPR_BASE_MASK 0x003F0000L
29690#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE_MASK 0x0F000000L
29691//SQ_WAVE_LDS_ALLOC
29692#define SQ_WAVE_LDS_ALLOC__LDS_BASE__SHIFT 0x0
29693#define SQ_WAVE_LDS_ALLOC__LDS_SIZE__SHIFT 0xc
29694#define SQ_WAVE_LDS_ALLOC__LDS_BASE_MASK 0x000000FFL
29695#define SQ_WAVE_LDS_ALLOC__LDS_SIZE_MASK 0x001FF000L
29696//SQ_WAVE_IB_STS
29697#define SQ_WAVE_IB_STS__VM_CNT__SHIFT 0x0
29698#define SQ_WAVE_IB_STS__EXP_CNT__SHIFT 0x4
29699#define SQ_WAVE_IB_STS__LGKM_CNT__SHIFT 0x8
29700#define SQ_WAVE_IB_STS__VALU_CNT__SHIFT 0xc
29701#define SQ_WAVE_IB_STS__FIRST_REPLAY__SHIFT 0xf
29702#define SQ_WAVE_IB_STS__RCNT__SHIFT 0x10
29703#define SQ_WAVE_IB_STS__VM_CNT_HI__SHIFT 0x16
29704#define SQ_WAVE_IB_STS__VM_CNT_MASK 0x0000000FL
29705#define SQ_WAVE_IB_STS__EXP_CNT_MASK 0x00000070L
29706#define SQ_WAVE_IB_STS__LGKM_CNT_MASK 0x00000F00L
29707#define SQ_WAVE_IB_STS__VALU_CNT_MASK 0x00007000L
29708#define SQ_WAVE_IB_STS__FIRST_REPLAY_MASK 0x00008000L
29709#define SQ_WAVE_IB_STS__RCNT_MASK 0x001F0000L
29710#define SQ_WAVE_IB_STS__VM_CNT_HI_MASK 0x00C00000L
29711//SQ_WAVE_PC_LO
29712#define SQ_WAVE_PC_LO__PC_LO__SHIFT 0x0
29713#define SQ_WAVE_PC_LO__PC_LO_MASK 0xFFFFFFFFL
29714//SQ_WAVE_PC_HI
29715#define SQ_WAVE_PC_HI__PC_HI__SHIFT 0x0
29716#define SQ_WAVE_PC_HI__PC_HI_MASK 0x0000FFFFL
29717//SQ_WAVE_INST_DW0
29718#define SQ_WAVE_INST_DW0__INST_DW0__SHIFT 0x0
29719#define SQ_WAVE_INST_DW0__INST_DW0_MASK 0xFFFFFFFFL
29720//SQ_WAVE_INST_DW1
29721#define SQ_WAVE_INST_DW1__INST_DW1__SHIFT 0x0
29722#define SQ_WAVE_INST_DW1__INST_DW1_MASK 0xFFFFFFFFL
29723//SQ_WAVE_IB_DBG0
29724#define SQ_WAVE_IB_DBG0__IBUF_ST__SHIFT 0x0
29725#define SQ_WAVE_IB_DBG0__PC_INVALID__SHIFT 0x3
29726#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW__SHIFT 0x4
29727#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT__SHIFT 0x5
29728#define SQ_WAVE_IB_DBG0__IBUF_RPTR__SHIFT 0x8
29729#define SQ_WAVE_IB_DBG0__IBUF_WPTR__SHIFT 0xa
29730#define SQ_WAVE_IB_DBG0__INST_STR_ST__SHIFT 0x10
29731#define SQ_WAVE_IB_DBG0__ECC_ST__SHIFT 0x18
29732#define SQ_WAVE_IB_DBG0__IS_HYB__SHIFT 0x1a
29733#define SQ_WAVE_IB_DBG0__HYB_CNT__SHIFT 0x1b
29734#define SQ_WAVE_IB_DBG0__KILL__SHIFT 0x1d
29735#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH__SHIFT 0x1e
29736#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_HI__SHIFT 0x1f
29737#define SQ_WAVE_IB_DBG0__IBUF_ST_MASK 0x00000007L
29738#define SQ_WAVE_IB_DBG0__PC_INVALID_MASK 0x00000008L
29739#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW_MASK 0x00000010L
29740#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_MASK 0x000000E0L
29741#define SQ_WAVE_IB_DBG0__IBUF_RPTR_MASK 0x00000300L
29742#define SQ_WAVE_IB_DBG0__IBUF_WPTR_MASK 0x00000C00L
29743#define SQ_WAVE_IB_DBG0__INST_STR_ST_MASK 0x000F0000L
29744#define SQ_WAVE_IB_DBG0__ECC_ST_MASK 0x03000000L
29745#define SQ_WAVE_IB_DBG0__IS_HYB_MASK 0x04000000L
29746#define SQ_WAVE_IB_DBG0__HYB_CNT_MASK 0x18000000L
29747#define SQ_WAVE_IB_DBG0__KILL_MASK 0x20000000L
29748#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH_MASK 0x40000000L
29749#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_HI_MASK 0x80000000L
29750//SQ_WAVE_IB_DBG1
29751#define SQ_WAVE_IB_DBG1__IXNACK__SHIFT 0x0
29752#define SQ_WAVE_IB_DBG1__XNACK__SHIFT 0x1
29753#define SQ_WAVE_IB_DBG1__TA_NEED_RESET__SHIFT 0x2
29754#define SQ_WAVE_IB_DBG1__XCNT__SHIFT 0x4
29755#define SQ_WAVE_IB_DBG1__QCNT__SHIFT 0xb
29756#define SQ_WAVE_IB_DBG1__RCNT__SHIFT 0x12
29757#define SQ_WAVE_IB_DBG1__MISC_CNT__SHIFT 0x19
29758#define SQ_WAVE_IB_DBG1__IXNACK_MASK 0x00000001L
29759#define SQ_WAVE_IB_DBG1__XNACK_MASK 0x00000002L
29760#define SQ_WAVE_IB_DBG1__TA_NEED_RESET_MASK 0x00000004L
29761#define SQ_WAVE_IB_DBG1__XCNT_MASK 0x000001F0L
29762#define SQ_WAVE_IB_DBG1__QCNT_MASK 0x0000F800L
29763#define SQ_WAVE_IB_DBG1__RCNT_MASK 0x007C0000L
29764#define SQ_WAVE_IB_DBG1__MISC_CNT_MASK 0xFE000000L
29765//SQ_WAVE_FLUSH_IB
29766#define SQ_WAVE_FLUSH_IB__UNUSED__SHIFT 0x0
29767#define SQ_WAVE_FLUSH_IB__UNUSED_MASK 0xFFFFFFFFL
29768//SQ_WAVE_TTMP0
29769#define SQ_WAVE_TTMP0__DATA__SHIFT 0x0
29770#define SQ_WAVE_TTMP0__DATA_MASK 0xFFFFFFFFL
29771//SQ_WAVE_TTMP1
29772#define SQ_WAVE_TTMP1__DATA__SHIFT 0x0
29773#define SQ_WAVE_TTMP1__DATA_MASK 0xFFFFFFFFL
29774//SQ_WAVE_TTMP2
29775#define SQ_WAVE_TTMP2__DATA__SHIFT 0x0
29776#define SQ_WAVE_TTMP2__DATA_MASK 0xFFFFFFFFL
29777//SQ_WAVE_TTMP3
29778#define SQ_WAVE_TTMP3__DATA__SHIFT 0x0
29779#define SQ_WAVE_TTMP3__DATA_MASK 0xFFFFFFFFL
29780//SQ_WAVE_TTMP4
29781#define SQ_WAVE_TTMP4__DATA__SHIFT 0x0
29782#define SQ_WAVE_TTMP4__DATA_MASK 0xFFFFFFFFL
29783//SQ_WAVE_TTMP5
29784#define SQ_WAVE_TTMP5__DATA__SHIFT 0x0
29785#define SQ_WAVE_TTMP5__DATA_MASK 0xFFFFFFFFL
29786//SQ_WAVE_TTMP6
29787#define SQ_WAVE_TTMP6__DATA__SHIFT 0x0
29788#define SQ_WAVE_TTMP6__DATA_MASK 0xFFFFFFFFL
29789//SQ_WAVE_TTMP7
29790#define SQ_WAVE_TTMP7__DATA__SHIFT 0x0
29791#define SQ_WAVE_TTMP7__DATA_MASK 0xFFFFFFFFL
29792//SQ_WAVE_TTMP8
29793#define SQ_WAVE_TTMP8__DATA__SHIFT 0x0
29794#define SQ_WAVE_TTMP8__DATA_MASK 0xFFFFFFFFL
29795//SQ_WAVE_TTMP9
29796#define SQ_WAVE_TTMP9__DATA__SHIFT 0x0
29797#define SQ_WAVE_TTMP9__DATA_MASK 0xFFFFFFFFL
29798//SQ_WAVE_TTMP10
29799#define SQ_WAVE_TTMP10__DATA__SHIFT 0x0
29800#define SQ_WAVE_TTMP10__DATA_MASK 0xFFFFFFFFL
29801//SQ_WAVE_TTMP11
29802#define SQ_WAVE_TTMP11__DATA__SHIFT 0x0
29803#define SQ_WAVE_TTMP11__DATA_MASK 0xFFFFFFFFL
29804//SQ_WAVE_TTMP12
29805#define SQ_WAVE_TTMP12__DATA__SHIFT 0x0
29806#define SQ_WAVE_TTMP12__DATA_MASK 0xFFFFFFFFL
29807//SQ_WAVE_TTMP13
29808#define SQ_WAVE_TTMP13__DATA__SHIFT 0x0
29809#define SQ_WAVE_TTMP13__DATA_MASK 0xFFFFFFFFL
29810//SQ_WAVE_TTMP14
29811#define SQ_WAVE_TTMP14__DATA__SHIFT 0x0
29812#define SQ_WAVE_TTMP14__DATA_MASK 0xFFFFFFFFL
29813//SQ_WAVE_TTMP15
29814#define SQ_WAVE_TTMP15__DATA__SHIFT 0x0
29815#define SQ_WAVE_TTMP15__DATA_MASK 0xFFFFFFFFL
29816//SQ_WAVE_M0
29817#define SQ_WAVE_M0__M0__SHIFT 0x0
29818#define SQ_WAVE_M0__M0_MASK 0xFFFFFFFFL
29819//SQ_WAVE_EXEC_LO
29820#define SQ_WAVE_EXEC_LO__EXEC_LO__SHIFT 0x0
29821#define SQ_WAVE_EXEC_LO__EXEC_LO_MASK 0xFFFFFFFFL
29822//SQ_WAVE_EXEC_HI
29823#define SQ_WAVE_EXEC_HI__EXEC_HI__SHIFT 0x0
29824#define SQ_WAVE_EXEC_HI__EXEC_HI_MASK 0xFFFFFFFFL
29825//SQ_INTERRUPT_WORD_AUTO_CTXID
29826#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0x0
29827#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 0x1
29828#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 0x2
29829#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 0x3
29830#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 0x4
29831#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 0x5
29832#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 0x6
29833#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 0x7
29834#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 0x8
29835#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 0x18
29836#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 0x1a
29837#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x0000001L
29838#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x0000002L
29839#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x0000004L
29840#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x0000008L
29841#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x0000010L
29842#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x0000020L
29843#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x0000040L
29844#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x0000080L
29845#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x0000100L
29846#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x3000000L
29847#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0xC000000L
29848//SQ_INTERRUPT_WORD_AUTO_HI
29849#define SQ_INTERRUPT_WORD_AUTO_HI__SE_ID__SHIFT 0x8
29850#define SQ_INTERRUPT_WORD_AUTO_HI__ENCODING__SHIFT 0xa
29851#define SQ_INTERRUPT_WORD_AUTO_HI__SE_ID_MASK 0x300L
29852#define SQ_INTERRUPT_WORD_AUTO_HI__ENCODING_MASK 0xC00L
29853//SQ_INTERRUPT_WORD_AUTO_LO
29854#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE__SHIFT 0x0
29855#define SQ_INTERRUPT_WORD_AUTO_LO__WLT__SHIFT 0x1
29856#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_BUF_FULL__SHIFT 0x2
29857#define SQ_INTERRUPT_WORD_AUTO_LO__REG_TIMESTAMP__SHIFT 0x3
29858#define SQ_INTERRUPT_WORD_AUTO_LO__CMD_TIMESTAMP__SHIFT 0x4
29859#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_CMD_OVERFLOW__SHIFT 0x5
29860#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_REG_OVERFLOW__SHIFT 0x6
29861#define SQ_INTERRUPT_WORD_AUTO_LO__IMMED_OVERFLOW__SHIFT 0x7
29862#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_UTC_ERROR__SHIFT 0x8
29863#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_MASK 0x001L
29864#define SQ_INTERRUPT_WORD_AUTO_LO__WLT_MASK 0x002L
29865#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_BUF_FULL_MASK 0x004L
29866#define SQ_INTERRUPT_WORD_AUTO_LO__REG_TIMESTAMP_MASK 0x008L
29867#define SQ_INTERRUPT_WORD_AUTO_LO__CMD_TIMESTAMP_MASK 0x010L
29868#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_CMD_OVERFLOW_MASK 0x020L
29869#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_REG_OVERFLOW_MASK 0x040L
29870#define SQ_INTERRUPT_WORD_AUTO_LO__IMMED_OVERFLOW_MASK 0x080L
29871#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_UTC_ERROR_MASK 0x100L
29872//SQ_INTERRUPT_WORD_CMN_CTXID
29873#define SQ_INTERRUPT_WORD_CMN_CTXID__SE_ID__SHIFT 0x18
29874#define SQ_INTERRUPT_WORD_CMN_CTXID__ENCODING__SHIFT 0x1a
29875#define SQ_INTERRUPT_WORD_CMN_CTXID__SE_ID_MASK 0x3000000L
29876#define SQ_INTERRUPT_WORD_CMN_CTXID__ENCODING_MASK 0xC000000L
29877//SQ_INTERRUPT_WORD_CMN_HI
29878#define SQ_INTERRUPT_WORD_CMN_HI__SE_ID__SHIFT 0x8
29879#define SQ_INTERRUPT_WORD_CMN_HI__ENCODING__SHIFT 0xa
29880#define SQ_INTERRUPT_WORD_CMN_HI__SE_ID_MASK 0x300L
29881#define SQ_INTERRUPT_WORD_CMN_HI__ENCODING_MASK 0xC00L
29882//SQ_INTERRUPT_WORD_WAVE_CTXID
29883#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0x0
29884#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 0xc
29885#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 0xd
29886#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 0xe
29887#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 0x12
29888#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 0x14
29889#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 0x18
29890#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 0x1a
29891#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x0000FFFL
29892#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x0001000L
29893#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x0002000L
29894#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x003C000L
29895#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x00C0000L
29896#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x0F00000L
29897#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x3000000L
29898#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0xC000000L
29899//SQ_INTERRUPT_WORD_WAVE_HI
29900#define SQ_INTERRUPT_WORD_WAVE_HI__CU_ID__SHIFT 0x0
29901#define SQ_INTERRUPT_WORD_WAVE_HI__VM_ID__SHIFT 0x4
29902#define SQ_INTERRUPT_WORD_WAVE_HI__SE_ID__SHIFT 0x8
29903#define SQ_INTERRUPT_WORD_WAVE_HI__ENCODING__SHIFT 0xa
29904#define SQ_INTERRUPT_WORD_WAVE_HI__CU_ID_MASK 0x00FL
29905#define SQ_INTERRUPT_WORD_WAVE_HI__VM_ID_MASK 0x0F0L
29906#define SQ_INTERRUPT_WORD_WAVE_HI__SE_ID_MASK 0x300L
29907#define SQ_INTERRUPT_WORD_WAVE_HI__ENCODING_MASK 0xC00L
29908//SQ_INTERRUPT_WORD_WAVE_LO
29909#define SQ_INTERRUPT_WORD_WAVE_LO__DATA__SHIFT 0x0
29910#define SQ_INTERRUPT_WORD_WAVE_LO__SH_ID__SHIFT 0x18
29911#define SQ_INTERRUPT_WORD_WAVE_LO__PRIV__SHIFT 0x19
29912#define SQ_INTERRUPT_WORD_WAVE_LO__WAVE_ID__SHIFT 0x1a
29913#define SQ_INTERRUPT_WORD_WAVE_LO__SIMD_ID__SHIFT 0x1e
29914#define SQ_INTERRUPT_WORD_WAVE_LO__DATA_MASK 0x00FFFFFFL
29915#define SQ_INTERRUPT_WORD_WAVE_LO__SH_ID_MASK 0x01000000L
29916#define SQ_INTERRUPT_WORD_WAVE_LO__PRIV_MASK 0x02000000L
29917#define SQ_INTERRUPT_WORD_WAVE_LO__WAVE_ID_MASK 0x3C000000L
29918#define SQ_INTERRUPT_WORD_WAVE_LO__SIMD_ID_MASK 0xC0000000L
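/*
 * Illustration only, not part of the generated header: every register above is
 * described by a <FIELD>__SHIFT / <FIELD>_MASK pair, so a field is read by
 * masking the 32-bit word and shifting it down.  A minimal, self-contained
 * sketch of that pattern, decoding the SE_ID field of a wave-interrupt context
 * word; the helper name sq_intr_wave_se_id() is hypothetical and chosen here
 * only for the example.
 */
static inline unsigned int sq_intr_wave_se_id(unsigned int ctxid)
{
	return (ctxid & SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK) >>
	       SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT;
}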
29927// addressBlock: didtind
29928//DIDT_SQ_CTRL0
29929#define DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
29930#define DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT 0x1
29931#define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
29932#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
29933#define DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
29934#define DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
29935#define DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
29936#define DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
29937#define DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
29938#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
29939#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
29940#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x1b
29941#define DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
29942#define DIDT_SQ_CTRL0__PHASE_OFFSET_MASK 0x00000006L
29943#define DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
29944#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
29945#define DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
29946#define DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
29947#define DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
29948#define DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
29949#define DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
29950#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
29951#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
29952#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xF8000000L
29953//DIDT_SQ_CTRL1
29954#define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0
29955#define DIDT_SQ_CTRL1__MAX_POWER__SHIFT 0x10
29956#define DIDT_SQ_CTRL1__MIN_POWER_MASK 0x0000FFFFL
29957#define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xFFFF0000L
29958//DIDT_SQ_CTRL2
29959#define DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
29960#define DIDT_SQ_CTRL2__UNUSED_0__SHIFT 0xe
29961#define DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
29962#define DIDT_SQ_CTRL2__UNUSED_1__SHIFT 0x1a
29963#define DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
29964#define DIDT_SQ_CTRL2__UNUSED_2__SHIFT 0x1f
29965#define DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
29966#define DIDT_SQ_CTRL2__UNUSED_0_MASK 0x0000C000L
29967#define DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
29968#define DIDT_SQ_CTRL2__UNUSED_1_MASK 0x04000000L
29969#define DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
29970#define DIDT_SQ_CTRL2__UNUSED_2_MASK 0x80000000L
29971//DIDT_SQ_STALL_CTRL
29972#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
29973#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
29974#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
29975#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
29976#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT 0x18
29977#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
29978#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
29979#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
29980#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
29981#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK 0xFF000000L
29982//DIDT_SQ_TUNING_CTRL
29983#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
29984#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
29985#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
29986#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
29987//DIDT_SQ_STALL_AUTO_RELEASE_CTRL
29988#define DIDT_SQ_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
29989#define DIDT_SQ_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
29990//DIDT_SQ_CTRL3
29991#define DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
29992#define DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
29993#define DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT 0x2
29994#define DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
29995#define DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
29996#define DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
29997#define DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
29998#define DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
29999#define DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
30000#define DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
30001#define DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
30002#define DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
30003#define DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
30004#define DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
30005#define DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
30006#define DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
30007#define DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
30008#define DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
30009#define DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
30010#define DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
30011#define DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
30012#define DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
30013#define DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
30014#define DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
30015//DIDT_SQ_STALL_PATTERN_1_2
30016#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
30017#define DIDT_SQ_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
30018#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
30019#define DIDT_SQ_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
30020#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
30021#define DIDT_SQ_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
30022#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
30023#define DIDT_SQ_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
30024//DIDT_SQ_STALL_PATTERN_3_4
30025#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
30026#define DIDT_SQ_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
30027#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
30028#define DIDT_SQ_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
30029#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
30030#define DIDT_SQ_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
30031#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
30032#define DIDT_SQ_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
30033//DIDT_SQ_STALL_PATTERN_5_6
30034#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
30035#define DIDT_SQ_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
30036#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
30037#define DIDT_SQ_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
30038#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
30039#define DIDT_SQ_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
30040#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
30041#define DIDT_SQ_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
30042//DIDT_SQ_STALL_PATTERN_7
30043#define DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
30044#define DIDT_SQ_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
30045#define DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
30046#define DIDT_SQ_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
30047//DIDT_SQ_WEIGHT0_3
30048#define DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT 0x0
30049#define DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT 0x8
30050#define DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT 0x10
30051#define DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT 0x18
30052#define DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
30053#define DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
30054#define DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
30055#define DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
30056//DIDT_SQ_WEIGHT4_7
30057#define DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT 0x0
30058#define DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT 0x8
30059#define DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT 0x10
30060#define DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT 0x18
30061#define DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
30062#define DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
30063#define DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
30064#define DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
30065//DIDT_SQ_WEIGHT8_11
30066#define DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT 0x0
30067#define DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT 0x8
30068#define DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT 0x10
30069#define DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT 0x18
30070#define DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
30071#define DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
30072#define DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
30073#define DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
30074//DIDT_SQ_EDC_CTRL
30075#define DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT 0x0
30076#define DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
30077#define DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
30078#define DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
30079#define DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
30080#define DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
30081#define DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
30082#define DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
30083#define DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
30084#define DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
30085#define DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
30086#define DIDT_SQ_EDC_CTRL__UNUSED_0__SHIFT 0x17
30087#define DIDT_SQ_EDC_CTRL__EDC_EN_MASK 0x00000001L
30088#define DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
30089#define DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
30090#define DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
30091#define DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
30092#define DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
30093#define DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
30094#define DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
30095#define DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
30096#define DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
30097#define DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
30098#define DIDT_SQ_EDC_CTRL__UNUSED_0_MASK 0xFF800000L
30099//DIDT_SQ_EDC_THRESHOLD
30100#define DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
30101#define DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
30102//DIDT_SQ_EDC_STALL_PATTERN_1_2
30103#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
30104#define DIDT_SQ_EDC_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
30105#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
30106#define DIDT_SQ_EDC_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
30107#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
30108#define DIDT_SQ_EDC_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
30109#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
30110#define DIDT_SQ_EDC_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
30111//DIDT_SQ_EDC_STALL_PATTERN_3_4
30112#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
30113#define DIDT_SQ_EDC_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
30114#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
30115#define DIDT_SQ_EDC_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
30116#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
30117#define DIDT_SQ_EDC_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
30118#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
30119#define DIDT_SQ_EDC_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
30120//DIDT_SQ_EDC_STALL_PATTERN_5_6
30121#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
30122#define DIDT_SQ_EDC_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
30123#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
30124#define DIDT_SQ_EDC_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
30125#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
30126#define DIDT_SQ_EDC_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
30127#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
30128#define DIDT_SQ_EDC_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
30129//DIDT_SQ_EDC_STALL_PATTERN_7
30130#define DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
30131#define DIDT_SQ_EDC_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
30132#define DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
30133#define DIDT_SQ_EDC_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
30134//DIDT_SQ_EDC_STATUS
30135#define DIDT_SQ_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
30136#define DIDT_SQ_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
30137#define DIDT_SQ_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
30138#define DIDT_SQ_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
30139//DIDT_SQ_EDC_STALL_DELAY_1
30140#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT 0x0
30141#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT 0x6
30142#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT 0xc
30143#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT 0x12
30144#define DIDT_SQ_EDC_STALL_DELAY_1__UNUSED__SHIFT 0x18
30145#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK 0x0000003FL
30146#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK 0x00000FC0L
30147#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK 0x0003F000L
30148#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK 0x00FC0000L
30149#define DIDT_SQ_EDC_STALL_DELAY_1__UNUSED_MASK 0xFF000000L
30150//DIDT_SQ_EDC_STALL_DELAY_2
30151#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT 0x0
30152#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5__SHIFT 0x6
30153#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6__SHIFT 0xc
30154#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7__SHIFT 0x12
30155#define DIDT_SQ_EDC_STALL_DELAY_2__UNUSED__SHIFT 0x18
30156#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK 0x0000003FL
30157#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5_MASK 0x00000FC0L
30158#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6_MASK 0x0003F000L
30159#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7_MASK 0x00FC0000L
30160#define DIDT_SQ_EDC_STALL_DELAY_2__UNUSED_MASK 0xFF000000L
30161//DIDT_SQ_EDC_STALL_DELAY_3
30162#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8__SHIFT 0x0
30163#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9__SHIFT 0x6
30164#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10__SHIFT 0xc
30165#define DIDT_SQ_EDC_STALL_DELAY_3__UNUSED__SHIFT 0x12
30166#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8_MASK 0x0000003FL
30167#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9_MASK 0x00000FC0L
30168#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10_MASK 0x0003F000L
30169#define DIDT_SQ_EDC_STALL_DELAY_3__UNUSED_MASK 0xFFFC0000L
30170//DIDT_SQ_EDC_OVERFLOW
30171#define DIDT_SQ_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
30172#define DIDT_SQ_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
30173#define DIDT_SQ_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
30174#define DIDT_SQ_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
30175//DIDT_SQ_EDC_ROLLING_POWER_DELTA
30176#define DIDT_SQ_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
30177#define DIDT_SQ_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
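/*
 * Illustration only, not part of the generated header: writing a field with
 * these pairs is the mirror image of reading one - clear the field with its
 * mask, then OR in the new value shifted into place.  A minimal sketch against
 * DIDT_SQ_CTRL0; didt_sq_ctrl0_set_threshold() is a hypothetical helper name
 * used only for this example.
 */
static inline unsigned int didt_sq_ctrl0_set_threshold(unsigned int reg_val,
						       unsigned int thresh)
{
	reg_val &= ~DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK;
	reg_val |= (thresh << DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT) &
		   DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK;
	return reg_val;
}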
30178//DIDT_DB_CTRL0
30179#define DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
30180#define DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT 0x1
30181#define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
30182#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
30183#define DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
30184#define DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
30185#define DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
30186#define DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
30187#define DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
30188#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
30189#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
30190#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x1b
30191#define DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
30192#define DIDT_DB_CTRL0__PHASE_OFFSET_MASK 0x00000006L
30193#define DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
30194#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
30195#define DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
30196#define DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
30197#define DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
30198#define DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
30199#define DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
30200#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
30201#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
30202#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xF8000000L
30203//DIDT_DB_CTRL1
30204#define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0
30205#define DIDT_DB_CTRL1__MAX_POWER__SHIFT 0x10
30206#define DIDT_DB_CTRL1__MIN_POWER_MASK 0x0000FFFFL
30207#define DIDT_DB_CTRL1__MAX_POWER_MASK 0xFFFF0000L
30208//DIDT_DB_CTRL2
30209#define DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
30210#define DIDT_DB_CTRL2__UNUSED_0__SHIFT 0xe
30211#define DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
30212#define DIDT_DB_CTRL2__UNUSED_1__SHIFT 0x1a
30213#define DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
30214#define DIDT_DB_CTRL2__UNUSED_2__SHIFT 0x1f
30215#define DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
30216#define DIDT_DB_CTRL2__UNUSED_0_MASK 0x0000C000L
30217#define DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
30218#define DIDT_DB_CTRL2__UNUSED_1_MASK 0x04000000L
30219#define DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
30220#define DIDT_DB_CTRL2__UNUSED_2_MASK 0x80000000L
30221//DIDT_DB_STALL_CTRL
30222#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
30223#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
30224#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
30225#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
30226#define DIDT_DB_STALL_CTRL__UNUSED_0__SHIFT 0x18
30227#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
30228#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
30229#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
30230#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
30231#define DIDT_DB_STALL_CTRL__UNUSED_0_MASK 0xFF000000L
30232//DIDT_DB_TUNING_CTRL
30233#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
30234#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
30235#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
30236#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
30237//DIDT_DB_STALL_AUTO_RELEASE_CTRL
30238#define DIDT_DB_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
30239#define DIDT_DB_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
30240//DIDT_DB_CTRL3
30241#define DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
30242#define DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
30243#define DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT 0x2
30244#define DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
30245#define DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
30246#define DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
30247#define DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
30248#define DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
30249#define DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
30250#define DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
30251#define DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
30252#define DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
30253#define DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
30254#define DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
30255#define DIDT_DB_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
30256#define DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
30257#define DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
30258#define DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
30259#define DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
30260#define DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
30261#define DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
30262#define DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
30263#define DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
30264#define DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
30265//DIDT_DB_STALL_PATTERN_1_2
30266#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
30267#define DIDT_DB_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
30268#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
30269#define DIDT_DB_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
30270#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
30271#define DIDT_DB_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
30272#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
30273#define DIDT_DB_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
30274//DIDT_DB_STALL_PATTERN_3_4
30275#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
30276#define DIDT_DB_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
30277#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
30278#define DIDT_DB_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
30279#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
30280#define DIDT_DB_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
30281#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
30282#define DIDT_DB_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
30283//DIDT_DB_STALL_PATTERN_5_6
30284#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
30285#define DIDT_DB_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
30286#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
30287#define DIDT_DB_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
30288#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
30289#define DIDT_DB_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
30290#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
30291#define DIDT_DB_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
30292//DIDT_DB_STALL_PATTERN_7
30293#define DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
30294#define DIDT_DB_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
30295#define DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
30296#define DIDT_DB_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
30297//DIDT_DB_WEIGHT0_3
30298#define DIDT_DB_WEIGHT0_3__WEIGHT0__SHIFT 0x0
30299#define DIDT_DB_WEIGHT0_3__WEIGHT1__SHIFT 0x8
30300#define DIDT_DB_WEIGHT0_3__WEIGHT2__SHIFT 0x10
30301#define DIDT_DB_WEIGHT0_3__WEIGHT3__SHIFT 0x18
30302#define DIDT_DB_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
30303#define DIDT_DB_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
30304#define DIDT_DB_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
30305#define DIDT_DB_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
30306//DIDT_DB_WEIGHT4_7
30307#define DIDT_DB_WEIGHT4_7__WEIGHT4__SHIFT 0x0
30308#define DIDT_DB_WEIGHT4_7__WEIGHT5__SHIFT 0x8
30309#define DIDT_DB_WEIGHT4_7__WEIGHT6__SHIFT 0x10
30310#define DIDT_DB_WEIGHT4_7__WEIGHT7__SHIFT 0x18
30311#define DIDT_DB_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
30312#define DIDT_DB_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
30313#define DIDT_DB_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
30314#define DIDT_DB_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
30315//DIDT_DB_WEIGHT8_11
30316#define DIDT_DB_WEIGHT8_11__WEIGHT8__SHIFT 0x0
30317#define DIDT_DB_WEIGHT8_11__WEIGHT9__SHIFT 0x8
30318#define DIDT_DB_WEIGHT8_11__WEIGHT10__SHIFT 0x10
30319#define DIDT_DB_WEIGHT8_11__WEIGHT11__SHIFT 0x18
30320#define DIDT_DB_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
30321#define DIDT_DB_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
30322#define DIDT_DB_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
30323#define DIDT_DB_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
30324//DIDT_DB_EDC_CTRL
30325#define DIDT_DB_EDC_CTRL__EDC_EN__SHIFT 0x0
30326#define DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
30327#define DIDT_DB_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
30328#define DIDT_DB_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
30329#define DIDT_DB_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
30330#define DIDT_DB_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
30331#define DIDT_DB_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
30332#define DIDT_DB_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
30333#define DIDT_DB_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
30334#define DIDT_DB_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
30335#define DIDT_DB_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
30336#define DIDT_DB_EDC_CTRL__UNUSED_0__SHIFT 0x17
30337#define DIDT_DB_EDC_CTRL__EDC_EN_MASK 0x00000001L
30338#define DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
30339#define DIDT_DB_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
30340#define DIDT_DB_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
30341#define DIDT_DB_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
30342#define DIDT_DB_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
30343#define DIDT_DB_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
30344#define DIDT_DB_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
30345#define DIDT_DB_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
30346#define DIDT_DB_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
30347#define DIDT_DB_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
30348#define DIDT_DB_EDC_CTRL__UNUSED_0_MASK 0xFF800000L
30349//DIDT_DB_EDC_THRESHOLD
30350#define DIDT_DB_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
30351#define DIDT_DB_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
30352//DIDT_DB_EDC_STALL_PATTERN_1_2
30353#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
30354#define DIDT_DB_EDC_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
30355#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
30356#define DIDT_DB_EDC_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
30357#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
30358#define DIDT_DB_EDC_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
30359#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
30360#define DIDT_DB_EDC_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
30361//DIDT_DB_EDC_STALL_PATTERN_3_4
30362#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
30363#define DIDT_DB_EDC_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
30364#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
30365#define DIDT_DB_EDC_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
30366#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
30367#define DIDT_DB_EDC_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
30368#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
30369#define DIDT_DB_EDC_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
30370//DIDT_DB_EDC_STALL_PATTERN_5_6
30371#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
30372#define DIDT_DB_EDC_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
30373#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
30374#define DIDT_DB_EDC_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
30375#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
30376#define DIDT_DB_EDC_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
30377#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
30378#define DIDT_DB_EDC_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
30379//DIDT_DB_EDC_STALL_PATTERN_7
30380#define DIDT_DB_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
30381#define DIDT_DB_EDC_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
30382#define DIDT_DB_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
30383#define DIDT_DB_EDC_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
30384//DIDT_DB_EDC_STATUS
30385#define DIDT_DB_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
30386#define DIDT_DB_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
30387#define DIDT_DB_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
30388#define DIDT_DB_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
30389//DIDT_DB_EDC_STALL_DELAY_1
30390#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB0__SHIFT 0x0
30391#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB1__SHIFT 0x3
30392#define DIDT_DB_EDC_STALL_DELAY_1__UNUSED__SHIFT 0x6
30393#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB0_MASK 0x00000007L
30394#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB1_MASK 0x00000038L
30395#define DIDT_DB_EDC_STALL_DELAY_1__UNUSED_MASK 0xFFFFFFC0L
30396//DIDT_DB_EDC_OVERFLOW
30397#define DIDT_DB_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
30398#define DIDT_DB_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
30399#define DIDT_DB_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
30400#define DIDT_DB_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
30401//DIDT_DB_EDC_ROLLING_POWER_DELTA
30402#define DIDT_DB_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
30403#define DIDT_DB_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
30404//DIDT_TD_CTRL0
30405#define DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
30406#define DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT 0x1
30407#define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
30408#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
30409#define DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
30410#define DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
30411#define DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
30412#define DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
30413#define DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
30414#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
30415#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
30416#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x1b
30417#define DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
30418#define DIDT_TD_CTRL0__PHASE_OFFSET_MASK 0x00000006L
30419#define DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
30420#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
30421#define DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
30422#define DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
30423#define DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
30424#define DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
30425#define DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
30426#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
30427#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
30428#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xF8000000L
30429//DIDT_TD_CTRL1
30430#define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0
30431#define DIDT_TD_CTRL1__MAX_POWER__SHIFT 0x10
30432#define DIDT_TD_CTRL1__MIN_POWER_MASK 0x0000FFFFL
30433#define DIDT_TD_CTRL1__MAX_POWER_MASK 0xFFFF0000L
30434//DIDT_TD_CTRL2
30435#define DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
30436#define DIDT_TD_CTRL2__UNUSED_0__SHIFT 0xe
30437#define DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
30438#define DIDT_TD_CTRL2__UNUSED_1__SHIFT 0x1a
30439#define DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
30440#define DIDT_TD_CTRL2__UNUSED_2__SHIFT 0x1f
30441#define DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
30442#define DIDT_TD_CTRL2__UNUSED_0_MASK 0x0000C000L
30443#define DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
30444#define DIDT_TD_CTRL2__UNUSED_1_MASK 0x04000000L
30445#define DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
30446#define DIDT_TD_CTRL2__UNUSED_2_MASK 0x80000000L
30447//DIDT_TD_STALL_CTRL
30448#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
30449#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
30450#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
30451#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
30452#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT 0x18
30453#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
30454#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
30455#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
30456#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
30457#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK 0xFF000000L
30458//DIDT_TD_TUNING_CTRL
30459#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
30460#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
30461#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
30462#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
30463//DIDT_TD_STALL_AUTO_RELEASE_CTRL
30464#define DIDT_TD_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
30465#define DIDT_TD_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
30466//DIDT_TD_CTRL3
30467#define DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
30468#define DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
30469#define DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT 0x2
30470#define DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
30471#define DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
30472#define DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
30473#define DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
30474#define DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
30475#define DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
30476#define DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
30477#define DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
30478#define DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
30479#define DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
30480#define DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
30481#define DIDT_TD_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
30482#define DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
30483#define DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
30484#define DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
30485#define DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
30486#define DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
30487#define DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
30488#define DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
30489#define DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
30490#define DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
30491//DIDT_TD_STALL_PATTERN_1_2
30492#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
30493#define DIDT_TD_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
30494#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
30495#define DIDT_TD_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
30496#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
30497#define DIDT_TD_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
30498#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
30499#define DIDT_TD_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
30500//DIDT_TD_STALL_PATTERN_3_4
30501#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
30502#define DIDT_TD_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
30503#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
30504#define DIDT_TD_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
30505#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
30506#define DIDT_TD_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
30507#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
30508#define DIDT_TD_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
30509//DIDT_TD_STALL_PATTERN_5_6
30510#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
30511#define DIDT_TD_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
30512#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
30513#define DIDT_TD_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
30514#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
30515#define DIDT_TD_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
30516#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
30517#define DIDT_TD_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
30518//DIDT_TD_STALL_PATTERN_7
30519#define DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
30520#define DIDT_TD_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
30521#define DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
30522#define DIDT_TD_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
30523//DIDT_TD_WEIGHT0_3
30524#define DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT 0x0
30525#define DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT 0x8
30526#define DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT 0x10
30527#define DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT 0x18
30528#define DIDT_TD_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
30529#define DIDT_TD_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
30530#define DIDT_TD_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
30531#define DIDT_TD_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
30532//DIDT_TD_WEIGHT4_7
30533#define DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT 0x0
30534#define DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT 0x8
30535#define DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT 0x10
30536#define DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT 0x18
30537#define DIDT_TD_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
30538#define DIDT_TD_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
30539#define DIDT_TD_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
30540#define DIDT_TD_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
30541//DIDT_TD_WEIGHT8_11
30542#define DIDT_TD_WEIGHT8_11__WEIGHT8__SHIFT 0x0
30543#define DIDT_TD_WEIGHT8_11__WEIGHT9__SHIFT 0x8
30544#define DIDT_TD_WEIGHT8_11__WEIGHT10__SHIFT 0x10
30545#define DIDT_TD_WEIGHT8_11__WEIGHT11__SHIFT 0x18
30546#define DIDT_TD_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
30547#define DIDT_TD_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
30548#define DIDT_TD_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
30549#define DIDT_TD_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
30550//DIDT_TD_EDC_CTRL
30551#define DIDT_TD_EDC_CTRL__EDC_EN__SHIFT 0x0
30552#define DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
30553#define DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
30554#define DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
30555#define DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
30556#define DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
30557#define DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
30558#define DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
30559#define DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
30560#define DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
30561#define DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
30562#define DIDT_TD_EDC_CTRL__UNUSED_0__SHIFT 0x17
30563#define DIDT_TD_EDC_CTRL__EDC_EN_MASK 0x00000001L
30564#define DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
30565#define DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
30566#define DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
30567#define DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
30568#define DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
30569#define DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
30570#define DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
30571#define DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
30572#define DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
30573#define DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
30574#define DIDT_TD_EDC_CTRL__UNUSED_0_MASK 0xFF800000L
30575//DIDT_TD_EDC_THRESHOLD
30576#define DIDT_TD_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
30577#define DIDT_TD_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
30578//DIDT_TD_EDC_STALL_PATTERN_1_2
30579#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
30580#define DIDT_TD_EDC_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
30581#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
30582#define DIDT_TD_EDC_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
30583#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
30584#define DIDT_TD_EDC_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
30585#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
30586#define DIDT_TD_EDC_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
30587//DIDT_TD_EDC_STALL_PATTERN_3_4
30588#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
30589#define DIDT_TD_EDC_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
30590#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
30591#define DIDT_TD_EDC_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
30592#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
30593#define DIDT_TD_EDC_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
30594#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
30595#define DIDT_TD_EDC_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
30596//DIDT_TD_EDC_STALL_PATTERN_5_6
30597#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
30598#define DIDT_TD_EDC_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
30599#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
30600#define DIDT_TD_EDC_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
30601#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
30602#define DIDT_TD_EDC_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
30603#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
30604#define DIDT_TD_EDC_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
30605//DIDT_TD_EDC_STALL_PATTERN_7
30606#define DIDT_TD_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
30607#define DIDT_TD_EDC_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
30608#define DIDT_TD_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
30609#define DIDT_TD_EDC_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
30610//DIDT_TD_EDC_STATUS
30611#define DIDT_TD_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
30612#define DIDT_TD_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
30613#define DIDT_TD_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
30614#define DIDT_TD_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
30615//DIDT_TD_EDC_STALL_DELAY_1
30616#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD0__SHIFT 0x0
30617#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD1__SHIFT 0x6
30618#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD2__SHIFT 0xc
30619#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD3__SHIFT 0x12
30620#define DIDT_TD_EDC_STALL_DELAY_1__UNUSED__SHIFT 0x18
30621#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD0_MASK 0x0000003FL
30622#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD1_MASK 0x00000FC0L
30623#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD2_MASK 0x0003F000L
30624#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD3_MASK 0x00FC0000L
30625#define DIDT_TD_EDC_STALL_DELAY_1__UNUSED_MASK 0xFF000000L
30626//DIDT_TD_EDC_STALL_DELAY_2
30627#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD4__SHIFT 0x0
30628#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD5__SHIFT 0x6
30629#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD6__SHIFT 0xc
30630#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD7__SHIFT 0x12
30631#define DIDT_TD_EDC_STALL_DELAY_2__UNUSED__SHIFT 0x18
30632#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD4_MASK 0x0000003FL
30633#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD5_MASK 0x00000FC0L
30634#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD6_MASK 0x0003F000L
30635#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD7_MASK 0x00FC0000L
30636#define DIDT_TD_EDC_STALL_DELAY_2__UNUSED_MASK 0xFF000000L
30637//DIDT_TD_EDC_STALL_DELAY_3
30638#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD8__SHIFT 0x0
30639#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD9__SHIFT 0x6
30640#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD10__SHIFT 0xc
30641#define DIDT_TD_EDC_STALL_DELAY_3__UNUSED__SHIFT 0x12
30642#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD8_MASK 0x0000003FL
30643#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD9_MASK 0x00000FC0L
30644#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD10_MASK 0x0003F000L
30645#define DIDT_TD_EDC_STALL_DELAY_3__UNUSED_MASK 0xFFFC0000L
30646//DIDT_TD_EDC_OVERFLOW
30647#define DIDT_TD_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
30648#define DIDT_TD_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
30649#define DIDT_TD_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
30650#define DIDT_TD_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
30651//DIDT_TD_EDC_ROLLING_POWER_DELTA
30652#define DIDT_TD_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
30653#define DIDT_TD_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
30654//DIDT_TCP_CTRL0
30655#define DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
30656#define DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT 0x1
30657#define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
30658#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
30659#define DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
30660#define DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
30661#define DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
30662#define DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
30663#define DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
30664#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
30665#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
30666#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x1b
30667#define DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
30668#define DIDT_TCP_CTRL0__PHASE_OFFSET_MASK 0x00000006L
30669#define DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
30670#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
30671#define DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
30672#define DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
30673#define DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
30674#define DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
30675#define DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
30676#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
30677#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
30678#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xF8000000L
30679//DIDT_TCP_CTRL1
30680#define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
30681#define DIDT_TCP_CTRL1__MAX_POWER__SHIFT 0x10
30682#define DIDT_TCP_CTRL1__MIN_POWER_MASK 0x0000FFFFL
30683#define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xFFFF0000L
30684//DIDT_TCP_CTRL2
30685#define DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
30686#define DIDT_TCP_CTRL2__UNUSED_0__SHIFT 0xe
30687#define DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
30688#define DIDT_TCP_CTRL2__UNUSED_1__SHIFT 0x1a
30689#define DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
30690#define DIDT_TCP_CTRL2__UNUSED_2__SHIFT 0x1f
30691#define DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
30692#define DIDT_TCP_CTRL2__UNUSED_0_MASK 0x0000C000L
30693#define DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
30694#define DIDT_TCP_CTRL2__UNUSED_1_MASK 0x04000000L
30695#define DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
30696#define DIDT_TCP_CTRL2__UNUSED_2_MASK 0x80000000L
30697//DIDT_TCP_STALL_CTRL
30698#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
30699#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
30700#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
30701#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
30702#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT 0x18
30703#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
30704#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
30705#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
30706#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
30707#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK 0xFF000000L
30708//DIDT_TCP_TUNING_CTRL
30709#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
30710#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
30711#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
30712#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
30713//DIDT_TCP_STALL_AUTO_RELEASE_CTRL
30714#define DIDT_TCP_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
30715#define DIDT_TCP_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
30716//DIDT_TCP_CTRL3
30717#define DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
30718#define DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
30719#define DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT 0x2
30720#define DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
30721#define DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
30722#define DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
30723#define DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
30724#define DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
30725#define DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
30726#define DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
30727#define DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
30728#define DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
30729#define DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
30730#define DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
30731#define DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
30732#define DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
30733#define DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
30734#define DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
30735#define DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
30736#define DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
30737#define DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
30738#define DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
30739#define DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
30740#define DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
30741//DIDT_TCP_STALL_PATTERN_1_2
30742#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
30743#define DIDT_TCP_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
30744#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
30745#define DIDT_TCP_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
30746#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
30747#define DIDT_TCP_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
30748#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
30749#define DIDT_TCP_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
30750//DIDT_TCP_STALL_PATTERN_3_4
30751#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
30752#define DIDT_TCP_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
30753#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
30754#define DIDT_TCP_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
30755#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
30756#define DIDT_TCP_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
30757#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
30758#define DIDT_TCP_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
30759//DIDT_TCP_STALL_PATTERN_5_6
30760#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
30761#define DIDT_TCP_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
30762#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
30763#define DIDT_TCP_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
30764#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
30765#define DIDT_TCP_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
30766#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
30767#define DIDT_TCP_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
30768//DIDT_TCP_STALL_PATTERN_7
30769#define DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
30770#define DIDT_TCP_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
30771#define DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
30772#define DIDT_TCP_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
30773//DIDT_TCP_WEIGHT0_3
30774#define DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT 0x0
30775#define DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT 0x8
30776#define DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT 0x10
30777#define DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT 0x18
30778#define DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
30779#define DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
30780#define DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
30781#define DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
30782//DIDT_TCP_WEIGHT4_7
30783#define DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT 0x0
30784#define DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT 0x8
30785#define DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT 0x10
30786#define DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT 0x18
30787#define DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
30788#define DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
30789#define DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
30790#define DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
30791//DIDT_TCP_WEIGHT8_11
30792#define DIDT_TCP_WEIGHT8_11__WEIGHT8__SHIFT 0x0
30793#define DIDT_TCP_WEIGHT8_11__WEIGHT9__SHIFT 0x8
30794#define DIDT_TCP_WEIGHT8_11__WEIGHT10__SHIFT 0x10
30795#define DIDT_TCP_WEIGHT8_11__WEIGHT11__SHIFT 0x18
30796#define DIDT_TCP_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
30797#define DIDT_TCP_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
30798#define DIDT_TCP_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
30799#define DIDT_TCP_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
30800//DIDT_TCP_EDC_CTRL
30801#define DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT 0x0
30802#define DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
30803#define DIDT_TCP_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
30804#define DIDT_TCP_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
30805#define DIDT_TCP_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
30806#define DIDT_TCP_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
30807#define DIDT_TCP_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
30808#define DIDT_TCP_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
30809#define DIDT_TCP_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
30810#define DIDT_TCP_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
30811#define DIDT_TCP_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
30812#define DIDT_TCP_EDC_CTRL__UNUSED_0__SHIFT 0x17
30813#define DIDT_TCP_EDC_CTRL__EDC_EN_MASK 0x00000001L
30814#define DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
30815#define DIDT_TCP_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
30816#define DIDT_TCP_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
30817#define DIDT_TCP_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
30818#define DIDT_TCP_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
30819#define DIDT_TCP_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
30820#define DIDT_TCP_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
30821#define DIDT_TCP_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
30822#define DIDT_TCP_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
30823#define DIDT_TCP_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
30824#define DIDT_TCP_EDC_CTRL__UNUSED_0_MASK 0xFF800000L
30825//DIDT_TCP_EDC_THRESHOLD
30826#define DIDT_TCP_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
30827#define DIDT_TCP_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
30828//DIDT_TCP_EDC_STALL_PATTERN_1_2
30829#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
30830#define DIDT_TCP_EDC_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
30831#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
30832#define DIDT_TCP_EDC_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
30833#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
30834#define DIDT_TCP_EDC_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
30835#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
30836#define DIDT_TCP_EDC_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
30837//DIDT_TCP_EDC_STALL_PATTERN_3_4
30838#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
30839#define DIDT_TCP_EDC_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
30840#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
30841#define DIDT_TCP_EDC_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
30842#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
30843#define DIDT_TCP_EDC_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
30844#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
30845#define DIDT_TCP_EDC_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
30846//DIDT_TCP_EDC_STALL_PATTERN_5_6
30847#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
30848#define DIDT_TCP_EDC_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
30849#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
30850#define DIDT_TCP_EDC_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
30851#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
30852#define DIDT_TCP_EDC_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
30853#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
30854#define DIDT_TCP_EDC_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
30855//DIDT_TCP_EDC_STALL_PATTERN_7
30856#define DIDT_TCP_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
30857#define DIDT_TCP_EDC_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
30858#define DIDT_TCP_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
30859#define DIDT_TCP_EDC_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
30860//DIDT_TCP_EDC_STATUS
30861#define DIDT_TCP_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
30862#define DIDT_TCP_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
30863#define DIDT_TCP_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
30864#define DIDT_TCP_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
30865//DIDT_TCP_EDC_STALL_DELAY_1
30866#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP0__SHIFT 0x0
30867#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP1__SHIFT 0x6
30868#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP2__SHIFT 0xc
30869#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP3__SHIFT 0x12
30870#define DIDT_TCP_EDC_STALL_DELAY_1__UNUSED__SHIFT 0x18
30871#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP0_MASK 0x0000003FL
30872#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP1_MASK 0x00000FC0L
30873#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP2_MASK 0x0003F000L
30874#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP3_MASK 0x00FC0000L
30875#define DIDT_TCP_EDC_STALL_DELAY_1__UNUSED_MASK 0xFF000000L
30876//DIDT_TCP_EDC_STALL_DELAY_2
30877#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP4__SHIFT 0x0
30878#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP5__SHIFT 0x6
30879#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP6__SHIFT 0xc
30880#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP7__SHIFT 0x12
30881#define DIDT_TCP_EDC_STALL_DELAY_2__UNUSED__SHIFT 0x18
30882#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP4_MASK 0x0000003FL
30883#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP5_MASK 0x00000FC0L
30884#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP6_MASK 0x0003F000L
30885#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP7_MASK 0x00FC0000L
30886#define DIDT_TCP_EDC_STALL_DELAY_2__UNUSED_MASK 0xFF000000L
30887//DIDT_TCP_EDC_STALL_DELAY_3
30888#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP8__SHIFT 0x0
30889#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP9__SHIFT 0x6
30890#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP10__SHIFT 0xc
30891#define DIDT_TCP_EDC_STALL_DELAY_3__UNUSED__SHIFT 0x12
30892#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP8_MASK 0x0000003FL
30893#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP9_MASK 0x00000FC0L
30894#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP10_MASK 0x0003F000L
30895#define DIDT_TCP_EDC_STALL_DELAY_3__UNUSED_MASK 0xFFFC0000L
30896//DIDT_TCP_EDC_OVERFLOW
30897#define DIDT_TCP_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
30898#define DIDT_TCP_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
30899#define DIDT_TCP_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
30900#define DIDT_TCP_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
30901//DIDT_TCP_EDC_ROLLING_POWER_DELTA
30902#define DIDT_TCP_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
30903#define DIDT_TCP_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
30904//DIDT_DBR_CTRL0
30905#define DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
30906#define DIDT_DBR_CTRL0__PHASE_OFFSET__SHIFT 0x1
30907#define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
30908#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
30909#define DIDT_DBR_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
30910#define DIDT_DBR_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
30911#define DIDT_DBR_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
30912#define DIDT_DBR_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
30913#define DIDT_DBR_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
30914#define DIDT_DBR_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
30915#define DIDT_DBR_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
30916#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x1b
30917#define DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
30918#define DIDT_DBR_CTRL0__PHASE_OFFSET_MASK 0x00000006L
30919#define DIDT_DBR_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
30920#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
30921#define DIDT_DBR_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
30922#define DIDT_DBR_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
30923#define DIDT_DBR_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
30924#define DIDT_DBR_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
30925#define DIDT_DBR_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
30926#define DIDT_DBR_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
30927#define DIDT_DBR_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
30928#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xF8000000L
30929//DIDT_DBR_CTRL1
30930#define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0
30931#define DIDT_DBR_CTRL1__MAX_POWER__SHIFT 0x10
30932#define DIDT_DBR_CTRL1__MIN_POWER_MASK 0x0000FFFFL
30933#define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xFFFF0000L
30934//DIDT_DBR_CTRL2
30935#define DIDT_DBR_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
30936#define DIDT_DBR_CTRL2__UNUSED_0__SHIFT 0xe
30937#define DIDT_DBR_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
30938#define DIDT_DBR_CTRL2__UNUSED_1__SHIFT 0x1a
30939#define DIDT_DBR_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
30940#define DIDT_DBR_CTRL2__UNUSED_2__SHIFT 0x1f
30941#define DIDT_DBR_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
30942#define DIDT_DBR_CTRL2__UNUSED_0_MASK 0x0000C000L
30943#define DIDT_DBR_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
30944#define DIDT_DBR_CTRL2__UNUSED_1_MASK 0x04000000L
30945#define DIDT_DBR_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
30946#define DIDT_DBR_CTRL2__UNUSED_2_MASK 0x80000000L
30947//DIDT_DBR_STALL_CTRL
30948#define DIDT_DBR_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
30949#define DIDT_DBR_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
30950#define DIDT_DBR_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
30951#define DIDT_DBR_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
30952#define DIDT_DBR_STALL_CTRL__UNUSED_0__SHIFT 0x18
30953#define DIDT_DBR_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
30954#define DIDT_DBR_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
30955#define DIDT_DBR_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
30956#define DIDT_DBR_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
30957#define DIDT_DBR_STALL_CTRL__UNUSED_0_MASK 0xFF000000L
30958//DIDT_DBR_TUNING_CTRL
30959#define DIDT_DBR_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
30960#define DIDT_DBR_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
30961#define DIDT_DBR_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
30962#define DIDT_DBR_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
30963//DIDT_DBR_STALL_AUTO_RELEASE_CTRL
30964#define DIDT_DBR_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
30965#define DIDT_DBR_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
30966//DIDT_DBR_CTRL3
30967#define DIDT_DBR_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
30968#define DIDT_DBR_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
30969#define DIDT_DBR_CTRL3__THROTTLE_POLICY__SHIFT 0x2
30970#define DIDT_DBR_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
30971#define DIDT_DBR_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
30972#define DIDT_DBR_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
30973#define DIDT_DBR_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
30974#define DIDT_DBR_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
30975#define DIDT_DBR_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
30976#define DIDT_DBR_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
30977#define DIDT_DBR_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
30978#define DIDT_DBR_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
30979#define DIDT_DBR_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
30980#define DIDT_DBR_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
30981#define DIDT_DBR_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
30982#define DIDT_DBR_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
30983#define DIDT_DBR_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
30984#define DIDT_DBR_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
30985#define DIDT_DBR_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
30986#define DIDT_DBR_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
30987#define DIDT_DBR_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
30988#define DIDT_DBR_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
30989#define DIDT_DBR_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
30990#define DIDT_DBR_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
30991//DIDT_DBR_STALL_PATTERN_1_2
30992#define DIDT_DBR_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
30993#define DIDT_DBR_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
30994#define DIDT_DBR_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
30995#define DIDT_DBR_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
30996#define DIDT_DBR_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
30997#define DIDT_DBR_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
30998#define DIDT_DBR_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
30999#define DIDT_DBR_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
31000//DIDT_DBR_STALL_PATTERN_3_4
31001#define DIDT_DBR_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
31002#define DIDT_DBR_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
31003#define DIDT_DBR_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
31004#define DIDT_DBR_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
31005#define DIDT_DBR_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
31006#define DIDT_DBR_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
31007#define DIDT_DBR_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
31008#define DIDT_DBR_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
31009//DIDT_DBR_STALL_PATTERN_5_6
31010#define DIDT_DBR_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
31011#define DIDT_DBR_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
31012#define DIDT_DBR_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
31013#define DIDT_DBR_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
31014#define DIDT_DBR_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
31015#define DIDT_DBR_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
31016#define DIDT_DBR_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
31017#define DIDT_DBR_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
31018//DIDT_DBR_STALL_PATTERN_7
31019#define DIDT_DBR_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
31020#define DIDT_DBR_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
31021#define DIDT_DBR_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
31022#define DIDT_DBR_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
31023//DIDT_DBR_WEIGHT0_3
31024#define DIDT_DBR_WEIGHT0_3__WEIGHT0__SHIFT 0x0
31025#define DIDT_DBR_WEIGHT0_3__WEIGHT1__SHIFT 0x8
31026#define DIDT_DBR_WEIGHT0_3__WEIGHT2__SHIFT 0x10
31027#define DIDT_DBR_WEIGHT0_3__WEIGHT3__SHIFT 0x18
31028#define DIDT_DBR_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
31029#define DIDT_DBR_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
31030#define DIDT_DBR_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
31031#define DIDT_DBR_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
31032//DIDT_DBR_WEIGHT4_7
31033#define DIDT_DBR_WEIGHT4_7__WEIGHT4__SHIFT 0x0
31034#define DIDT_DBR_WEIGHT4_7__WEIGHT5__SHIFT 0x8
31035#define DIDT_DBR_WEIGHT4_7__WEIGHT6__SHIFT 0x10
31036#define DIDT_DBR_WEIGHT4_7__WEIGHT7__SHIFT 0x18
31037#define DIDT_DBR_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
31038#define DIDT_DBR_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
31039#define DIDT_DBR_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
31040#define DIDT_DBR_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
31041//DIDT_DBR_WEIGHT8_11
31042#define DIDT_DBR_WEIGHT8_11__WEIGHT8__SHIFT 0x0
31043#define DIDT_DBR_WEIGHT8_11__WEIGHT9__SHIFT 0x8
31044#define DIDT_DBR_WEIGHT8_11__WEIGHT10__SHIFT 0x10
31045#define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18
31046#define DIDT_DBR_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
31047#define DIDT_DBR_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
31048#define DIDT_DBR_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
31049#define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
31050//DIDT_DBR_EDC_CTRL
31051#define DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT 0x0
31052#define DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
31053#define DIDT_DBR_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
31054#define DIDT_DBR_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
31055#define DIDT_DBR_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
31056#define DIDT_DBR_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
31057#define DIDT_DBR_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
31058#define DIDT_DBR_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
31059#define DIDT_DBR_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
31060#define DIDT_DBR_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
31061#define DIDT_DBR_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
31062#define DIDT_DBR_EDC_CTRL__UNUSED_0__SHIFT 0x17
31063#define DIDT_DBR_EDC_CTRL__EDC_EN_MASK 0x00000001L
31064#define DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
31065#define DIDT_DBR_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
31066#define DIDT_DBR_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
31067#define DIDT_DBR_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
31068#define DIDT_DBR_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
31069#define DIDT_DBR_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
31070#define DIDT_DBR_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
31071#define DIDT_DBR_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
31072#define DIDT_DBR_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
31073#define DIDT_DBR_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
31074#define DIDT_DBR_EDC_CTRL__UNUSED_0_MASK 0xFF800000L
31075//DIDT_DBR_EDC_THRESHOLD
31076#define DIDT_DBR_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
31077#define DIDT_DBR_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
31078//DIDT_DBR_EDC_STALL_PATTERN_1_2
31079#define DIDT_DBR_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
31080#define DIDT_DBR_EDC_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
31081#define DIDT_DBR_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
31082#define DIDT_DBR_EDC_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
31083#define DIDT_DBR_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
31084#define DIDT_DBR_EDC_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
31085#define DIDT_DBR_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
31086#define DIDT_DBR_EDC_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
31087//DIDT_DBR_EDC_STALL_PATTERN_3_4
31088#define DIDT_DBR_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
31089#define DIDT_DBR_EDC_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
31090#define DIDT_DBR_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
31091#define DIDT_DBR_EDC_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
31092#define DIDT_DBR_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
31093#define DIDT_DBR_EDC_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
31094#define DIDT_DBR_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
31095#define DIDT_DBR_EDC_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
31096//DIDT_DBR_EDC_STALL_PATTERN_5_6
31097#define DIDT_DBR_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
31098#define DIDT_DBR_EDC_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
31099#define DIDT_DBR_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
31100#define DIDT_DBR_EDC_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
31101#define DIDT_DBR_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
31102#define DIDT_DBR_EDC_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
31103#define DIDT_DBR_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
31104#define DIDT_DBR_EDC_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
31105//DIDT_DBR_EDC_STALL_PATTERN_7
31106#define DIDT_DBR_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
31107#define DIDT_DBR_EDC_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
31108#define DIDT_DBR_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
31109#define DIDT_DBR_EDC_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
31110//DIDT_DBR_EDC_STATUS
31111#define DIDT_DBR_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
31112#define DIDT_DBR_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
31113#define DIDT_DBR_EDC_STATUS__UNUSED_0__SHIFT 0x4
31114#define DIDT_DBR_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
31115#define DIDT_DBR_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
31116#define DIDT_DBR_EDC_STATUS__UNUSED_0_MASK 0xFFFFFFF0L
31117//DIDT_DBR_EDC_STALL_DELAY_1
31118#define DIDT_DBR_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DBR0__SHIFT 0x0
31119#define DIDT_DBR_EDC_STALL_DELAY_1__UNUSED__SHIFT 0x1
31120#define DIDT_DBR_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DBR0_MASK 0x00000001L
31121#define DIDT_DBR_EDC_STALL_DELAY_1__UNUSED_MASK 0xFFFFFFFEL
31122//DIDT_DBR_EDC_OVERFLOW
31123#define DIDT_DBR_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
31124#define DIDT_DBR_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
31125#define DIDT_DBR_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
31126#define DIDT_DBR_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
31127//DIDT_DBR_EDC_ROLLING_POWER_DELTA
31128#define DIDT_DBR_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
31129#define DIDT_DBR_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
31130//DIDT_SQ_STALL_EVENT_COUNTER
31131#define DIDT_SQ_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
31132#define DIDT_SQ_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
31133//DIDT_DB_STALL_EVENT_COUNTER
31134#define DIDT_DB_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
31135#define DIDT_DB_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
31136//DIDT_TD_STALL_EVENT_COUNTER
31137#define DIDT_TD_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
31138#define DIDT_TD_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
31139//DIDT_TCP_STALL_EVENT_COUNTER
31140#define DIDT_TCP_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
31141#define DIDT_TCP_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
31142//DIDT_DBR_STALL_EVENT_COUNTER
31143#define DIDT_DBR_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
31144#define DIDT_DBR_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
31145
31146
31147
31148
31149
31150#endif
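Editor's note on the pattern above: every register in these generated headers exposes one __SHIFT/_MASK pair per bit field, and drivers build or decode register words with the usual mask-then-shift idiom. The following is a minimal, self-contained C sketch of that idiom only; the GET_FIELD/SET_FIELD helpers and the register value are illustrative assumptions, not the driver's own accessors (amdgpu carries comparable in-tree helpers built on the same token-pasting pattern), and the DIDT_TCP_CTRL1 defines are repeated here purely so the sketch compiles on its own.

/*
 * Illustrative sketch of the shift/mask idiom these headers enable.
 * GET_FIELD/SET_FIELD are local helpers for this example, not the
 * in-tree accessors; the DIDT_TCP_CTRL1 values below are copied from
 * the header above so the example is self-contained.
 */
#include <stdint.h>
#include <stdio.h>

#define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_TCP_CTRL1__MAX_POWER__SHIFT 0x10
#define DIDT_TCP_CTRL1__MIN_POWER_MASK   0x0000FFFFL
#define DIDT_TCP_CTRL1__MAX_POWER_MASK   0xFFFF0000L

/* Extract a field: mask the bits first, then shift them down. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Update a field: clear its bits, then OR in the shifted new value. */
#define SET_FIELD(val, reg, field, fval) \
	(((val) & ~(uint32_t)reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t ctrl1 = 0; /* hypothetical register value, not read from hardware */

	ctrl1 = SET_FIELD(ctrl1, DIDT_TCP_CTRL1, MIN_POWER, 0x0040);
	ctrl1 = SET_FIELD(ctrl1, DIDT_TCP_CTRL1, MAX_POWER, 0x1000);

	printf("DIDT_TCP_CTRL1 = 0x%08X (MIN_POWER = 0x%lX)\n",
	       ctrl1, (unsigned long)GET_FIELD(ctrl1, DIDT_TCP_CTRL1, MIN_POWER));
	return 0;
}

The same composition works for any register/field pair defined in these files, since the generator always emits the <REG>__<FIELD>__SHIFT and <REG>__<FIELD>_MASK names together.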
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h
new file mode 100644
index 000000000000..1445bba8f41f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h
@@ -0,0 +1,1658 @@
1/*
2 * Copyright (C) 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */
21#ifndef _sdma0_4_1_SH_MASK_HEADER
22#define _sdma0_4_1_SH_MASK_HEADER
23
24
25// addressBlock: sdma0_sdma0dec
26//SDMA0_UCODE_ADDR
27#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0
28#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL
29//SDMA0_UCODE_DATA
30#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0
31#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
32//SDMA0_VM_CNTL
33#define SDMA0_VM_CNTL__CMD__SHIFT 0x0
34#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL
35//SDMA0_VM_CTX_LO
36#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2
37#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
38//SDMA0_VM_CTX_HI
39#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0
40#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
41//SDMA0_ACTIVE_FCN_ID
42#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
43#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
44#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
45#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
46#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
47#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
48//SDMA0_VM_CTX_CNTL
49#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0
50#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4
51#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L
52#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L
53//SDMA0_VIRT_RESET_REQ
54#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0
55#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f
56#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
57#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L
58//SDMA0_CONTEXT_REG_TYPE0
59#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL__SHIFT 0x0
60#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE__SHIFT 0x1
61#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI__SHIFT 0x2
62#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR__SHIFT 0x3
63#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI__SHIFT 0x4
64#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR__SHIFT 0x5
65#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI__SHIFT 0x6
66#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
67#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
68#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
69#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL__SHIFT 0xa
70#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR__SHIFT 0xb
71#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET__SHIFT 0xc
72#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO__SHIFT 0xd
73#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI__SHIFT 0xe
74#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE__SHIFT 0xf
75#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL__SHIFT 0x10
76#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS__SHIFT 0x11
77#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL__SHIFT 0x12
78#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL__SHIFT 0x13
79#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL_MASK 0x00000001L
80#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_MASK 0x00000002L
81#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI_MASK 0x00000004L
82#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_MASK 0x00000008L
83#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI_MASK 0x00000010L
84#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_MASK 0x00000020L
85#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI_MASK 0x00000040L
86#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
87#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
88#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
89#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL_MASK 0x00000400L
90#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR_MASK 0x00000800L
91#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET_MASK 0x00001000L
92#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO_MASK 0x00002000L
93#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI_MASK 0x00004000L
94#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE_MASK 0x00008000L
95#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL_MASK 0x00010000L
96#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS_MASK 0x00020000L
97#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL_MASK 0x00040000L
98#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL_MASK 0x00080000L
99//SDMA0_CONTEXT_REG_TYPE1
100#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS__SHIFT 0x8
101#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG__SHIFT 0x9
102#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK__SHIFT 0xa
103#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET__SHIFT 0xb
104#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO__SHIFT 0xc
105#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI__SHIFT 0xd
106#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
107#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN__SHIFT 0xf
108#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT__SHIFT 0x10
109#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG__SHIFT 0x11
110#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
111#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
112#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL__SHIFT 0x14
113#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
114#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
115#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS_MASK 0x00000100L
116#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG_MASK 0x00000200L
117#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK_MASK 0x00000400L
118#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET_MASK 0x00000800L
119#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO_MASK 0x00001000L
120#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI_MASK 0x00002000L
121#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
122#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN_MASK 0x00008000L
123#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT_MASK 0x00010000L
124#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG_MASK 0x00020000L
125#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
126#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
127#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL_MASK 0x00100000L
128#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
129#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
130//SDMA0_CONTEXT_REG_TYPE2
131#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0__SHIFT 0x0
132#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1__SHIFT 0x1
133#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2__SHIFT 0x2
134#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3__SHIFT 0x3
135#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4__SHIFT 0x4
136#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5__SHIFT 0x5
137#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6__SHIFT 0x6
138#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7__SHIFT 0x7
139#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8__SHIFT 0x8
140#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL__SHIFT 0x9
141#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
142#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0_MASK 0x00000001L
143#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1_MASK 0x00000002L
144#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2_MASK 0x00000004L
145#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3_MASK 0x00000008L
146#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4_MASK 0x00000010L
147#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5_MASK 0x00000020L
148#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6_MASK 0x00000040L
149#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7_MASK 0x00000080L
150#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8_MASK 0x00000100L
151#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL_MASK 0x00000200L
152#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
153//SDMA0_CONTEXT_REG_TYPE3
154#define SDMA0_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
155#define SDMA0_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
156//SDMA0_PUB_REG_TYPE0
157#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR__SHIFT 0x0
158#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA__SHIFT 0x1
159#define SDMA0_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
160#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL__SHIFT 0x4
161#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO__SHIFT 0x5
162#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI__SHIFT 0x6
163#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID__SHIFT 0x7
164#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL__SHIFT 0x8
165#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ__SHIFT 0x9
166#define SDMA0_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
167#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0__SHIFT 0xb
168#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1__SHIFT 0xc
169#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2__SHIFT 0xd
170#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3__SHIFT 0xe
171#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0__SHIFT 0xf
172#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1__SHIFT 0x10
173#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2__SHIFT 0x11
174#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3__SHIFT 0x12
175#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL__SHIFT 0x13
176#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14
177#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
178#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a
179#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL__SHIFT 0x1b
180#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c
181#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d
182#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e
183#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f
184#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR_MASK 0x00000001L
185#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA_MASK 0x00000002L
186#define SDMA0_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
187#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL_MASK 0x00000010L
188#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO_MASK 0x00000020L
189#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI_MASK 0x00000040L
190#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID_MASK 0x00000080L
191#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL_MASK 0x00000100L
192#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ_MASK 0x00000200L
193#define SDMA0_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
194#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0_MASK 0x00000800L
195#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1_MASK 0x00001000L
196#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2_MASK 0x00002000L
197#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3_MASK 0x00004000L
198#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0_MASK 0x00008000L
199#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1_MASK 0x00010000L
200#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2_MASK 0x00020000L
201#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3_MASK 0x00040000L
202#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL_MASK 0x00080000L
203#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L
204#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
205#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L
206#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL_MASK 0x08000000L
207#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L
208#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L
209#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L
210#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L
211//SDMA0_PUB_REG_TYPE1
212#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x0
213#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
214#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x2
215#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3
216#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4
217#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5
218#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6
219#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL__SHIFT 0x7
220#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8
221#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9
222#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL__SHIFT 0xa
223#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb
224#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM__SHIFT 0xc
225#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM__SHIFT 0xd
226#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
227#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
228#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
229#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
230#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12
231#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13
232#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14
233#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15
234#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16
235#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17
236#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18
237#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19
238#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a
239#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b
240#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c
241#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d
242#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS__SHIFT 0x1e
243#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1f
244#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000001L
245#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
246#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000004L
247#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L
248#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L
249#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L
250#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L
251#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL_MASK 0x00000080L
252#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L
253#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L
254#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL_MASK 0x00000400L
255#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L
256#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM_MASK 0x00001000L
257#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM_MASK 0x00002000L
258#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
259#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
260#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
261#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
262#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L
263#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L
264#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L
265#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L
266#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L
267#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L
268#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L
269#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L
270#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L
271#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L
272#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L
273#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L
274#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS_MASK 0x40000000L
275#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS_MASK 0x80000000L
276//SDMA0_PUB_REG_TYPE2
277#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x0
278#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x1
279#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x2
280#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x3
281#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x4
282#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x5
283#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x6
284#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT__SHIFT 0x7
285#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE__SHIFT 0x8
286#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE__SHIFT 0x9
287#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa
288#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb
289#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc
290#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd
291#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe
292#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10
293#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11
294#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12
295#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13
296#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14
297#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15
298#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE__SHIFT 0x16
299#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL__SHIFT 0x17
300#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT__SHIFT 0x18
301#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT__SHIFT 0x19
302#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
303#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b
304#define SDMA0_PUB_REG_TYPE2__SDMA0_MMHUB_TRUSTLVL__SHIFT 0x1c
305#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
306#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL__SHIFT 0x1e
307#define SDMA0_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
308#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000001L
309#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000002L
310#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000004L
311#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000008L
312#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000010L
313#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000020L
314#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000040L
315#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT_MASK 0x00000080L
316#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE_MASK 0x00000100L
317#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE_MASK 0x00000200L
318#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L
319#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L
320#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L
321#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L
322#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L
323#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L
324#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L
325#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L
326#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L
327#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L
328#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L
329#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE_MASK 0x00400000L
330#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL_MASK 0x00800000L
331#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT_MASK 0x01000000L
332#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT_MASK 0x02000000L
333#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
334#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L
335#define SDMA0_PUB_REG_TYPE2__SDMA0_MMHUB_TRUSTLVL_MASK 0x10000000L
336#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
337#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL_MASK 0x40000000L
338#define SDMA0_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
339//SDMA0_PUB_REG_TYPE3
340#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0
341#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1
342#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x2
343#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L
344#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
345#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL
346//SDMA0_MMHUB_CNTL
347#define SDMA0_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
348#define SDMA0_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
349//SDMA0_CONTEXT_GROUP_BOUNDARY
350#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
351#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
352//SDMA0_POWER_CNTL
353#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0
354#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1
355#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2
356#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME__SHIFT 0x3
357#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
358#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
359#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
360#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
361#define SDMA0_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
362#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME__SHIFT 0x1a
363#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L
364#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L
365#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L
366#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
367#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
368#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
369#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
370#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
371#define SDMA0_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
372#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
373//SDMA0_CLK_CTRL
374#define SDMA0_CLK_CTRL__ON_DELAY__SHIFT 0x0
375#define SDMA0_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
376#define SDMA0_CLK_CTRL__RESERVED__SHIFT 0xc
377#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
378#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
379#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
380#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
381#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
382#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
383#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
384#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
385#define SDMA0_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
386#define SDMA0_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
387#define SDMA0_CLK_CTRL__RESERVED_MASK 0x00FFF000L
388#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
389#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
390#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
391#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
392#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
393#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
394#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
395#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
396//SDMA0_CNTL
397#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0
398#define SDMA0_CNTL__UTC_L1_ENABLE__SHIFT 0x1
399#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
400#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
401#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
402#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
403#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
404#define SDMA0_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
405#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
406#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
407#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
408#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L
409#define SDMA0_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
410#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
411#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
412#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
413#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
414#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
415#define SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
416#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
417#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
418#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
419//SDMA0_CHICKEN_BITS
420#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
421#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
422#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
423#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
424#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
425#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
426#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
427#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
428#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
429#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
430#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
431#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
432#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
433#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
434#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
435#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
436#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
437#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
438#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
439#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
440#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
441#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
442#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
443#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
444#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
445#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
446//SDMA0_GB_ADDR_CONFIG
447#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
448#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
449#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
450#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
451#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
452#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
453#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
454#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
455#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
456#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
457//SDMA0_GB_ADDR_CONFIG_READ
458#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
459#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
460#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
461#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
462#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
463#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
464#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
465#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
466#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
467#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
468//SDMA0_RB_RPTR_FETCH_HI
469#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
470#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
471//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL
472#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
473#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
474//SDMA0_RB_RPTR_FETCH
475#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
476#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
477//SDMA0_IB_OFFSET_FETCH
478#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
479#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
480//SDMA0_PROGRAM
481#define SDMA0_PROGRAM__STREAM__SHIFT 0x0
482#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL
483//SDMA0_STATUS_REG
484#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0
485#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1
486#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2
487#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3
488#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
489#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
490#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
491#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
492#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
493#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9
494#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa
495#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
496#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc
497#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
498#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe
499#define SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
500#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
501#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
502#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
503#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
504#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
505#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
506#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
507#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
508#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a
509#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
510#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
511#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e
512#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
513#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L
514#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L
515#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L
516#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L
517#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
518#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
519#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
520#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
521#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
522#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L
523#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L
524#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
525#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L
526#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
527#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
528#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
529#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
530#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
531#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
532#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
533#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
534#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
535#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
536#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
537#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L
538#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
539#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
540#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L
541#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
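/*
 * Illustrative sketch, not part of the original header: every status field
 * above is described by a *_MASK / *__SHIFT pair, so a raw 32-bit readback of
 * SDMA0_STATUS_REG can be decoded by masking first and then shifting the field
 * down to bit 0.  The helper name sdma0_status_is_idle() and its "status"
 * parameter are hypothetical; <stdint.h>/<stdbool.h> (or the kernel's u32/bool
 * equivalents) are assumed to be available.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool sdma0_status_is_idle(uint32_t status)
{
	/* IDLE occupies bit 0: isolate it with the mask, then shift it down. */
	return (status & SDMA0_STATUS_REG__IDLE_MASK) >>
	       SDMA0_STATUS_REG__IDLE__SHIFT;
}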
542//SDMA0_STATUS1_REG
543#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
544#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
545#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
546#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
547#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
548#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
549#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
550#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
551#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
552#define SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
553#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
554#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xf
555#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
556#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
557#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
558#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
559#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
560#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
561#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
562#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
563#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
564#define SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
565#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
566#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
567#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
568#define SDMA0_STATUS1_REG__EX_START_MASK 0x00008000L
569#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
570#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
571//SDMA0_RD_BURST_CNTL
572#define SDMA0_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
573#define SDMA0_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
574//SDMA0_HBM_PAGE_CONFIG
575#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
576#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
577//SDMA0_UCODE_CHECKSUM
578#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0
579#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
580//SDMA0_F32_CNTL
581#define SDMA0_F32_CNTL__HALT__SHIFT 0x0
582#define SDMA0_F32_CNTL__STEP__SHIFT 0x1
583#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L
584#define SDMA0_F32_CNTL__STEP_MASK 0x00000002L
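/*
 * Illustrative sketch, not part of the original header: a single-bit control
 * field such as SDMA0_F32_CNTL.HALT is normally updated read-modify-write so
 * that the neighbouring STEP bit is preserved.  This helper only transforms a
 * value; the caller is assumed to read SDMA0_F32_CNTL beforehand and write the
 * result back afterwards with whatever register I/O the surrounding driver
 * provides.  The function name and "halt" flag are hypothetical.
 */
static inline uint32_t sdma0_f32_cntl_set_halt(uint32_t val, bool halt)
{
	if (halt)
		val |= SDMA0_F32_CNTL__HALT_MASK;	/* stop the F32 engine */
	else
		val &= ~SDMA0_F32_CNTL__HALT_MASK;	/* let it run again */

	return val;	/* caller writes this back to SDMA0_F32_CNTL */
}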
585//SDMA0_FREEZE
586#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0
587#define SDMA0_FREEZE__FREEZE__SHIFT 0x4
588#define SDMA0_FREEZE__FROZEN__SHIFT 0x5
589#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6
590#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L
591#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L
592#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L
593#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L
594//SDMA0_PHASE0_QUANTUM
595#define SDMA0_PHASE0_QUANTUM__UNIT__SHIFT 0x0
596#define SDMA0_PHASE0_QUANTUM__VALUE__SHIFT 0x8
597#define SDMA0_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
598#define SDMA0_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
599#define SDMA0_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
600#define SDMA0_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
601//SDMA0_PHASE1_QUANTUM
602#define SDMA0_PHASE1_QUANTUM__UNIT__SHIFT 0x0
603#define SDMA0_PHASE1_QUANTUM__VALUE__SHIFT 0x8
604#define SDMA0_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
605#define SDMA0_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
606#define SDMA0_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
607#define SDMA0_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
608//SDMA_POWER_GATING
609#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION__SHIFT 0x0
610#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION__SHIFT 0x1
611#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ__SHIFT 0x2
612#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ__SHIFT 0x3
613#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
614#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION_MASK 0x00000001L
615#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION_MASK 0x00000002L
616#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ_MASK 0x00000004L
617#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ_MASK 0x00000008L
618#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
619//SDMA_PGFSM_CONFIG
620#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
621#define SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
622#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
623#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
624#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
625#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
626#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd
627#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
628#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
629#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
630#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
631#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
632#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
633#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
634#define SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
635#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
636#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
637#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
638//SDMA_PGFSM_WRITE
639#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
640#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
641//SDMA_PGFSM_READ
642#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0
643#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
644//SDMA0_EDC_CONFIG
645#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1
646#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
647#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
648#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
649//SDMA0_BA_THRESHOLD
650#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0
651#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
652#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
653#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
654//SDMA0_ID
655#define SDMA0_ID__DEVICE_ID__SHIFT 0x0
656#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL
657//SDMA0_VERSION
658#define SDMA0_VERSION__MINVER__SHIFT 0x0
659#define SDMA0_VERSION__MAJVER__SHIFT 0x8
660#define SDMA0_VERSION__REV__SHIFT 0x10
661#define SDMA0_VERSION__MINVER_MASK 0x0000007FL
662#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L
663#define SDMA0_VERSION__REV_MASK 0x003F0000L
664//SDMA0_EDC_COUNTER
665#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
666#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
667#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
668#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
669#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
670#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
671#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
672#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
673#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
674#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
675#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
676#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
677#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
678#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
679#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
680#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
681#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
682#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
683#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
684#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
685#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
686#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
687#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
688#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
689#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
690#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
691#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
692#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
693#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
694#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
695#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
696#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
697#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
698#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
699//SDMA0_EDC_COUNTER_CLEAR
700#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
701#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
702//SDMA0_STATUS2_REG
703#define SDMA0_STATUS2_REG__ID__SHIFT 0x0
704#define SDMA0_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2
705#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10
706#define SDMA0_STATUS2_REG__ID_MASK 0x00000003L
707#define SDMA0_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL
708#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
709//SDMA0_ATOMIC_CNTL
710#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
711#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
712#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
713#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
714//SDMA0_ATOMIC_PREOP_LO
715#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
716#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
717//SDMA0_ATOMIC_PREOP_HI
718#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
719#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
720//SDMA0_UTCL1_CNTL
721#define SDMA0_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
722#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
723#define SDMA0_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
724#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
725#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
726#define SDMA0_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
727#define SDMA0_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
728#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
729#define SDMA0_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
730#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
731#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
732#define SDMA0_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
733//SDMA0_UTCL1_WATERMK
734#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
735#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0xa
736#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x12
737#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x1a
738#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000003FFL
739#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0003FC00L
740#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x03FC0000L
741#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFC000000L
742//SDMA0_UTCL1_RD_STATUS
743#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
744#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
745#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
746#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
747#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
748#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
749#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
750#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
751#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
752#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
753#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
754#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
755#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
756#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
757#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
758#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
759#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
760#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
761#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
762#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
763#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
764#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
765#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
766#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
767#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
768#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
769#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
770#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
771#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
772#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
773#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
774#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
775#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
776#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
777#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
778#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
779#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
780#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
781#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
782#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
783#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
784#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
785#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
786#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
787#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
788#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
789#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
790#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
791#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
792#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
793#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
794#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
795#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
796#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
797//SDMA0_UTCL1_WR_STATUS
798#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
799#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
800#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
801#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
802#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
803#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
804#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
805#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
806#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
807#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
808#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
809#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
810#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
811#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
812#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
813#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
814#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
815#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
816#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
817#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
818#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
819#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
820#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
821#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
822#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
823#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
824#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
825#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
826#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
827#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
828#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
829#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
830#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
831#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
832#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
833#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
834#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
835#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
836#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
837#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
838#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
839#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
840#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
841#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
842#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
843#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
844#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
845#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
846#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
847#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
848#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
849#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
850#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
851#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
852#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
853#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
854//SDMA0_UTCL1_INV0
855#define SDMA0_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
856#define SDMA0_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
857#define SDMA0_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
858#define SDMA0_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
859#define SDMA0_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
860#define SDMA0_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
861#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
862#define SDMA0_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
863#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
864#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
865#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
866#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
867#define SDMA0_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
868#define SDMA0_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
869#define SDMA0_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
870#define SDMA0_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
871#define SDMA0_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
872#define SDMA0_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
873#define SDMA0_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
874#define SDMA0_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
875#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
876#define SDMA0_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
877#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
878#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
879#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
880#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
881#define SDMA0_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
882#define SDMA0_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
883//SDMA0_UTCL1_INV1
884#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
885#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
886//SDMA0_UTCL1_INV2
887#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
888#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
889//SDMA0_UTCL1_RD_XNACK0
890#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
891#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
892//SDMA0_UTCL1_RD_XNACK1
893#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
894#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
895#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
896#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
897#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
898#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
899#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
900#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
901//SDMA0_UTCL1_WR_XNACK0
902#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
903#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
904//SDMA0_UTCL1_WR_XNACK1
905#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
906#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
907#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
908#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
909#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
910#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
911#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
912#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
913//SDMA0_UTCL1_TIMEOUT
914#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
915#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
916#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
917#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
918//SDMA0_UTCL1_PAGE
919#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
920#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
921#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
922#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
923#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
924#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
925#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
926#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
927//SDMA0_POWER_CNTL_IDLE
928#define SDMA0_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
929#define SDMA0_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
930#define SDMA0_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
931#define SDMA0_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
932#define SDMA0_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
933#define SDMA0_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
934//SDMA0_RELAX_ORDERING_LUT
935#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
936#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
937#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
938#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
939#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
940#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
941#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
942#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
943#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
944#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
945#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
946#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
947#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
948#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
949#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
950#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
951#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
952#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
953#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
954#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
955#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
956#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
957#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
958#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
959#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
960#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
961#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
962#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
963#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
964#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
965#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
966#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
967#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
968#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
969#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
970#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
971#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
972#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
973//SDMA0_CHICKEN_BITS_2
974#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
975#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
976//SDMA0_STATUS3_REG
977#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
978#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
979#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
980#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
981#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
982#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
983//SDMA0_PHYSICAL_ADDR_LO
984#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
985#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
986#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
987#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
988#define SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
989#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
990#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
991#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
992//SDMA0_PHYSICAL_ADDR_HI
993#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
994#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
995//SDMA0_ERROR_LOG
996#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0
997#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10
998#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
999#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L
1000//SDMA0_PUB_DUMMY_REG0
1001#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
1002#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
1003//SDMA0_PUB_DUMMY_REG1
1004#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
1005#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
1006//SDMA0_PUB_DUMMY_REG2
1007#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
1008#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
1009//SDMA0_PUB_DUMMY_REG3
1010#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
1011#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
1012//SDMA0_F32_COUNTER
1013#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0
1014#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
1015//SDMA0_UNBREAKABLE
1016#define SDMA0_UNBREAKABLE__VALUE__SHIFT 0x0
1017#define SDMA0_UNBREAKABLE__VALUE_MASK 0x00000001L
1018//SDMA0_PERFMON_CNTL
1019#define SDMA0_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
1020#define SDMA0_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
1021#define SDMA0_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
1022#define SDMA0_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
1023#define SDMA0_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
1024#define SDMA0_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
1025#define SDMA0_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
1026#define SDMA0_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
1027#define SDMA0_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
1028#define SDMA0_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
1029#define SDMA0_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
1030#define SDMA0_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
1031//SDMA0_PERFCOUNTER0_RESULT
1032#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
1033#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
1034//SDMA0_PERFCOUNTER1_RESULT
1035#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
1036#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
1037//SDMA0_PERFCOUNTER_TAG_DELAY_RANGE
1038#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
1039#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
1040#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
1041#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
1042#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
1043#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
1044//SDMA0_CRD_CNTL
1045#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
1046#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
1047#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
1048#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
1049//SDMA0_MMHUB_TRUSTLVL
1050#define SDMA0_MMHUB_TRUSTLVL__SECFLAG0__SHIFT 0x0
1051#define SDMA0_MMHUB_TRUSTLVL__SECFLAG1__SHIFT 0x3
1052#define SDMA0_MMHUB_TRUSTLVL__SECFLAG2__SHIFT 0x6
1053#define SDMA0_MMHUB_TRUSTLVL__SECFLAG3__SHIFT 0x9
1054#define SDMA0_MMHUB_TRUSTLVL__SECFLAG4__SHIFT 0xc
1055#define SDMA0_MMHUB_TRUSTLVL__SECFLAG5__SHIFT 0xf
1056#define SDMA0_MMHUB_TRUSTLVL__SECFLAG6__SHIFT 0x12
1057#define SDMA0_MMHUB_TRUSTLVL__SECFLAG7__SHIFT 0x15
1058#define SDMA0_MMHUB_TRUSTLVL__SECFLAG0_MASK 0x00000007L
1059#define SDMA0_MMHUB_TRUSTLVL__SECFLAG1_MASK 0x00000038L
1060#define SDMA0_MMHUB_TRUSTLVL__SECFLAG2_MASK 0x000001C0L
1061#define SDMA0_MMHUB_TRUSTLVL__SECFLAG3_MASK 0x00000E00L
1062#define SDMA0_MMHUB_TRUSTLVL__SECFLAG4_MASK 0x00007000L
1063#define SDMA0_MMHUB_TRUSTLVL__SECFLAG5_MASK 0x00038000L
1064#define SDMA0_MMHUB_TRUSTLVL__SECFLAG6_MASK 0x001C0000L
1065#define SDMA0_MMHUB_TRUSTLVL__SECFLAG7_MASK 0x00E00000L
1066//SDMA0_GPU_IOV_VIOLATION_LOG
1067#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
1068#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
1069#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
1070#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12
1071#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
1072#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
1073#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
1074#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
1075#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
1076#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
1077#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L
1078#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
1079#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
1080#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
1081//SDMA0_ULV_CNTL
1082#define SDMA0_ULV_CNTL__HYSTERESIS__SHIFT 0x0
1083#define SDMA0_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
1084#define SDMA0_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
1085#define SDMA0_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
1086#define SDMA0_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
1087#define SDMA0_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
1088#define SDMA0_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
1089#define SDMA0_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
1090//SDMA0_EA_DBIT_ADDR_DATA
1091#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
1092#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
1093//SDMA0_EA_DBIT_ADDR_INDEX
1094#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
1095#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
1096//SDMA0_GFX_RB_CNTL
1097#define SDMA0_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
1098#define SDMA0_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
1099#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
1100#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
1101#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
1102#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
1103#define SDMA0_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
1104#define SDMA0_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
1105#define SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
1106#define SDMA0_GFX_RB_CNTL__RB_SIZE_MASK 0x0000007EL
1107#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
1108#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
1109#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
1110#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
1111#define SDMA0_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
1112#define SDMA0_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
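/*
 * Illustrative sketch, not part of the original header: a complete
 * SDMA0_GFX_RB_CNTL value can be composed by shifting each field into place
 * and masking it so it cannot spill into its neighbours.  The inputs
 * rb_size_log2 and vmid are hypothetical, and treating RB_SIZE as a log2 ring
 * size is an assumption about the programming model, not something this header
 * states.
 */
static inline uint32_t sdma0_gfx_rb_cntl_build(uint32_t rb_size_log2,
					       uint32_t vmid)
{
	uint32_t val = 0;

	val |= (1u << SDMA0_GFX_RB_CNTL__RB_ENABLE__SHIFT) &
	       SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
	val |= (rb_size_log2 << SDMA0_GFX_RB_CNTL__RB_SIZE__SHIFT) &
	       SDMA0_GFX_RB_CNTL__RB_SIZE_MASK;
	val |= (1u << SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT) &
	       SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
	val |= (vmid << SDMA0_GFX_RB_CNTL__RB_VMID__SHIFT) &
	       SDMA0_GFX_RB_CNTL__RB_VMID_MASK;

	return val;
}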
1113//SDMA0_GFX_RB_BASE
1114#define SDMA0_GFX_RB_BASE__ADDR__SHIFT 0x0
1115#define SDMA0_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
1116//SDMA0_GFX_RB_BASE_HI
1117#define SDMA0_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
1118#define SDMA0_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
1119//SDMA0_GFX_RB_RPTR
1120#define SDMA0_GFX_RB_RPTR__OFFSET__SHIFT 0x0
1121#define SDMA0_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
1122//SDMA0_GFX_RB_RPTR_HI
1123#define SDMA0_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
1124#define SDMA0_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
1125//SDMA0_GFX_RB_WPTR
1126#define SDMA0_GFX_RB_WPTR__OFFSET__SHIFT 0x0
1127#define SDMA0_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
1128//SDMA0_GFX_RB_WPTR_HI
1129#define SDMA0_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
1130#define SDMA0_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
1131//SDMA0_GFX_RB_WPTR_POLL_CNTL
1132#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
1133#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
1134#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
1135#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
1136#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
1137#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
1138#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
1139#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
1140#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
1141#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
1142//SDMA0_GFX_RB_RPTR_ADDR_HI
1143#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
1144#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
1145//SDMA0_GFX_RB_RPTR_ADDR_LO
1146#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
1147#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
1148//SDMA0_GFX_IB_CNTL
1149#define SDMA0_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
1150#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
1151#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
1152#define SDMA0_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
1153#define SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
1154#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
1155#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
1156#define SDMA0_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
1157//SDMA0_GFX_IB_RPTR
1158#define SDMA0_GFX_IB_RPTR__OFFSET__SHIFT 0x2
1159#define SDMA0_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
1160//SDMA0_GFX_IB_OFFSET
1161#define SDMA0_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
1162#define SDMA0_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
1163//SDMA0_GFX_IB_BASE_LO
1164#define SDMA0_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
1165#define SDMA0_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
1166//SDMA0_GFX_IB_BASE_HI
1167#define SDMA0_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
1168#define SDMA0_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
1169//SDMA0_GFX_IB_SIZE
1170#define SDMA0_GFX_IB_SIZE__SIZE__SHIFT 0x0
1171#define SDMA0_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
1172//SDMA0_GFX_SKIP_CNTL
1173#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
1174#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
1175//SDMA0_GFX_CONTEXT_STATUS
1176#define SDMA0_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
1177#define SDMA0_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
1178#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
1179#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
1180#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
1181#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
1182#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
1183#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
1184#define SDMA0_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
1185#define SDMA0_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
1186#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
1187#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
1188#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
1189#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
1190#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
1191#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
1192//SDMA0_GFX_DOORBELL
1193#define SDMA0_GFX_DOORBELL__ENABLE__SHIFT 0x1c
1194#define SDMA0_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
1195#define SDMA0_GFX_DOORBELL__ENABLE_MASK 0x10000000L
1196#define SDMA0_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
1197//SDMA0_GFX_CONTEXT_CNTL
1198#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
1199#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
1200//SDMA0_GFX_STATUS
1201#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
1202#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
1203#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
1204#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
1205//SDMA0_GFX_DOORBELL_LOG
1206#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
1207#define SDMA0_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
1208#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
1209#define SDMA0_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
1210//SDMA0_GFX_WATERMARK
1211#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
1212#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
1213#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
1214#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
1215//SDMA0_GFX_DOORBELL_OFFSET
1216#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
1217#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
1218//SDMA0_GFX_CSA_ADDR_LO
1219#define SDMA0_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
1220#define SDMA0_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
1221//SDMA0_GFX_CSA_ADDR_HI
1222#define SDMA0_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
1223#define SDMA0_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
1224//SDMA0_GFX_IB_SUB_REMAIN
1225#define SDMA0_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
1226#define SDMA0_GFX_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
1227//SDMA0_GFX_PREEMPT
1228#define SDMA0_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
1229#define SDMA0_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
1230//SDMA0_GFX_DUMMY_REG
1231#define SDMA0_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
1232#define SDMA0_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
1233//SDMA0_GFX_RB_WPTR_POLL_ADDR_HI
1234#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
1235#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
1236//SDMA0_GFX_RB_WPTR_POLL_ADDR_LO
1237#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
1238#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
1239//SDMA0_GFX_RB_AQL_CNTL
1240#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
1241#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
1242#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
1243#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
1244#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
1245#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
1246//SDMA0_GFX_MINOR_PTR_UPDATE
1247#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
1248#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
1249//SDMA0_GFX_MIDCMD_DATA0
1250#define SDMA0_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
1251#define SDMA0_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
1252//SDMA0_GFX_MIDCMD_DATA1
1253#define SDMA0_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
1254#define SDMA0_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
1255//SDMA0_GFX_MIDCMD_DATA2
1256#define SDMA0_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
1257#define SDMA0_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
1258//SDMA0_GFX_MIDCMD_DATA3
1259#define SDMA0_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
1260#define SDMA0_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
1261//SDMA0_GFX_MIDCMD_DATA4
1262#define SDMA0_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
1263#define SDMA0_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
1264//SDMA0_GFX_MIDCMD_DATA5
1265#define SDMA0_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
1266#define SDMA0_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
1267//SDMA0_GFX_MIDCMD_DATA6
1268#define SDMA0_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
1269#define SDMA0_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
1270//SDMA0_GFX_MIDCMD_DATA7
1271#define SDMA0_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
1272#define SDMA0_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
1273//SDMA0_GFX_MIDCMD_DATA8
1274#define SDMA0_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
1275#define SDMA0_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
1276//SDMA0_GFX_MIDCMD_CNTL
1277#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
1278#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
1279#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
1280#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
1281#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
1282#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
1283#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
1284#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
1285//SDMA0_RLC0_RB_CNTL
1286#define SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
1287#define SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
1288#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
1289#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
1290#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
1291#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
1292#define SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
1293#define SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
1294#define SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
1295#define SDMA0_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000007EL
1296#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
1297#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
1298#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
1299#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
1300#define SDMA0_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
1301#define SDMA0_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
1302//SDMA0_RLC0_RB_BASE
1303#define SDMA0_RLC0_RB_BASE__ADDR__SHIFT 0x0
1304#define SDMA0_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
1305//SDMA0_RLC0_RB_BASE_HI
1306#define SDMA0_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
1307#define SDMA0_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
1308//SDMA0_RLC0_RB_RPTR
1309#define SDMA0_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
1310#define SDMA0_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
1311//SDMA0_RLC0_RB_RPTR_HI
1312#define SDMA0_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
1313#define SDMA0_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
1314//SDMA0_RLC0_RB_WPTR
1315#define SDMA0_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
1316#define SDMA0_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
1317//SDMA0_RLC0_RB_WPTR_HI
1318#define SDMA0_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
1319#define SDMA0_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
1320//SDMA0_RLC0_RB_WPTR_POLL_CNTL
1321#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
1322#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
1323#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
1324#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
1325#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
1326#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
1327#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
1328#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
1329#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
1330#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
1331//SDMA0_RLC0_RB_RPTR_ADDR_HI
1332#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
1333#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
1334//SDMA0_RLC0_RB_RPTR_ADDR_LO
1335#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
1336#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
1337//SDMA0_RLC0_IB_CNTL
1338#define SDMA0_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
1339#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
1340#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
1341#define SDMA0_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
1342#define SDMA0_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
1343#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
1344#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
1345#define SDMA0_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
1346//SDMA0_RLC0_IB_RPTR
1347#define SDMA0_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
1348#define SDMA0_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
1349//SDMA0_RLC0_IB_OFFSET
1350#define SDMA0_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
1351#define SDMA0_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
1352//SDMA0_RLC0_IB_BASE_LO
1353#define SDMA0_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
1354#define SDMA0_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
1355//SDMA0_RLC0_IB_BASE_HI
1356#define SDMA0_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
1357#define SDMA0_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
1358//SDMA0_RLC0_IB_SIZE
1359#define SDMA0_RLC0_IB_SIZE__SIZE__SHIFT 0x0
1360#define SDMA0_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
1361//SDMA0_RLC0_SKIP_CNTL
1362#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
1363#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
1364//SDMA0_RLC0_CONTEXT_STATUS
1365#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
1366#define SDMA0_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
1367#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
1368#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
1369#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
1370#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
1371#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
1372#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
1373#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
1374#define SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
1375#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
1376#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
1377#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
1378#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
1379#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
1380#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
1381//SDMA0_RLC0_DOORBELL
1382#define SDMA0_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
1383#define SDMA0_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
1384#define SDMA0_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
1385#define SDMA0_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
1386//SDMA0_RLC0_STATUS
1387#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
1388#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
1389#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
1390#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
1391//SDMA0_RLC0_DOORBELL_LOG
1392#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
1393#define SDMA0_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
1394#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
1395#define SDMA0_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
1396//SDMA0_RLC0_WATERMARK
1397#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
1398#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
1399#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
1400#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
1401//SDMA0_RLC0_DOORBELL_OFFSET
1402#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
1403#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
1404//SDMA0_RLC0_CSA_ADDR_LO
1405#define SDMA0_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
1406#define SDMA0_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
1407//SDMA0_RLC0_CSA_ADDR_HI
1408#define SDMA0_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
1409#define SDMA0_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
1410//SDMA0_RLC0_IB_SUB_REMAIN
1411#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
1412#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
1413//SDMA0_RLC0_PREEMPT
1414#define SDMA0_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
1415#define SDMA0_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
1416//SDMA0_RLC0_DUMMY_REG
1417#define SDMA0_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
1418#define SDMA0_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
1419//SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI
1420#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
1421#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
1422//SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO
1423#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
1424#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
1425//SDMA0_RLC0_RB_AQL_CNTL
1426#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
1427#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
1428#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
1429#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
1430#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
1431#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
1432//SDMA0_RLC0_MINOR_PTR_UPDATE
1433#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
1434#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
1435//SDMA0_RLC0_MIDCMD_DATA0
1436#define SDMA0_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
1437#define SDMA0_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
1438//SDMA0_RLC0_MIDCMD_DATA1
1439#define SDMA0_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
1440#define SDMA0_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
1441//SDMA0_RLC0_MIDCMD_DATA2
1442#define SDMA0_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
1443#define SDMA0_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
1444//SDMA0_RLC0_MIDCMD_DATA3
1445#define SDMA0_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
1446#define SDMA0_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
1447//SDMA0_RLC0_MIDCMD_DATA4
1448#define SDMA0_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
1449#define SDMA0_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
1450//SDMA0_RLC0_MIDCMD_DATA5
1451#define SDMA0_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
1452#define SDMA0_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
1453//SDMA0_RLC0_MIDCMD_DATA6
1454#define SDMA0_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
1455#define SDMA0_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
1456//SDMA0_RLC0_MIDCMD_DATA7
1457#define SDMA0_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
1458#define SDMA0_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
1459//SDMA0_RLC0_MIDCMD_DATA8
1460#define SDMA0_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
1461#define SDMA0_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
1462//SDMA0_RLC0_MIDCMD_CNTL
1463#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
1464#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
1465#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
1466#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
1467#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
1468#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
1469#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
1470#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
1471//SDMA0_RLC1_RB_CNTL
1472#define SDMA0_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
1473#define SDMA0_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
1474#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
1475#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
1476#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
1477#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
1478#define SDMA0_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
1479#define SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
1480#define SDMA0_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
1481#define SDMA0_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000007EL
1482#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
1483#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
1484#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
1485#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
1486#define SDMA0_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
1487#define SDMA0_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
1488//SDMA0_RLC1_RB_BASE
1489#define SDMA0_RLC1_RB_BASE__ADDR__SHIFT 0x0
1490#define SDMA0_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
1491//SDMA0_RLC1_RB_BASE_HI
1492#define SDMA0_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
1493#define SDMA0_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
1494//SDMA0_RLC1_RB_RPTR
1495#define SDMA0_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
1496#define SDMA0_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
1497//SDMA0_RLC1_RB_RPTR_HI
1498#define SDMA0_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
1499#define SDMA0_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
1500//SDMA0_RLC1_RB_WPTR
1501#define SDMA0_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
1502#define SDMA0_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
1503//SDMA0_RLC1_RB_WPTR_HI
1504#define SDMA0_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
1505#define SDMA0_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
1506//SDMA0_RLC1_RB_WPTR_POLL_CNTL
1507#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
1508#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
1509#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
1510#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
1511#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
1512#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
1513#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
1514#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
1515#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
1516#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
1517//SDMA0_RLC1_RB_RPTR_ADDR_HI
1518#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
1519#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
1520//SDMA0_RLC1_RB_RPTR_ADDR_LO
1521#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
1522#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
1523//SDMA0_RLC1_IB_CNTL
1524#define SDMA0_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
1525#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
1526#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
1527#define SDMA0_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
1528#define SDMA0_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
1529#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
1530#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
1531#define SDMA0_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
1532//SDMA0_RLC1_IB_RPTR
1533#define SDMA0_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
1534#define SDMA0_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
1535//SDMA0_RLC1_IB_OFFSET
1536#define SDMA0_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
1537#define SDMA0_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
1538//SDMA0_RLC1_IB_BASE_LO
1539#define SDMA0_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
1540#define SDMA0_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
1541//SDMA0_RLC1_IB_BASE_HI
1542#define SDMA0_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
1543#define SDMA0_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
1544//SDMA0_RLC1_IB_SIZE
1545#define SDMA0_RLC1_IB_SIZE__SIZE__SHIFT 0x0
1546#define SDMA0_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
1547//SDMA0_RLC1_SKIP_CNTL
1548#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
1549#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
1550//SDMA0_RLC1_CONTEXT_STATUS
1551#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
1552#define SDMA0_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
1553#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
1554#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
1555#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
1556#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
1557#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
1558#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
1559#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
1560#define SDMA0_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
1561#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
1562#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
1563#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
1564#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
1565#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
1566#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
1567//SDMA0_RLC1_DOORBELL
1568#define SDMA0_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
1569#define SDMA0_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
1570#define SDMA0_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
1571#define SDMA0_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
1572//SDMA0_RLC1_STATUS
1573#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
1574#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
1575#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
1576#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
1577//SDMA0_RLC1_DOORBELL_LOG
1578#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
1579#define SDMA0_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
1580#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
1581#define SDMA0_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
1582//SDMA0_RLC1_WATERMARK
1583#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
1584#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
1585#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
1586#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
1587//SDMA0_RLC1_DOORBELL_OFFSET
1588#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
1589#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
1590//SDMA0_RLC1_CSA_ADDR_LO
1591#define SDMA0_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
1592#define SDMA0_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
1593//SDMA0_RLC1_CSA_ADDR_HI
1594#define SDMA0_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
1595#define SDMA0_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
1596//SDMA0_RLC1_IB_SUB_REMAIN
1597#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
1598#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
1599//SDMA0_RLC1_PREEMPT
1600#define SDMA0_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
1601#define SDMA0_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
1602//SDMA0_RLC1_DUMMY_REG
1603#define SDMA0_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
1604#define SDMA0_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
1605//SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI
1606#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
1607#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
1608//SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO
1609#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
1610#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
1611//SDMA0_RLC1_RB_AQL_CNTL
1612#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
1613#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
1614#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
1615#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
1616#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
1617#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
1618//SDMA0_RLC1_MINOR_PTR_UPDATE
1619#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
1620#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
1621//SDMA0_RLC1_MIDCMD_DATA0
1622#define SDMA0_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
1623#define SDMA0_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
1624//SDMA0_RLC1_MIDCMD_DATA1
1625#define SDMA0_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
1626#define SDMA0_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
1627//SDMA0_RLC1_MIDCMD_DATA2
1628#define SDMA0_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
1629#define SDMA0_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
1630//SDMA0_RLC1_MIDCMD_DATA3
1631#define SDMA0_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
1632#define SDMA0_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
1633//SDMA0_RLC1_MIDCMD_DATA4
1634#define SDMA0_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
1635#define SDMA0_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
1636//SDMA0_RLC1_MIDCMD_DATA5
1637#define SDMA0_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
1638#define SDMA0_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
1639//SDMA0_RLC1_MIDCMD_DATA6
1640#define SDMA0_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
1641#define SDMA0_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
1642//SDMA0_RLC1_MIDCMD_DATA7
1643#define SDMA0_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
1644#define SDMA0_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
1645//SDMA0_RLC1_MIDCMD_DATA8
1646#define SDMA0_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
1647#define SDMA0_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
1648//SDMA0_RLC1_MIDCMD_CNTL
1649#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
1650#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
1651#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
1652#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
1653#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
1654#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
1655#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
1656#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
1657
1658#endif
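Every register in this block follows the same <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK naming convention, so field access is always a mask-and-shift. A minimal, self-contained sketch of that pattern (the set_field/get_field helpers are illustrative, not part of the header; amdgpu has its own REG_SET_FIELD/REG_GET_FIELD helpers for the same job, and the RB_VMID shift below is simply derived from the 0x0F000000 mask above):

#include <stdint.h>
#include <stdio.h>

#define SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18          /* derived from the mask */
#define SDMA0_RLC1_RB_CNTL__RB_VMID_MASK   0x0F000000L   /* as defined above */

/* Insert a field value into a register word without disturbing other bits. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Extract a field value from a register word. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t rb_cntl = 0;

	rb_cntl = set_field(rb_cntl, SDMA0_RLC1_RB_CNTL__RB_VMID_MASK,
			    SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT, 3);
	printf("RB_CNTL = 0x%08x, VMID = %u\n", (unsigned)rb_cntl,
	       (unsigned)get_field(rb_cntl, SDMA0_RLC1_RB_CNTL__RB_VMID_MASK,
				   SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT));
	return 0;
}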
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index b89347ed1a40..f35aba72e640 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -1246,5 +1246,6 @@
1246#define ixGC_CAC_OVRD_CU 0xe7 1246#define ixGC_CAC_OVRD_CU 0xe7
1247#define ixCURRENT_PG_STATUS 0xc020029c 1247#define ixCURRENT_PG_STATUS 0xc020029c
1248#define ixCURRENT_PG_STATUS_APU 0xd020029c 1248#define ixCURRENT_PG_STATUS_APU 0xd020029c
1249#define ixPWR_SVI2_STATUS 0xC0200294
1249 1250
1250#endif /* SMU_7_1_3_D_H */ 1251#endif /* SMU_7_1_3_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
index 654c1093d362..481ee6560aa9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -6078,6 +6078,8 @@
6078#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10 6078#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10
6079#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 6079#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
6080#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 6080#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004
6081 6081#define PWR_SVI2_STATUS__PLANE1_VID_MASK 0x000000ff
6082 6082#define PWR_SVI2_STATUS__PLANE1_VID__SHIFT 0x00000000
6083#define PWR_SVI2_STATUS__PLANE2_VID_MASK 0x0000ff00
6084#define PWR_SVI2_STATUS__PLANE2_VID__SHIFT 0x00000008
6083#endif /* SMU_7_1_3_SH_MASK_H */ 6085#endif /* SMU_7_1_3_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
index c1006fe58daa..efd2704d0f8f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
@@ -172,4 +172,7 @@
172#define mmROM_SW_DATA_64 0x006d 172#define mmROM_SW_DATA_64 0x006d
173#define mmROM_SW_DATA_64_BASE_IDX 0 173#define mmROM_SW_DATA_64_BASE_IDX 0
174 174
175#define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 0
176#define mmSMUSVI0_PLANE0_CURRENTVID 0x0013
177
175#endif 178#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
index a0be5c9bfc10..2487ab9621e9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
@@ -254,5 +254,8 @@
254//ROM_SW_DATA_64 254//ROM_SW_DATA_64
255#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 255#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
256#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xFFFFFFFFL 256#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xFFFFFFFFL
257/* SMUSVI0_PLANE0_CURRENTVID */
258#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18
259#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L
257 260
258#endif 261#endif
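The new SMUSVI0_PLANE0_CURRENTVID field carries an 8-bit SVI2 voltage ID in the top byte of the register. A hedged sketch of pulling that raw VID out of a register value that has already been read over MMIO (the actual register read and the VID-to-millivolt conversion are ASIC-specific and not shown):

#include <stdint.h>

#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18
#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK   0xFF000000L

/* Return the raw plane-0 voltage ID; converting it to millivolts depends on
 * the board's SVI2 encoding and is left to the caller. */
static uint8_t svi0_plane0_vid(uint32_t reg_val)
{
	return (reg_val & SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
	       SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
}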
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 675988d56392..860221924ef7 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -29,18 +29,6 @@
29struct cgs_device; 29struct cgs_device;
30 30
31/** 31/**
32 * enum cgs_gpu_mem_type - GPU memory types
33 */
34enum cgs_gpu_mem_type {
35 CGS_GPU_MEM_TYPE__VISIBLE_FB,
36 CGS_GPU_MEM_TYPE__INVISIBLE_FB,
37 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
38 CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB,
39 CGS_GPU_MEM_TYPE__GART_CACHEABLE,
40 CGS_GPU_MEM_TYPE__GART_WRITECOMBINE
41};
42
43/**
44 * enum cgs_ind_reg - Indirect register spaces 32 * enum cgs_ind_reg - Indirect register spaces
45 */ 33 */
46enum cgs_ind_reg { 34enum cgs_ind_reg {
@@ -88,32 +76,6 @@ enum cgs_ucode_id {
88 CGS_UCODE_ID_MAXIMUM, 76 CGS_UCODE_ID_MAXIMUM,
89}; 77};
90 78
91enum cgs_system_info_id {
92 CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
93 CGS_SYSTEM_INFO_PCIE_GEN_INFO,
94 CGS_SYSTEM_INFO_PCIE_MLW,
95 CGS_SYSTEM_INFO_PCIE_DEV,
96 CGS_SYSTEM_INFO_PCIE_REV,
97 CGS_SYSTEM_INFO_CG_FLAGS,
98 CGS_SYSTEM_INFO_PG_FLAGS,
99 CGS_SYSTEM_INFO_GFX_CU_INFO,
100 CGS_SYSTEM_INFO_GFX_SE_INFO,
101 CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
102 CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
103 CGS_SYSTEM_INFO_PCIE_BUS_DEVFN,
104 CGS_SYSTEM_INFO_ID_MAXIMUM,
105};
106
107struct cgs_system_info {
108 uint64_t size;
109 enum cgs_system_info_id info_id;
110 union {
111 void *ptr;
112 uint64_t value;
113 };
114 uint64_t padding[13];
115};
116
117/* 79/*
118 * enum cgs_resource_type - GPU resource type 80 * enum cgs_resource_type - GPU resource type
119 */ 81 */
@@ -156,121 +118,6 @@ struct cgs_display_info {
156 118
157typedef unsigned long cgs_handle_t; 119typedef unsigned long cgs_handle_t;
158 120
159#define CGS_ACPI_METHOD_ATCS 0x53435441
160#define CGS_ACPI_METHOD_ATIF 0x46495441
161#define CGS_ACPI_METHOD_ATPX 0x58505441
162#define CGS_ACPI_FIELD_METHOD_NAME 0x00000001
163#define CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT 0x00000002
164#define CGS_ACPI_MAX_BUFFER_SIZE 256
165#define CGS_ACPI_TYPE_ANY 0x00
166#define CGS_ACPI_TYPE_INTEGER 0x01
167#define CGS_ACPI_TYPE_STRING 0x02
168#define CGS_ACPI_TYPE_BUFFER 0x03
169#define CGS_ACPI_TYPE_PACKAGE 0x04
170
171struct cgs_acpi_method_argument {
172 uint32_t type;
173 uint32_t data_length;
174 union{
175 uint32_t value;
176 void *pointer;
177 };
178};
179
180struct cgs_acpi_method_info {
181 uint32_t size;
182 uint32_t field;
183 uint32_t input_count;
184 uint32_t name;
185 struct cgs_acpi_method_argument *pinput_argument;
186 uint32_t output_count;
187 struct cgs_acpi_method_argument *poutput_argument;
188 uint32_t padding[9];
189};
190
191/**
192 * cgs_alloc_gpu_mem() - Allocate GPU memory
193 * @cgs_device: opaque device handle
194 * @type: memory type
195 * @size: size in bytes
196 * @align: alignment in bytes
197 * @handle: memory handle (output)
198 *
199 * The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
200 * memory allocation. This guarantees that the MC address returned by
201 * cgs_gmap_gpu_mem is not mapped through the GART. The non-contiguous
202 * FB memory types may be GART mapped depending on memory
203 * fragmentation and memory allocator policies.
204 *
205 * If min/max_offset are non-0, the allocation will be forced to
206 * reside between these offsets in its respective memory heap. The
207 * base address that the offset relates to, depends on the memory
208 * type.
209 *
210 * - CGS_GPU_MEM_TYPE__*_CONTIG_FB: FB MC base address
211 * - CGS_GPU_MEM_TYPE__GART_*: GART aperture base address
212 * - others: undefined, don't use with max_offset
213 *
214 * Return: 0 on success, -errno otherwise
215 */
216typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
217 uint64_t size, uint64_t align,
218 cgs_handle_t *handle);
219
220/**
221 * cgs_free_gpu_mem() - Free GPU memory
222 * @cgs_device: opaque device handle
223 * @handle: memory handle returned by alloc or import
224 *
225 * Return: 0 on success, -errno otherwise
226 */
227typedef int (*cgs_free_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
228
229/**
230 * cgs_gmap_gpu_mem() - GPU-map GPU memory
231 * @cgs_device: opaque device handle
232 * @handle: memory handle returned by alloc or import
233 * @mcaddr: MC address (output)
234 *
235 * Ensures that a buffer is GPU accessible and returns its MC address.
236 *
237 * Return: 0 on success, -errno otherwise
238 */
239typedef int (*cgs_gmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
240 uint64_t *mcaddr);
241
242/**
243 * cgs_gunmap_gpu_mem() - GPU-unmap GPU memory
244 * @cgs_device: opaque device handle
245 * @handle: memory handle returned by alloc or import
246 *
247 * Allows the buffer to be migrated while it's not used by the GPU.
248 *
249 * Return: 0 on success, -errno otherwise
250 */
251typedef int (*cgs_gunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
252
253/**
254 * cgs_kmap_gpu_mem() - Kernel-map GPU memory
255 *
256 * @cgs_device: opaque device handle
257 * @handle: memory handle returned by alloc or import
258 * @map: Kernel virtual address the memory was mapped to (output)
259 *
260 * Return: 0 on success, -errno otherwise
261 */
262typedef int (*cgs_kmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
263 void **map);
264
265/**
266 * cgs_kunmap_gpu_mem() - Kernel-unmap GPU memory
267 * @cgs_device: opaque device handle
268 * @handle: memory handle returned by alloc or import
269 *
270 * Return: 0 on success, -errno otherwise
271 */
272typedef int (*cgs_kunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
273
274/** 121/**
275 * cgs_read_register() - Read an MMIO register 122 * cgs_read_register() - Read an MMIO register
276 * @cgs_device: opaque device handle 123 * @cgs_device: opaque device handle
@@ -406,35 +253,13 @@ typedef int(*cgs_get_active_displays_info)(
406 253
407typedef int (*cgs_notify_dpm_enabled)(struct cgs_device *cgs_device, bool enabled); 254typedef int (*cgs_notify_dpm_enabled)(struct cgs_device *cgs_device, bool enabled);
408 255
409typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
410 uint32_t acpi_method,
411 uint32_t acpi_function,
412 void *pinput, void *poutput,
413 uint32_t output_count,
414 uint32_t input_size,
415 uint32_t output_size);
416
417typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
418 struct cgs_system_info *sys_info);
419
420typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device); 256typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
421 257
422typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en); 258typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
423 259
424typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock); 260typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
425 261
426struct amd_pp_init;
427typedef void* (*cgs_register_pp_handle)(struct cgs_device *cgs_device,
428 int (*call_back_func)(struct amd_pp_init *, void **));
429
430struct cgs_ops { 262struct cgs_ops {
431 /* memory management calls (similar to KFD interface) */
432 cgs_alloc_gpu_mem_t alloc_gpu_mem;
433 cgs_free_gpu_mem_t free_gpu_mem;
434 cgs_gmap_gpu_mem_t gmap_gpu_mem;
435 cgs_gunmap_gpu_mem_t gunmap_gpu_mem;
436 cgs_kmap_gpu_mem_t kmap_gpu_mem;
437 cgs_kunmap_gpu_mem_t kunmap_gpu_mem;
438 /* MMIO access */ 263 /* MMIO access */
439 cgs_read_register_t read_register; 264 cgs_read_register_t read_register;
440 cgs_write_register_t write_register; 265 cgs_write_register_t write_register;
@@ -456,14 +281,9 @@ struct cgs_ops {
456 cgs_get_active_displays_info get_active_displays_info; 281 cgs_get_active_displays_info get_active_displays_info;
457 /* notify dpm enabled */ 282 /* notify dpm enabled */
458 cgs_notify_dpm_enabled notify_dpm_enabled; 283 cgs_notify_dpm_enabled notify_dpm_enabled;
459 /* ACPI */
460 cgs_call_acpi_method call_acpi_method;
461 /* get system info */
462 cgs_query_system_info query_system_info;
463 cgs_is_virtualization_enabled_t is_virtualization_enabled; 284 cgs_is_virtualization_enabled_t is_virtualization_enabled;
464 cgs_enter_safe_mode enter_safe_mode; 285 cgs_enter_safe_mode enter_safe_mode;
465 cgs_lock_grbm_idx lock_grbm_idx; 286 cgs_lock_grbm_idx lock_grbm_idx;
466 cgs_register_pp_handle register_pp_handle;
467}; 287};
468 288
469struct cgs_os_ops; /* To be defined in OS-specific CGS header */ 289
@@ -482,19 +302,6 @@ struct cgs_device
482#define CGS_OS_CALL(func,dev,...) \ 302#define CGS_OS_CALL(func,dev,...) \
483 (((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__)) 303 (((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
484 304
485#define cgs_alloc_gpu_mem(dev,type,size,align,handle) \
486 CGS_CALL(alloc_gpu_mem,dev,type,size,align,handle)
487#define cgs_free_gpu_mem(dev,handle) \
488 CGS_CALL(free_gpu_mem,dev,handle)
489#define cgs_gmap_gpu_mem(dev,handle,mcaddr) \
490 CGS_CALL(gmap_gpu_mem,dev,handle,mcaddr)
491#define cgs_gunmap_gpu_mem(dev,handle) \
492 CGS_CALL(gunmap_gpu_mem,dev,handle)
493#define cgs_kmap_gpu_mem(dev,handle,map) \
494 CGS_CALL(kmap_gpu_mem,dev,handle,map)
495#define cgs_kunmap_gpu_mem(dev,handle) \
496 CGS_CALL(kunmap_gpu_mem,dev,handle)
497
498#define cgs_read_register(dev,offset) \ 305#define cgs_read_register(dev,offset) \
499 CGS_CALL(read_register,dev,offset) 306 CGS_CALL(read_register,dev,offset)
500#define cgs_write_register(dev,offset,value) \ 307#define cgs_write_register(dev,offset,value) \
@@ -525,10 +332,6 @@ struct cgs_device
525#define cgs_get_active_displays_info(dev, info) \ 332#define cgs_get_active_displays_info(dev, info) \
526 CGS_CALL(get_active_displays_info, dev, info) 333 CGS_CALL(get_active_displays_info, dev, info)
527 334
528#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
529 CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
530#define cgs_query_system_info(dev, sys_info) \
531 CGS_CALL(query_system_info, dev, sys_info)
532#define cgs_get_pci_resource(cgs_device, resource_type, size, offset, \ 335#define cgs_get_pci_resource(cgs_device, resource_type, size, offset, \
533 resource_base) \ 336 resource_base) \
534 CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \ 337 CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
@@ -542,7 +345,6 @@ struct cgs_device
542 345
543#define cgs_lock_grbm_idx(cgs_device, lock) \ 346#define cgs_lock_grbm_idx(cgs_device, lock) \
544 CGS_CALL(lock_grbm_idx, cgs_device, lock) 347 CGS_CALL(lock_grbm_idx, cgs_device, lock)
545#define cgs_register_pp_handle(cgs_device, call_back_func) \ 348
546 CGS_CALL(register_pp_handle, cgs_device, call_back_func)
547 349
548#endif /* _CGS_COMMON_H */ 350#endif /* _CGS_COMMON_H */
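With the GPU-memory and ACPI entry points removed, every remaining cgs_* wrapper in this header reduces to the same indirection: cast the opaque handle, chase a function pointer in the ops table, and forward the arguments (CGS_OS_CALL above shows the exact shape). A simplified, self-contained sketch of that dispatch pattern, with demo_* names standing in for the real structures:

#include <stdint.h>

struct demo_cgs_device;

struct demo_cgs_ops {
	uint32_t (*read_register)(struct demo_cgs_device *cgs_device, unsigned int offset);
	void (*write_register)(struct demo_cgs_device *cgs_device, unsigned int offset,
			       uint32_t value);
};

struct demo_cgs_device {
	const struct demo_cgs_ops *ops;
};

/* Same shape as the CGS call macros: look the function up in the device's
 * ops table and pass the device back as the first argument. */
#define DEMO_CGS_CALL(func, dev, ...) \
	(((struct demo_cgs_device *)(dev))->ops->func(dev, ##__VA_ARGS__))

#define demo_cgs_read_register(dev, offset) \
	DEMO_CGS_CALL(read_register, dev, offset)
#define demo_cgs_write_register(dev, offset, value) \
	DEMO_CGS_CALL(write_register, dev, offset, value)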
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index a6752bd0c871..1e5c22ceb256 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -30,6 +30,7 @@
30 30
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/bitmap.h> 32#include <linux/bitmap.h>
33#include <linux/dma-fence.h>
33 34
34struct pci_dev; 35struct pci_dev;
35 36
@@ -107,6 +108,12 @@ struct kgd2kfd_shared_resources {
107 108
108 /* Number of bytes at start of aperture reserved for KGD. */ 109 /* Number of bytes at start of aperture reserved for KGD. */
109 size_t doorbell_start_offset; 110 size_t doorbell_start_offset;
111
112 /* GPUVM address space size in bytes */
113 uint64_t gpuvm_size;
114
115 /* Minor device number of the render node */
116 int drm_render_minor;
110}; 117};
111 118
112struct tile_config { 119struct tile_config {
@@ -120,6 +127,25 @@ struct tile_config {
120 uint32_t num_ranks; 127 uint32_t num_ranks;
121}; 128};
122 129
130
131/*
132 * Allocation flag domains
133 */
134#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
135#define ALLOC_MEM_FLAGS_GTT (1 << 1)
136#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) /* TODO */
137#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) /* TODO */
138
139/*
140 * Allocation flags attributes/access options.
141 */
142#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
143#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
144#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
145#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */
146#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
147#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */
148
123/** 149/**
124 * struct kfd2kgd_calls 150 * struct kfd2kgd_calls
125 * 151 *
@@ -179,6 +205,45 @@ struct tile_config {
179 * 205 *
180 * @get_vram_usage: Returns current VRAM usage 206 * @get_vram_usage: Returns current VRAM usage
181 * 207 *
208 * @create_process_vm: Create a VM address space for a given process and GPU
209 *
210 * @destroy_process_vm: Destroy a VM
211 *
212 * @get_process_page_dir: Get physical address of a VM page directory
213 *
214 * @set_vm_context_page_table_base: Program page table base for a VMID
215 *
216 * @alloc_memory_of_gpu: Allocate GPUVM memory
217 *
218 * @free_memory_of_gpu: Free GPUVM memory
219 *
220 * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
221 * space. Allocates and updates page tables and page directories as
222 * needed. This function may return before all page table updates have
223 * completed. This allows multiple map operations (on multiple GPUs)
224 * to happen concurrently. Use sync_memory to synchronize with all
225 * pending updates.
226 *
227 * @unmap_memory_to_gpu: Unmap GPUVM memory from a specific VM address space
228 *
229 * @sync_memory: Wait for pending page table updates to complete
230 *
231 * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access
232 * Pins the BO, maps it to kernel address space. Such BOs are never evicted.
233 * The kernel virtual address remains valid until the BO is freed.
234 *
235 * @restore_process_bos: Restore all BOs that belong to the
236 * process. This is intended for restoring memory mappings after a TTM
237 * eviction.
238 *
239 * @invalidate_tlbs: Invalidate TLBs for a specific PASID
240 *
241 * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
242 *
243 * @submit_ib: Submits an IB to the engine specified by inserting the
244 * IB to the corresponding ring (ring type). The IB is executed with the
245 * specified VMID in a user mode context.
246 *
182 * This structure contains function pointers to services that the kgd driver 247 * This structure contains function pointers to services that the kgd driver
183 * provides to the amdkfd driver. 248
184 * 249 *
@@ -258,8 +323,6 @@ struct kfd2kgd_calls {
258 uint16_t (*get_atc_vmid_pasid_mapping_pasid)( 323 uint16_t (*get_atc_vmid_pasid_mapping_pasid)(
259 struct kgd_dev *kgd, 324 struct kgd_dev *kgd,
260 uint8_t vmid); 325 uint8_t vmid);
261 void (*write_vmid_invalidate_request)(struct kgd_dev *kgd,
262 uint8_t vmid);
263 326
264 uint16_t (*get_fw_version)(struct kgd_dev *kgd, 327 uint16_t (*get_fw_version)(struct kgd_dev *kgd,
265 enum kgd_engine_type type); 328 enum kgd_engine_type type);
@@ -270,6 +333,33 @@ struct kfd2kgd_calls {
270 void (*get_cu_info)(struct kgd_dev *kgd, 333 void (*get_cu_info)(struct kgd_dev *kgd,
271 struct kfd_cu_info *cu_info); 334 struct kfd_cu_info *cu_info);
272 uint64_t (*get_vram_usage)(struct kgd_dev *kgd); 335 uint64_t (*get_vram_usage)(struct kgd_dev *kgd);
336
337 int (*create_process_vm)(struct kgd_dev *kgd, void **vm,
338 void **process_info, struct dma_fence **ef);
339 void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
340 uint32_t (*get_process_page_dir)(void *vm);
341 void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
342 uint32_t vmid, uint32_t page_table_base);
343 int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
344 uint64_t size, void *vm,
345 struct kgd_mem **mem, uint64_t *offset,
346 uint32_t flags);
347 int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
348 int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
349 void *vm);
350 int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
351 void *vm);
352 int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
353 int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
354 void **kptr, uint64_t *size);
355 int (*restore_process_bos)(void *process_info, struct dma_fence **ef);
356
357 int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
358 int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
359
360 int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
361 uint32_t vmid, uint64_t gpu_addr,
362 uint32_t *ib_cmd, uint32_t ib_len);
273}; 363};
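Together with the ALLOC_MEM_FLAGS_* bits defined earlier in this header, the new GPUVM callbacks are intended to be driven as an allocate → map → sync sequence, with sync_memory() covering the page-table updates that map_memory_to_gpu() may leave pending. A hedged sketch of how amdkfd might use the table (assumes this header is included; the VA, flag choice and error handling are illustrative only):

/* Hedged sketch: allocate writable, executable VRAM in a process VM, map it
 * on the GPU, then wait for the pending page-table updates. */
static int demo_alloc_and_map(const struct kfd2kgd_calls *f, struct kgd_dev *kgd,
			      void *vm, uint64_t va, uint64_t size,
			      struct kgd_mem **mem)
{
	uint32_t flags = ALLOC_MEM_FLAGS_VRAM | ALLOC_MEM_FLAGS_WRITABLE |
			 ALLOC_MEM_FLAGS_EXECUTABLE;
	uint64_t offset = 0;
	int r;

	r = f->alloc_memory_of_gpu(kgd, va, size, vm, mem, &offset, flags);
	if (r)
		return r;

	r = f->map_memory_to_gpu(kgd, *mem, vm);
	if (r)
		goto err_free;

	/* map_memory_to_gpu() may return before all page-table updates have
	 * completed; sync_memory() waits for them (interruptibly here). */
	r = f->sync_memory(kgd, *mem, true);
	if (r)
		goto err_unmap;

	return 0;

err_unmap:
	f->unmap_memory_to_gpu(kgd, *mem, vm);
err_free:
	f->free_memory_of_gpu(kgd, *mem);
	return r;
}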
274 364
275/** 365/**
@@ -288,6 +378,9 @@ struct kfd2kgd_calls {
288 * 378 *
289 * @resume: Notifies amdkfd about a resume action done to a kgd device 379 * @resume: Notifies amdkfd about a resume action done to a kgd device
290 * 380 *
381 * @schedule_evict_and_restore_process: Schedules work queue that will prepare
382 * for safe eviction of KFD BOs that belong to the specified process.
383 *
291 * This structure contains function callback pointers so the kgd driver 384 * This structure contains function callback pointers so the kgd driver
292 * will notify the amdkfd about certain status changes. 385
293 * 386 *
@@ -302,6 +395,8 @@ struct kgd2kfd_calls {
302 void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry); 395 void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
303 void (*suspend)(struct kfd_dev *kfd); 396 void (*suspend)(struct kfd_dev *kfd);
304 int (*resume)(struct kfd_dev *kfd); 397 int (*resume)(struct kfd_dev *kfd);
398 int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
399 struct dma_fence *fence);
305}; 400};
306 401
307int kgd2kfd_init(unsigned interface_version, 402int kgd2kfd_init(unsigned interface_version,
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index ed27626dff14..5c840c022b52 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -24,8 +24,7 @@
24#ifndef __KGD_PP_INTERFACE_H__ 24#ifndef __KGD_PP_INTERFACE_H__
25#define __KGD_PP_INTERFACE_H__ 25#define __KGD_PP_INTERFACE_H__
26 26
27extern const struct amd_ip_funcs pp_ip_funcs; 27extern const struct amdgpu_ip_block_version pp_smu_ip_block;
28extern const struct amd_pm_funcs pp_dpm_funcs;
29 28
30struct amd_vce_state { 29struct amd_vce_state {
31 /* vce clocks */ 30 /* vce clocks */
@@ -83,20 +82,6 @@ enum amd_vce_level {
83 AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ 82 AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
84}; 83};
85 84
86enum amd_pp_profile_type {
87 AMD_PP_GFX_PROFILE,
88 AMD_PP_COMPUTE_PROFILE,
89};
90
91struct amd_pp_profile {
92 enum amd_pp_profile_type type;
93 uint32_t min_sclk;
94 uint32_t min_mclk;
95 uint16_t activity_threshold;
96 uint8_t up_hyst;
97 uint8_t down_hyst;
98};
99
100enum amd_fan_ctrl_mode { 85enum amd_fan_ctrl_mode {
101 AMD_FAN_CTRL_NONE = 0, 86 AMD_FAN_CTRL_NONE = 0,
102 AMD_FAN_CTRL_MANUAL = 1, 87 AMD_FAN_CTRL_MANUAL = 1,
@@ -107,6 +92,8 @@ enum pp_clock_type {
107 PP_SCLK, 92 PP_SCLK,
108 PP_MCLK, 93 PP_MCLK,
109 PP_PCIE, 94 PP_PCIE,
95 OD_SCLK,
96 OD_MCLK,
110}; 97};
111 98
112enum amd_pp_sensors { 99enum amd_pp_sensors {
@@ -122,6 +109,8 @@ enum amd_pp_sensors {
122 AMDGPU_PP_SENSOR_VCE_POWER, 109 AMDGPU_PP_SENSOR_VCE_POWER,
123 AMDGPU_PP_SENSOR_UVD_POWER, 110 AMDGPU_PP_SENSOR_UVD_POWER,
124 AMDGPU_PP_SENSOR_GPU_POWER, 111 AMDGPU_PP_SENSOR_GPU_POWER,
112 AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
113 AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
125}; 114};
126 115
127enum amd_pp_task { 116enum amd_pp_task {
@@ -132,16 +121,15 @@ enum amd_pp_task {
132 AMD_PP_TASK_MAX 121 AMD_PP_TASK_MAX
133}; 122};
134 123
135struct amd_pp_init { 124enum PP_SMC_POWER_PROFILE {
136 struct cgs_device *device; 125 PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0,
137 uint32_t chip_family; 126 PP_SMC_POWER_PROFILE_POWERSAVING = 0x1,
138 uint32_t chip_id; 127 PP_SMC_POWER_PROFILE_VIDEO = 0x2,
139 bool pm_en; 128 PP_SMC_POWER_PROFILE_VR = 0x3,
140 uint32_t feature_mask; 129 PP_SMC_POWER_PROFILE_COMPUTE = 0x4,
130 PP_SMC_POWER_PROFILE_CUSTOM = 0x5,
141}; 131};
142 132
143
144
145enum { 133enum {
146 PP_GROUP_UNKNOWN = 0, 134 PP_GROUP_UNKNOWN = 0,
147 PP_GROUP_GFX = 1, 135 PP_GROUP_GFX = 1,
@@ -149,6 +137,13 @@ enum {
149 PP_GROUP_MAX 137 PP_GROUP_MAX
150}; 138};
151 139
140enum PP_OD_DPM_TABLE_COMMAND {
141 PP_OD_EDIT_SCLK_VDDC_TABLE,
142 PP_OD_EDIT_MCLK_VDDC_TABLE,
143 PP_OD_RESTORE_DEFAULT_TABLE,
144 PP_OD_COMMIT_DPM_TABLE
145};
146
152struct pp_states_info { 147struct pp_states_info {
153 uint32_t nums; 148 uint32_t nums;
154 uint32_t states[16]; 149 uint32_t states[16];
@@ -222,7 +217,6 @@ struct amd_pm_funcs {
222 void *rps, 217 void *rps,
223 bool *equal); 218 bool *equal);
224/* export for sysfs */ 219/* export for sysfs */
225 int (*get_temperature)(void *handle);
226 void (*set_fan_control_mode)(void *handle, u32 mode); 220 void (*set_fan_control_mode)(void *handle, u32 mode);
227 u32 (*get_fan_control_mode)(void *handle); 221 u32 (*get_fan_control_mode)(void *handle);
228 int (*set_fan_speed_percent)(void *handle, u32 speed); 222 int (*set_fan_speed_percent)(void *handle, u32 speed);
@@ -242,21 +236,13 @@ struct amd_pm_funcs {
242 int (*get_pp_table)(void *handle, char **table); 236 int (*get_pp_table)(void *handle, char **table);
243 int (*set_pp_table)(void *handle, const char *buf, size_t size); 237 int (*set_pp_table)(void *handle, const char *buf, size_t size);
244 void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m); 238 void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
245 239 int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
246 int (*reset_power_profile_state)(void *handle,
247 struct amd_pp_profile *request);
248 int (*get_power_profile_state)(void *handle,
249 struct amd_pp_profile *query);
250 int (*set_power_profile_state)(void *handle,
251 struct amd_pp_profile *request);
252 int (*switch_power_profile)(void *handle,
253 enum amd_pp_profile_type type);
254/* export to amdgpu */ 240/* export to amdgpu */
255 void (*powergate_uvd)(void *handle, bool gate); 241 void (*powergate_uvd)(void *handle, bool gate);
256 void (*powergate_vce)(void *handle, bool gate); 242 void (*powergate_vce)(void *handle, bool gate);
257 struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx); 243 struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
258 int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id, 244 int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
259 void *input, void *output); 245 enum amd_pm_state_type *user_state);
260 int (*load_firmware)(void *handle); 246 int (*load_firmware)(void *handle);
261 int (*wait_for_fw_loading_complete)(void *handle); 247 int (*wait_for_fw_loading_complete)(void *handle);
262 int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id); 248 int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
@@ -265,6 +251,8 @@ struct amd_pm_funcs {
265 uint32_t mc_addr_low, 251 uint32_t mc_addr_low,
266 uint32_t mc_addr_hi, 252 uint32_t mc_addr_hi,
267 uint32_t size); 253 uint32_t size);
254 int (*set_power_limit)(void *handle, uint32_t n);
255 int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
268/* export to DC */ 256/* export to DC */
269 u32 (*get_sclk)(void *handle, bool low); 257 u32 (*get_sclk)(void *handle, bool low);
270 u32 (*get_mclk)(void *handle, bool low); 258 u32 (*get_mclk)(void *handle, bool low);
@@ -289,6 +277,10 @@ struct amd_pm_funcs {
289 struct pp_display_clock_request *clock); 277 struct pp_display_clock_request *clock);
290 int (*get_display_mode_validation_clocks)(void *handle, 278 int (*get_display_mode_validation_clocks)(void *handle,
291 struct amd_pp_simple_clock_info *clocks); 279 struct amd_pp_simple_clock_info *clocks);
280 int (*get_power_profile_mode)(void *handle, char *buf);
281 int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
282 int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
283 int (*set_mmhub_powergating_by_smu)(void *handle);
292}; 284};
293 285
294#endif 286#endif
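The reworked amd_pm_funcs table folds the old get/set/reset power-profile trio into a single switch_power_profile() hook that takes the new PP_SMC_POWER_PROFILE enum plus an enable flag. A hedged sketch of selecting the compute profile through the table (the handle/funcs plumbing is illustrative; assumes kernel errno definitions are available):

/* Hedged sketch: ask the powerplay backend for the compute profile. */
static int demo_enable_compute_profile(void *pp_handle,
				       const struct amd_pm_funcs *pp_funcs)
{
	if (!pp_funcs || !pp_funcs->switch_power_profile)
		return -EOPNOTSUPP;	/* backend does not expose profiles */

	return pp_funcs->switch_power_profile(pp_handle,
					      PP_SMC_POWER_PROFILE_COMPUTE,
					      true);
}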
diff --git a/drivers/gpu/drm/amd/include/soc15_hw_ip.h b/drivers/gpu/drm/amd/include/soc15_hw_ip.h
new file mode 100644
index 000000000000..f17e30cb4eae
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/soc15_hw_ip.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright (C) 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */
21#ifndef _soc15_hw_ip_HEADER
22#define _soc15_hw_ip_HEADER
23
24// HW ID
25#define MP1_HWID 1
26#define MP2_HWID 2
27#define THM_HWID 3
28#define SMUIO_HWID 4
29#define FUSE_HWID 5
30#define CLKA_HWID 6
31#define PWR_HWID 10
32#define GC_HWID 11
33#define UVD_HWID 12
34#define VCN_HWID UVD_HWID
35#define AUDIO_AZ_HWID 13
36#define ACP_HWID 14
37#define DCI_HWID 15
38#define DMU_HWID 271
39#define DCO_HWID 16
40#define DIO_HWID 272
41#define XDMA_HWID 17
42#define DCEAZ_HWID 18
43#define DAZ_HWID 274
44#define SDPMUX_HWID 19
45#define NTB_HWID 20
46#define IOHC_HWID 24
47#define L2IMU_HWID 28
48#define VCE_HWID 32
49#define MMHUB_HWID 34
50#define ATHUB_HWID 35
51#define DBGU_NBIO_HWID 36
52#define DFX_HWID 37
53#define DBGU0_HWID 38
54#define DBGU1_HWID 39
55#define OSSSYS_HWID 40
56#define HDP_HWID 41
57#define SDMA0_HWID 42
58#define SDMA1_HWID 43
59#define ISP_HWID 44
60#define DBGU_IO_HWID 45
61#define DF_HWID 46
62#define CLKB_HWID 47
63#define FCH_HWID 48
64#define DFX_DAP_HWID 49
65#define L1IMU_PCIE_HWID 50
66#define L1IMU_NBIF_HWID 51
67#define L1IMU_IOAGR_HWID 52
68#define L1IMU3_HWID 53
69#define L1IMU4_HWID 54
70#define L1IMU5_HWID 55
71#define L1IMU6_HWID 56
72#define L1IMU7_HWID 57
73#define L1IMU8_HWID 58
74#define L1IMU9_HWID 59
75#define L1IMU10_HWID 60
76#define L1IMU11_HWID 61
77#define L1IMU12_HWID 62
78#define L1IMU13_HWID 63
79#define L1IMU14_HWID 64
80#define L1IMU15_HWID 65
81#define WAFLC_HWID 66
82#define FCH_USB_PD_HWID 67
83#define PCIE_HWID 70
84#define PCS_HWID 80
85#define DDCL_HWID 89
86#define SST_HWID 90
87#define IOAGR_HWID 100
88#define NBIF_HWID 108
89#define IOAPIC_HWID 124
90#define SYSTEMHUB_HWID 128
91#define NTBCCP_HWID 144
92#define UMC_HWID 150
93#define SATA_HWID 168
94#define USB_HWID 170
95#define CCXSEC_HWID 176
96#define XGBE_HWID 216
97#define MP0_HWID 254
98#endif
diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
new file mode 100644
index 000000000000..a12d4f27cfa4
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef __SOC15_IH_CLIENTID_H__
25#define __SOC15_IH_CLIENTID_H__
26
27 /*
28 * vega10+ IH clients
29 */
30enum soc15_ih_clientid {
31 SOC15_IH_CLIENTID_IH = 0x00,
32 SOC15_IH_CLIENTID_ACP = 0x01,
33 SOC15_IH_CLIENTID_ATHUB = 0x02,
34 SOC15_IH_CLIENTID_BIF = 0x03,
35 SOC15_IH_CLIENTID_DCE = 0x04,
36 SOC15_IH_CLIENTID_ISP = 0x05,
37 SOC15_IH_CLIENTID_PCIE0 = 0x06,
38 SOC15_IH_CLIENTID_RLC = 0x07,
39 SOC15_IH_CLIENTID_SDMA0 = 0x08,
40 SOC15_IH_CLIENTID_SDMA1 = 0x09,
41 SOC15_IH_CLIENTID_SE0SH = 0x0a,
42 SOC15_IH_CLIENTID_SE1SH = 0x0b,
43 SOC15_IH_CLIENTID_SE2SH = 0x0c,
44 SOC15_IH_CLIENTID_SE3SH = 0x0d,
45 SOC15_IH_CLIENTID_SYSHUB = 0x0e,
46 SOC15_IH_CLIENTID_THM = 0x0f,
47 SOC15_IH_CLIENTID_UVD = 0x10,
48 SOC15_IH_CLIENTID_VCE0 = 0x11,
49 SOC15_IH_CLIENTID_VMC = 0x12,
50 SOC15_IH_CLIENTID_XDMA = 0x13,
51 SOC15_IH_CLIENTID_GRBM_CP = 0x14,
52 SOC15_IH_CLIENTID_ATS = 0x15,
53 SOC15_IH_CLIENTID_ROM_SMUIO = 0x16,
54 SOC15_IH_CLIENTID_DF = 0x17,
55 SOC15_IH_CLIENTID_VCE1 = 0x18,
56 SOC15_IH_CLIENTID_PWR = 0x19,
57 SOC15_IH_CLIENTID_UTCL2 = 0x1b,
58 SOC15_IH_CLIENTID_EA = 0x1c,
59 SOC15_IH_CLIENTID_UTCL2LOG = 0x1d,
60 SOC15_IH_CLIENTID_MP0 = 0x1e,
61 SOC15_IH_CLIENTID_MP1 = 0x1f,
62
63 SOC15_IH_CLIENTID_MAX,
64
65 SOC15_IH_CLIENTID_VCN = SOC15_IH_CLIENTID_UVD
66};
67
68#endif
69
70
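Each interrupt-ring entry on these parts carries one of the client IDs above, and the SOC15_IH_CLIENTID_MAX sentinel exists so dispatch tables can be sized per client. A hedged sketch of that routing step (the handler type and table are illustrative; decoding the client ID out of the ring entry itself is ASIC-specific and not shown):

/* Hedged sketch: route an already-decoded IH client ID to a registered handler. */
typedef void (*demo_ih_handler)(void *data);

static demo_ih_handler demo_handlers[SOC15_IH_CLIENTID_MAX];

static void demo_route_interrupt(enum soc15_ih_clientid client_id, void *data)
{
	if (client_id >= SOC15_IH_CLIENTID_MAX || !demo_handlers[client_id])
		return;		/* unknown or unhandled client */

	demo_handlers[client_id](data);
}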
diff --git a/drivers/gpu/drm/amd/include/soc15ip.h b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
index 1767db69df7a..976dd2d565ba 100644
--- a/drivers/gpu/drm/amd/include/soc15ip.h
+++ b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2017 Advanced Micro Devices, Inc. 2 * Copyright (C) 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,273 +18,197 @@
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */ 20 */
21#ifndef _soc15ip_new_HEADER 21#ifndef _vega10_ip_offset_HEADER
22#define _soc15ip_new_HEADER 22#define _vega10_ip_offset_HEADER
23
24// HW ID
25#define MP1_HWID 1
26#define MP2_HWID 2
27#define THM_HWID 3
28#define SMUIO_HWID 4
29#define FUSE_HWID 5
30#define CLKA_HWID 6
31#define PWR_HWID 10
32#define GC_HWID 11
33#define UVD_HWID 12
34#define VCN_HWID UVD_HWID
35#define AUDIO_AZ_HWID 13
36#define ACP_HWID 14
37#define DCI_HWID 15
38#define DMU_HWID 271
39#define DCO_HWID 16
40#define DIO_HWID 272
41#define XDMA_HWID 17
42#define DCEAZ_HWID 18
43#define DAZ_HWID 274
44#define SDPMUX_HWID 19
45#define NTB_HWID 20
46#define IOHC_HWID 24
47#define L2IMU_HWID 28
48#define VCE_HWID 32
49#define MMHUB_HWID 34
50#define ATHUB_HWID 35
51#define DBGU_NBIO_HWID 36
52#define DFX_HWID 37
53#define DBGU0_HWID 38
54#define DBGU1_HWID 39
55#define OSSSYS_HWID 40
56#define HDP_HWID 41
57#define SDMA0_HWID 42
58#define SDMA1_HWID 43
59#define ISP_HWID 44
60#define DBGU_IO_HWID 45
61#define DF_HWID 46
62#define CLKB_HWID 47
63#define FCH_HWID 48
64#define DFX_DAP_HWID 49
65#define L1IMU_PCIE_HWID 50
66#define L1IMU_NBIF_HWID 51
67#define L1IMU_IOAGR_HWID 52
68#define L1IMU3_HWID 53
69#define L1IMU4_HWID 54
70#define L1IMU5_HWID 55
71#define L1IMU6_HWID 56
72#define L1IMU7_HWID 57
73#define L1IMU8_HWID 58
74#define L1IMU9_HWID 59
75#define L1IMU10_HWID 60
76#define L1IMU11_HWID 61
77#define L1IMU12_HWID 62
78#define L1IMU13_HWID 63
79#define L1IMU14_HWID 64
80#define L1IMU15_HWID 65
81#define WAFLC_HWID 66
82#define FCH_USB_PD_HWID 67
83#define PCIE_HWID 70
84#define PCS_HWID 80
85#define DDCL_HWID 89
86#define SST_HWID 90
87#define IOAGR_HWID 100
88#define NBIF_HWID 108
89#define IOAPIC_HWID 124
90#define SYSTEMHUB_HWID 128
91#define NTBCCP_HWID 144
92#define UMC_HWID 150
93#define SATA_HWID 168
94#define USB_HWID 170
95#define CCXSEC_HWID 176
96#define XGBE_HWID 216
97#define MP0_HWID 254
98 23
99#define MAX_INSTANCE 5 24#define MAX_INSTANCE 5
100#define MAX_SEGMENT 5 25#define MAX_SEGMENT 5
101 26
102 27struct IP_BASE_INSTANCE
103struct IP_BASE_INSTANCE
104{ 28{
105 unsigned int segment[MAX_SEGMENT]; 29 unsigned int segment[MAX_SEGMENT];
106}; 30};
107 31
108struct IP_BASE 32struct IP_BASE
109{ 33{
110 struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; 34 struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
111}; 35};
112 36
113 37
114static const struct IP_BASE NBIF_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } }, 38static const struct IP_BASE NBIF_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
115 { { 0, 0, 0, 0, 0 } }, 39 { { 0, 0, 0, 0, 0 } },
116 { { 0, 0, 0, 0, 0 } }, 40 { { 0, 0, 0, 0, 0 } },
117 { { 0, 0, 0, 0, 0 } }, 41 { { 0, 0, 0, 0, 0 } },
118 { { 0, 0, 0, 0, 0 } } } }; 42 { { 0, 0, 0, 0, 0 } } } };
119static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } }, 43static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
120 { { 0, 0, 0, 0, 0 } }, 44 { { 0, 0, 0, 0, 0 } },
121 { { 0, 0, 0, 0, 0 } }, 45 { { 0, 0, 0, 0, 0 } },
122 { { 0, 0, 0, 0, 0 } }, 46 { { 0, 0, 0, 0, 0 } },
123 { { 0, 0, 0, 0, 0 } } } }; 47 { { 0, 0, 0, 0, 0 } } } };
124static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } }, 48static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
125 { { 0, 0, 0, 0, 0 } }, 49 { { 0, 0, 0, 0, 0 } },
126 { { 0, 0, 0, 0, 0 } }, 50 { { 0, 0, 0, 0, 0 } },
127 { { 0, 0, 0, 0, 0 } }, 51 { { 0, 0, 0, 0, 0 } },
128 { { 0, 0, 0, 0, 0 } } } }; 52 { { 0, 0, 0, 0, 0 } } } };
129static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } }, 53static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
130 { { 0, 0, 0, 0, 0 } }, 54 { { 0, 0, 0, 0, 0 } },
131 { { 0, 0, 0, 0, 0 } }, 55 { { 0, 0, 0, 0, 0 } },
132 { { 0, 0, 0, 0, 0 } }, 56 { { 0, 0, 0, 0, 0 } },
133 { { 0, 0, 0, 0, 0 } } } }; 57 { { 0, 0, 0, 0, 0 } } } };
134static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0 } }, 58static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
135 { { 0, 0, 0, 0, 0 } }, 59 { { 0, 0, 0, 0, 0 } },
136 { { 0, 0, 0, 0, 0 } }, 60 { { 0, 0, 0, 0, 0 } },
137 { { 0, 0, 0, 0, 0 } }, 61 { { 0, 0, 0, 0, 0 } },
138 { { 0, 0, 0, 0, 0 } } } }; 62 { { 0, 0, 0, 0, 0 } } } };
139static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } }, 63static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
140 { { 0, 0, 0, 0, 0 } }, 64 { { 0, 0, 0, 0, 0 } },
141 { { 0, 0, 0, 0, 0 } }, 65 { { 0, 0, 0, 0, 0 } },
142 { { 0, 0, 0, 0, 0 } }, 66 { { 0, 0, 0, 0, 0 } },
143 { { 0, 0, 0, 0, 0 } } } }; 67 { { 0, 0, 0, 0, 0 } } } };
144static const struct IP_BASE MP2_BASE = { { { { 0x00016000, 0, 0, 0, 0 } }, 68static const struct IP_BASE MP2_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
145 { { 0, 0, 0, 0, 0 } }, 69 { { 0, 0, 0, 0, 0 } },
146 { { 0, 0, 0, 0, 0 } }, 70 { { 0, 0, 0, 0, 0 } },
147 { { 0, 0, 0, 0, 0 } }, 71 { { 0, 0, 0, 0, 0 } },
148 { { 0, 0, 0, 0, 0 } } } }; 72 { { 0, 0, 0, 0, 0 } } } };
149static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0 } }, 73static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0 } },
150 { { 0, 0, 0, 0, 0 } }, 74 { { 0, 0, 0, 0, 0 } },
151 { { 0, 0, 0, 0, 0 } }, 75 { { 0, 0, 0, 0, 0 } },
152 { { 0, 0, 0, 0, 0 } }, 76 { { 0, 0, 0, 0, 0 } },
153 { { 0, 0, 0, 0, 0 } } } }; 77 { { 0, 0, 0, 0, 0 } } } };
154static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } }, 78static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
155 { { 0, 0, 0, 0, 0 } }, 79 { { 0, 0, 0, 0, 0 } },
156 { { 0, 0, 0, 0, 0 } }, 80 { { 0, 0, 0, 0, 0 } },
157 { { 0, 0, 0, 0, 0 } }, 81 { { 0, 0, 0, 0, 0 } },
158 { { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment 82 { { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment
159static const struct IP_BASE VCN_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } }, 83static const struct IP_BASE VCN_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
160 { { 0, 0, 0, 0, 0 } }, 84 { { 0, 0, 0, 0, 0 } },
161 { { 0, 0, 0, 0, 0 } }, 85 { { 0, 0, 0, 0, 0 } },
162 { { 0, 0, 0, 0, 0 } }, 86 { { 0, 0, 0, 0, 0 } },
163 { { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment 87 { { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment
164static const struct IP_BASE DBGU_BASE = { { { { 0x00000180, 0x000001A0, 0, 0, 0 } }, 88static const struct IP_BASE DBGU_BASE = { { { { 0x00000180, 0x000001A0, 0, 0, 0 } },
165 { { 0, 0, 0, 0, 0 } }, 89 { { 0, 0, 0, 0, 0 } },
166 { { 0, 0, 0, 0, 0 } }, 90 { { 0, 0, 0, 0, 0 } },
167 { { 0, 0, 0, 0, 0 } }, 91 { { 0, 0, 0, 0, 0 } },
168 { { 0, 0, 0, 0, 0 } } } }; // not exist 92 { { 0, 0, 0, 0, 0 } } } }; // not exist
169static const struct IP_BASE DBGU_NBIO_BASE = { { { { 0x000001C0, 0, 0, 0, 0 } }, 93static const struct IP_BASE DBGU_NBIO_BASE = { { { { 0x000001C0, 0, 0, 0, 0 } },
170 { { 0, 0, 0, 0, 0 } }, 94 { { 0, 0, 0, 0, 0 } },
171 { { 0, 0, 0, 0, 0 } }, 95 { { 0, 0, 0, 0, 0 } },
172 { { 0, 0, 0, 0, 0 } }, 96 { { 0, 0, 0, 0, 0 } },
173 { { 0, 0, 0, 0, 0 } } } }; // not exist 97 { { 0, 0, 0, 0, 0 } } } }; // not exist
174static const struct IP_BASE DBGU_IO_BASE = { { { { 0x000001E0, 0, 0, 0, 0 } }, 98static const struct IP_BASE DBGU_IO_BASE = { { { { 0x000001E0, 0, 0, 0, 0 } },
175 { { 0, 0, 0, 0, 0 } }, 99 { { 0, 0, 0, 0, 0 } },
176 { { 0, 0, 0, 0, 0 } }, 100 { { 0, 0, 0, 0, 0 } },
177 { { 0, 0, 0, 0, 0 } }, 101 { { 0, 0, 0, 0, 0 } },
178 { { 0, 0, 0, 0, 0 } } } }; // not exist 102 { { 0, 0, 0, 0, 0 } } } }; // not exist
179static const struct IP_BASE DFX_DAP_BASE = { { { { 0x000005A0, 0, 0, 0, 0 } }, 103static const struct IP_BASE DFX_DAP_BASE = { { { { 0x000005A0, 0, 0, 0, 0 } },
180 { { 0, 0, 0, 0, 0 } }, 104 { { 0, 0, 0, 0, 0 } },
181 { { 0, 0, 0, 0, 0 } }, 105 { { 0, 0, 0, 0, 0 } },
182 { { 0, 0, 0, 0, 0 } }, 106 { { 0, 0, 0, 0, 0 } },
183 { { 0, 0, 0, 0, 0 } } } }; // not exist 107 { { 0, 0, 0, 0, 0 } } } }; // not exist
184static const struct IP_BASE DFX_BASE = { { { { 0x00000580, 0, 0, 0, 0 } }, 108static const struct IP_BASE DFX_BASE = { { { { 0x00000580, 0, 0, 0, 0 } },
185 { { 0, 0, 0, 0, 0 } }, 109 { { 0, 0, 0, 0, 0 } },
186 { { 0, 0, 0, 0, 0 } }, 110 { { 0, 0, 0, 0, 0 } },
187 { { 0, 0, 0, 0, 0 } }, 111 { { 0, 0, 0, 0, 0 } },
188 { { 0, 0, 0, 0, 0 } } } }; // this file does not contain registers 112 { { 0, 0, 0, 0, 0 } } } }; // this file does not contain registers
189static const struct IP_BASE ISP_BASE = { { { { 0x00018000, 0, 0, 0, 0 } }, 113static const struct IP_BASE ISP_BASE = { { { { 0x00018000, 0, 0, 0, 0 } },
190 { { 0, 0, 0, 0, 0 } }, 114 { { 0, 0, 0, 0, 0 } },
191 { { 0, 0, 0, 0, 0 } }, 115 { { 0, 0, 0, 0, 0 } },
192 { { 0, 0, 0, 0, 0 } }, 116 { { 0, 0, 0, 0, 0 } },
193 { { 0, 0, 0, 0, 0 } } } }; // not exist 117 { { 0, 0, 0, 0, 0 } } } }; // not exist
194static const struct IP_BASE SYSTEMHUB_BASE = { { { { 0x00000EA0, 0, 0, 0, 0 } }, 118static const struct IP_BASE SYSTEMHUB_BASE = { { { { 0x00000EA0, 0, 0, 0, 0 } },
195 { { 0, 0, 0, 0, 0 } }, 119 { { 0, 0, 0, 0, 0 } },
196 { { 0, 0, 0, 0, 0 } }, 120 { { 0, 0, 0, 0, 0 } },
197 { { 0, 0, 0, 0, 0 } }, 121 { { 0, 0, 0, 0, 0 } },
198 { { 0, 0, 0, 0, 0 } } } }; // not exist 122 { { 0, 0, 0, 0, 0 } } } }; // not exist
199static const struct IP_BASE L2IMU_BASE = { { { { 0x00007DC0, 0, 0, 0, 0 } }, 123static const struct IP_BASE L2IMU_BASE = { { { { 0x00007DC0, 0, 0, 0, 0 } },
200 { { 0, 0, 0, 0, 0 } }, 124 { { 0, 0, 0, 0, 0 } },
201 { { 0, 0, 0, 0, 0 } }, 125 { { 0, 0, 0, 0, 0 } },
202 { { 0, 0, 0, 0, 0 } }, 126 { { 0, 0, 0, 0, 0 } },
203 { { 0, 0, 0, 0, 0 } } } }; 127 { { 0, 0, 0, 0, 0 } } } };
204static const struct IP_BASE IOHC_BASE = { { { { 0x00010000, 0, 0, 0, 0 } }, 128static const struct IP_BASE IOHC_BASE = { { { { 0x00010000, 0, 0, 0, 0 } },
205 { { 0, 0, 0, 0, 0 } }, 129 { { 0, 0, 0, 0, 0 } },
206 { { 0, 0, 0, 0, 0 } }, 130 { { 0, 0, 0, 0, 0 } },
207 { { 0, 0, 0, 0, 0 } }, 131 { { 0, 0, 0, 0, 0 } },
208 { { 0, 0, 0, 0, 0 } } } }; 132 { { 0, 0, 0, 0, 0 } } } };
209static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0 } }, 133static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0 } },
210 { { 0, 0, 0, 0, 0 } }, 134 { { 0, 0, 0, 0, 0 } },
211 { { 0, 0, 0, 0, 0 } }, 135 { { 0, 0, 0, 0, 0 } },
212 { { 0, 0, 0, 0, 0 } }, 136 { { 0, 0, 0, 0, 0 } },
213 { { 0, 0, 0, 0, 0 } } } }; 137 { { 0, 0, 0, 0, 0 } } } };
214static const struct IP_BASE VCE_BASE = { { { { 0x00007E00, 0x00048800, 0, 0, 0 } }, 138static const struct IP_BASE VCE_BASE = { { { { 0x00007E00, 0x00048800, 0, 0, 0 } },
215 { { 0, 0, 0, 0, 0 } }, 139 { { 0, 0, 0, 0, 0 } },
216 { { 0, 0, 0, 0, 0 } }, 140 { { 0, 0, 0, 0, 0 } },
217 { { 0, 0, 0, 0, 0 } }, 141 { { 0, 0, 0, 0, 0 } },
218 { { 0, 0, 0, 0, 0 } } } }; 142 { { 0, 0, 0, 0, 0 } } } };
219static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0 } }, 143static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0 } },
220 { { 0, 0, 0, 0, 0 } }, 144 { { 0, 0, 0, 0, 0 } },
221 { { 0, 0, 0, 0, 0 } }, 145 { { 0, 0, 0, 0, 0 } },
222 { { 0, 0, 0, 0, 0 } }, 146 { { 0, 0, 0, 0, 0 } },
223 { { 0, 0, 0, 0, 0 } } } }; 147 { { 0, 0, 0, 0, 0 } } } };
224static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0 } }, 148static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0 } },
225 { { 0, 0, 0, 0, 0 } }, 149 { { 0, 0, 0, 0, 0 } },
226 { { 0, 0, 0, 0, 0 } }, 150 { { 0, 0, 0, 0, 0 } },
227 { { 0, 0, 0, 0, 0 } }, 151 { { 0, 0, 0, 0, 0 } },
228 { { 0, 0, 0, 0, 0 } } } }; 152 { { 0, 0, 0, 0, 0 } } } };
229static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0 } }, 153static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0 } },
230 { { 0, 0, 0, 0, 0 } }, 154 { { 0, 0, 0, 0, 0 } },
231 { { 0, 0, 0, 0, 0 } }, 155 { { 0, 0, 0, 0, 0 } },
232 { { 0, 0, 0, 0, 0 } }, 156 { { 0, 0, 0, 0, 0 } },
233 { { 0, 0, 0, 0, 0 } } } }; 157 { { 0, 0, 0, 0, 0 } } } };
234static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0 } }, 158static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0 } },
235 { { 0, 0, 0, 0, 0 } }, 159 { { 0, 0, 0, 0, 0 } },
236 { { 0, 0, 0, 0, 0 } }, 160 { { 0, 0, 0, 0, 0 } },
237 { { 0, 0, 0, 0, 0 } }, 161 { { 0, 0, 0, 0, 0 } },
238 { { 0, 0, 0, 0, 0 } } } }; 162 { { 0, 0, 0, 0, 0 } } } };
239static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0 } }, 163static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0 } },
240 { { 0, 0, 0, 0, 0 } }, 164 { { 0, 0, 0, 0, 0 } },
241 { { 0, 0, 0, 0, 0 } }, 165 { { 0, 0, 0, 0, 0 } },
242 { { 0, 0, 0, 0, 0 } }, 166 { { 0, 0, 0, 0, 0 } },
243 { { 0, 0, 0, 0, 0 } } } }; 167 { { 0, 0, 0, 0, 0 } } } };
244static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0 } }, 168static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0 } },
245 { { 0, 0, 0, 0, 0 } }, 169 { { 0, 0, 0, 0, 0 } },
246 { { 0, 0, 0, 0, 0 } }, 170 { { 0, 0, 0, 0, 0 } },
247 { { 0, 0, 0, 0, 0 } }, 171 { { 0, 0, 0, 0, 0 } },
248 { { 0, 0, 0, 0, 0 } } } }; 172 { { 0, 0, 0, 0, 0 } } } };
249static const struct IP_BASE SDMA1_BASE = { { { { 0x00001460, 0, 0, 0, 0 } }, 173static const struct IP_BASE SDMA1_BASE = { { { { 0x00001460, 0, 0, 0, 0 } },
250 { { 0, 0, 0, 0, 0 } }, 174 { { 0, 0, 0, 0, 0 } },
251 { { 0, 0, 0, 0, 0 } }, 175 { { 0, 0, 0, 0, 0 } },
252 { { 0, 0, 0, 0, 0 } }, 176 { { 0, 0, 0, 0, 0 } },
253 { { 0, 0, 0, 0, 0 } } } }; 177 { { 0, 0, 0, 0, 0 } } } };
254static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0 } }, 178static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0 } },
255 { { 0, 0, 0, 0, 0 } }, 179 { { 0, 0, 0, 0, 0 } },
256 { { 0, 0, 0, 0, 0 } }, 180 { { 0, 0, 0, 0, 0 } },
257 { { 0, 0, 0, 0, 0 } }, 181 { { 0, 0, 0, 0, 0 } },
258 { { 0, 0, 0, 0, 0 } } } }; 182 { { 0, 0, 0, 0, 0 } } } };
259static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0 } }, 183static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0 } },
260 { { 0, 0, 0, 0, 0 } }, 184 { { 0, 0, 0, 0, 0 } },
261 { { 0, 0, 0, 0, 0 } }, 185 { { 0, 0, 0, 0, 0 } },
262 { { 0, 0, 0, 0, 0 } }, 186 { { 0, 0, 0, 0, 0 } },
263 { { 0, 0, 0, 0, 0 } } } }; 187 { { 0, 0, 0, 0, 0 } } } };
264static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0 } }, 188static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0 } },
265 { { 0, 0, 0, 0, 0 } }, 189 { { 0, 0, 0, 0, 0 } },
266 { { 0, 0, 0, 0, 0 } }, 190 { { 0, 0, 0, 0, 0 } },
267 { { 0, 0, 0, 0, 0 } }, 191 { { 0, 0, 0, 0, 0 } },
268 { { 0, 0, 0, 0, 0 } } } }; 192 { { 0, 0, 0, 0, 0 } } } };
269static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0, 0, 0, 0 } }, 193static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0, 0, 0, 0 } },
270 { { 0, 0, 0, 0, 0 } }, 194 { { 0, 0, 0, 0, 0 } },
271 { { 0, 0, 0, 0, 0 } }, 195 { { 0, 0, 0, 0, 0 } },
272 { { 0, 0, 0, 0, 0 } }, 196 { { 0, 0, 0, 0, 0 } },
273 { { 0, 0, 0, 0, 0 } } } }; 197 { { 0, 0, 0, 0, 0 } } } };
274static const struct IP_BASE PWR_BASE = { { { { 0x00016A00, 0, 0, 0, 0 } }, 198static const struct IP_BASE PWR_BASE = { { { { 0x00016A00, 0, 0, 0, 0 } },
275 { { 0, 0, 0, 0, 0 } }, 199 { { 0, 0, 0, 0, 0 } },
276 { { 0, 0, 0, 0, 0 } }, 200 { { 0, 0, 0, 0, 0 } },
277 { { 0, 0, 0, 0, 0 } }, 201 { { 0, 0, 0, 0, 0 } },
278 { { 0, 0, 0, 0, 0 } } } }; 202 { { 0, 0, 0, 0, 0 } } } };
279static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0, 0, 0, 0 } }, 203static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0, 0, 0, 0 } },
280 { { 0x00016E00, 0, 0, 0, 0 } }, 204 { { 0x00016E00, 0, 0, 0, 0 } },
281 { { 0x00017000, 0, 0, 0, 0 } }, 205 { { 0x00017000, 0, 0, 0, 0 } },
282 { { 0x00017200, 0, 0, 0, 0 } }, 206 { { 0x00017200, 0, 0, 0, 0 } },
283 { { 0x00017E00, 0, 0, 0, 0 } } } }; 207 { { 0x00017E00, 0, 0, 0, 0 } } } };
284static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } }, 208static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } },
285 { { 0, 0, 0, 0, 0 } }, 209 { { 0, 0, 0, 0, 0 } },
286 { { 0, 0, 0, 0, 0 } }, 210 { { 0, 0, 0, 0, 0 } },
287 { { 0, 0, 0, 0, 0 } }, 211 { { 0, 0, 0, 0, 0 } },
288 { { 0, 0, 0, 0, 0 } } } }; 212 { { 0, 0, 0, 0, 0 } } } };
289 213
290 214
@@ -1337,7 +1261,5 @@ static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } },
1337#define FUSE_BASE__INST4_SEG2 0 1261#define FUSE_BASE__INST4_SEG2 0
1338#define FUSE_BASE__INST4_SEG3 0 1262#define FUSE_BASE__INST4_SEG3 0
1339#define FUSE_BASE__INST4_SEG4 0 1263#define FUSE_BASE__INST4_SEG4 0
1340
1341
1342#endif 1264#endif
1343 1265
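Note on the IP base-address tables above: each entry gives, per IP instance, up to five MMIO base segments, and a register's absolute dword offset is the matching segment base plus the register's offset within that segment. The sketch below is only an illustration of that addressing scheme under those assumptions; the struct, field and helper names (ip_base, segment, ip_reg_offset) are hypothetical, not the macros the driver actually generates from this header.

/* Minimal sketch, assuming base + per-segment register offset addressing.
 * Names are local to this example. */
struct ip_base_instance { unsigned int segment[5]; };
struct ip_base { struct ip_base_instance instance[5]; };

/* Mirrors the shape of the tables above: instance 0, segment 0 = 0x00001260. */
static const struct ip_base sdma0_base = { { { { 0x00001260, 0, 0, 0, 0 } } } };

/* Hypothetical helper: absolute dword offset for (instance, segment, reg). */
static inline unsigned int ip_reg_offset(const struct ip_base *base,
					 unsigned int inst, unsigned int seg,
					 unsigned int reg)
{
	return base->instance[inst].segment[seg] + reg;
}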
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 4c3223a4d62b..3da3dccd13e2 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -27,79 +27,76 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include "amd_shared.h" 28#include "amd_shared.h"
29#include "amd_powerplay.h" 29#include "amd_powerplay.h"
30#include "pp_instance.h"
31#include "power_state.h" 30#include "power_state.h"
31#include "amdgpu.h"
32#include "hwmgr.h"
32 33
33#define PP_DPM_DISABLED 0xCCCC 34#define PP_DPM_DISABLED 0xCCCC
34 35
35static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, 36static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
36 void *input, void *output); 37 enum amd_pm_state_type *user_state);
37 38
38static inline int pp_check(struct pp_instance *handle) 39static const struct amd_pm_funcs pp_dpm_funcs;
39{
40 if (handle == NULL)
41 return -EINVAL;
42 40
43 if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL) 41static inline int pp_check(struct pp_hwmgr *hwmgr)
42{
43 if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
44 return -EINVAL; 44 return -EINVAL;
45 45
46 if (handle->pm_en == 0) 46 if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
47 return PP_DPM_DISABLED;
48
49 if (handle->hwmgr->hwmgr_func == NULL)
50 return PP_DPM_DISABLED; 47 return PP_DPM_DISABLED;
51 48
52 return 0; 49 return 0;
53} 50}
54 51
55static int amd_powerplay_create(struct amd_pp_init *pp_init, 52static int amd_powerplay_create(struct amdgpu_device *adev)
56 void **handle)
57{ 53{
58 struct pp_instance *instance; 54 struct pp_hwmgr *hwmgr;
59 55
60 if (pp_init == NULL || handle == NULL) 56 if (adev == NULL)
61 return -EINVAL; 57 return -EINVAL;
62 58
63 instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL); 59 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
64 if (instance == NULL) 60 if (hwmgr == NULL)
65 return -ENOMEM; 61 return -ENOMEM;
66 62
67 instance->chip_family = pp_init->chip_family; 63 hwmgr->adev = adev;
68 instance->chip_id = pp_init->chip_id; 64 hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
69 instance->pm_en = pp_init->pm_en; 65 hwmgr->device = amdgpu_cgs_create_device(adev);
70 instance->feature_mask = pp_init->feature_mask; 66 mutex_init(&hwmgr->smu_lock);
71 instance->device = pp_init->device; 67 hwmgr->chip_family = adev->family;
72 mutex_init(&instance->pp_lock); 68 hwmgr->chip_id = adev->asic_type;
73 *handle = instance; 69 hwmgr->feature_mask = amdgpu_pp_feature_mask;
70 adev->powerplay.pp_handle = hwmgr;
71 adev->powerplay.pp_funcs = &pp_dpm_funcs;
74 return 0; 72 return 0;
75} 73}
76 74
77static int amd_powerplay_destroy(void *handle) 75
76static int amd_powerplay_destroy(struct amdgpu_device *adev)
78{ 77{
79 struct pp_instance *instance = (struct pp_instance *)handle; 78 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
80 79
81 kfree(instance->hwmgr->hardcode_pp_table); 80 kfree(hwmgr->hardcode_pp_table);
82 instance->hwmgr->hardcode_pp_table = NULL; 81 hwmgr->hardcode_pp_table = NULL;
83 82
84 kfree(instance->hwmgr); 83 kfree(hwmgr);
85 instance->hwmgr = NULL; 84 hwmgr = NULL;
86 85
87 kfree(instance);
88 instance = NULL;
89 return 0; 86 return 0;
90} 87}
91 88
92static int pp_early_init(void *handle) 89static int pp_early_init(void *handle)
93{ 90{
94 int ret; 91 int ret;
95 struct pp_instance *pp_handle = NULL; 92 struct amdgpu_device *adev = handle;
96 93
97 pp_handle = cgs_register_pp_handle(handle, amd_powerplay_create); 94 ret = amd_powerplay_create(adev);
98 95
99 if (!pp_handle) 96 if (ret != 0)
100 return -EINVAL; 97 return ret;
101 98
102 ret = hwmgr_early_init(pp_handle); 99 ret = hwmgr_early_init(adev->powerplay.pp_handle);
103 if (ret) 100 if (ret)
104 return -EINVAL; 101 return -EINVAL;
105 102
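The reworked pp_check() above has a three-way result that later hunks rely on: a negative errno when the hwmgr or its SMU function table is missing, PP_DPM_DISABLED (0xCCCC, a positive value) when power management is disabled or no hwmgr backend exists, and 0 when DPM is fully available. That is why pp_sw_init()/pp_hw_init() test ret >= 0 (the SMU can still be brought up with DPM off) while most pp_dpm_* entry points bail out on any non-zero value. The sketch below only illustrates that calling convention; example_pp_check() and example_caller() are stand-ins, not driver functions.

/* Stand-in with the same return convention as pp_check() above, plus a
 * caller showing how the three outcomes are usually handled.
 * EINVAL comes from <linux/errno.h> in the driver. */
#define PP_DPM_DISABLED 0xCCCC

static int example_pp_check(int have_hwmgr, int pm_en)
{
	if (!have_hwmgr)
		return -EINVAL;         /* hard error: no handle / SMU funcs */
	if (!pm_en)
		return PP_DPM_DISABLED; /* SMU usable, DPM features disabled */
	return 0;                       /* full power management available */
}

static int example_caller(int have_hwmgr, int pm_en)
{
	int ret = example_pp_check(have_hwmgr, pm_en);

	if (ret < 0)
		return ret;             /* propagate hard errors */
	if (ret == PP_DPM_DISABLED)
		return 0;               /* skip DPM-only work, still succeed */
	/* ret == 0: proceed with the DPM path. */
	return 0;
}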
@@ -108,15 +105,13 @@ static int pp_early_init(void *handle)
108 105
109static int pp_sw_init(void *handle) 106static int pp_sw_init(void *handle)
110{ 107{
111 struct pp_hwmgr *hwmgr; 108 struct amdgpu_device *adev = handle;
109 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
112 int ret = 0; 110 int ret = 0;
113 struct pp_instance *pp_handle = (struct pp_instance *)handle;
114 111
115 ret = pp_check(pp_handle); 112 ret = pp_check(hwmgr);
116 113
117 if (ret >= 0) { 114 if (ret >= 0) {
118 hwmgr = pp_handle->hwmgr;
119
120 if (hwmgr->smumgr_funcs->smu_init == NULL) 115 if (hwmgr->smumgr_funcs->smu_init == NULL)
121 return -EINVAL; 116 return -EINVAL;
122 117
@@ -124,55 +119,57 @@ static int pp_sw_init(void *handle)
124 119
125 pr_debug("amdgpu: powerplay sw initialized\n"); 120 pr_debug("amdgpu: powerplay sw initialized\n");
126 } 121 }
122
127 return ret; 123 return ret;
128} 124}
129 125
130static int pp_sw_fini(void *handle) 126static int pp_sw_fini(void *handle)
131{ 127{
132 struct pp_hwmgr *hwmgr; 128 struct amdgpu_device *adev = handle;
129 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
133 int ret = 0; 130 int ret = 0;
134 struct pp_instance *pp_handle = (struct pp_instance *)handle;
135 131
136 ret = pp_check(pp_handle); 132 ret = pp_check(hwmgr);
137 if (ret >= 0) { 133 if (ret >= 0) {
138 hwmgr = pp_handle->hwmgr; 134 if (hwmgr->smumgr_funcs->smu_fini != NULL)
135 hwmgr->smumgr_funcs->smu_fini(hwmgr);
136 }
139 137
140 if (hwmgr->smumgr_funcs->smu_fini == NULL) 138 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
141 return -EINVAL; 139 amdgpu_ucode_fini_bo(adev);
142 140
143 ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 141 return 0;
144 }
145 return ret;
146} 142}
147 143
148static int pp_hw_init(void *handle) 144static int pp_hw_init(void *handle)
149{ 145{
150 int ret = 0; 146 int ret = 0;
151 struct pp_instance *pp_handle = (struct pp_instance *)handle; 147 struct amdgpu_device *adev = handle;
152 struct pp_hwmgr *hwmgr; 148 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
153 149
154 ret = pp_check(pp_handle); 150 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
151 amdgpu_ucode_init_bo(adev);
155 152
156 if (ret >= 0) { 153 ret = pp_check(hwmgr);
157 hwmgr = pp_handle->hwmgr;
158 154
155 if (ret >= 0) {
159 if (hwmgr->smumgr_funcs->start_smu == NULL) 156 if (hwmgr->smumgr_funcs->start_smu == NULL)
160 return -EINVAL; 157 return -EINVAL;
161 158
162 if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { 159 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
163 pr_err("smc start failed\n"); 160 pr_err("smc start failed\n");
164 hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 161 hwmgr->smumgr_funcs->smu_fini(hwmgr);
165 return -EINVAL;; 162 return -EINVAL;
166 } 163 }
167 if (ret == PP_DPM_DISABLED) 164 if (ret == PP_DPM_DISABLED)
168 goto exit; 165 goto exit;
169 ret = hwmgr_hw_init(pp_handle); 166 ret = hwmgr_hw_init(hwmgr);
170 if (ret) 167 if (ret)
171 goto exit; 168 goto exit;
172 } 169 }
173 return ret; 170 return ret;
174exit: 171exit:
175 pp_handle->pm_en = 0; 172 hwmgr->pm_en = 0;
176 cgs_notify_dpm_enabled(hwmgr->device, false); 173 cgs_notify_dpm_enabled(hwmgr->device, false);
177 return 0; 174 return 0;
178 175
@@ -180,32 +177,37 @@ exit:
180 177
181static int pp_hw_fini(void *handle) 178static int pp_hw_fini(void *handle)
182{ 179{
183 struct pp_instance *pp_handle = (struct pp_instance *)handle; 180 struct amdgpu_device *adev = handle;
181 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
184 int ret = 0; 182 int ret = 0;
185 183
186 ret = pp_check(pp_handle); 184 ret = pp_check(hwmgr);
187 if (ret == 0) 185 if (ret == 0)
188 hwmgr_hw_fini(pp_handle); 186 hwmgr_hw_fini(hwmgr);
189 187
190 return 0; 188 return 0;
191} 189}
192 190
193static int pp_late_init(void *handle) 191static int pp_late_init(void *handle)
194{ 192{
195 struct pp_instance *pp_handle = (struct pp_instance *)handle; 193 struct amdgpu_device *adev = handle;
194 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
196 int ret = 0; 195 int ret = 0;
197 196
198 ret = pp_check(pp_handle); 197 ret = pp_check(hwmgr);
198
199 if (ret == 0) 199 if (ret == 0)
200 pp_dpm_dispatch_tasks(pp_handle, 200 pp_dpm_dispatch_tasks(hwmgr,
201 AMD_PP_TASK_COMPLETE_INIT, NULL, NULL); 201 AMD_PP_TASK_COMPLETE_INIT, NULL);
202 202
203 return 0; 203 return 0;
204} 204}
205 205
206static void pp_late_fini(void *handle) 206static void pp_late_fini(void *handle)
207{ 207{
208 amd_powerplay_destroy(handle); 208 struct amdgpu_device *adev = handle;
209
210 amd_powerplay_destroy(adev);
209} 211}
210 212
211 213
@@ -227,17 +229,15 @@ static int pp_sw_reset(void *handle)
227static int pp_set_powergating_state(void *handle, 229static int pp_set_powergating_state(void *handle,
228 enum amd_powergating_state state) 230 enum amd_powergating_state state)
229{ 231{
230 struct pp_hwmgr *hwmgr; 232 struct amdgpu_device *adev = handle;
231 struct pp_instance *pp_handle = (struct pp_instance *)handle; 233 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
232 int ret = 0; 234 int ret = 0;
233 235
234 ret = pp_check(pp_handle); 236 ret = pp_check(hwmgr);
235 237
236 if (ret) 238 if (ret)
237 return ret; 239 return ret;
238 240
239 hwmgr = pp_handle->hwmgr;
240
241 if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { 241 if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
242 pr_info("%s was not implemented.\n", __func__); 242 pr_info("%s was not implemented.\n", __func__);
243 return 0; 243 return 0;
@@ -250,44 +250,43 @@ static int pp_set_powergating_state(void *handle,
250 250
251static int pp_suspend(void *handle) 251static int pp_suspend(void *handle)
252{ 252{
253 struct pp_instance *pp_handle = (struct pp_instance *)handle; 253 struct amdgpu_device *adev = handle;
254 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
254 int ret = 0; 255 int ret = 0;
255 256
256 ret = pp_check(pp_handle); 257 ret = pp_check(hwmgr);
257 if (ret == 0) 258 if (ret == 0)
258 hwmgr_hw_suspend(pp_handle); 259 hwmgr_hw_suspend(hwmgr);
259 return 0; 260 return 0;
260} 261}
261 262
262static int pp_resume(void *handle) 263static int pp_resume(void *handle)
263{ 264{
264 struct pp_hwmgr *hwmgr; 265 struct amdgpu_device *adev = handle;
266 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
265 int ret; 267 int ret;
266 struct pp_instance *pp_handle = (struct pp_instance *)handle;
267 268
268 ret = pp_check(pp_handle); 269 ret = pp_check(hwmgr);
269 270
270 if (ret < 0) 271 if (ret < 0)
271 return ret; 272 return ret;
272 273
273 hwmgr = pp_handle->hwmgr;
274
275 if (hwmgr->smumgr_funcs->start_smu == NULL) 274 if (hwmgr->smumgr_funcs->start_smu == NULL)
276 return -EINVAL; 275 return -EINVAL;
277 276
278 if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { 277 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
279 pr_err("smc start failed\n"); 278 pr_err("smc start failed\n");
280 hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 279 hwmgr->smumgr_funcs->smu_fini(hwmgr);
281 return -EINVAL; 280 return -EINVAL;
282 } 281 }
283 282
284 if (ret == PP_DPM_DISABLED) 283 if (ret == PP_DPM_DISABLED)
285 return 0; 284 return 0;
286 285
287 return hwmgr_hw_resume(pp_handle); 286 return hwmgr_hw_resume(hwmgr);
288} 287}
289 288
290const struct amd_ip_funcs pp_ip_funcs = { 289static const struct amd_ip_funcs pp_ip_funcs = {
291 .name = "powerplay", 290 .name = "powerplay",
292 .early_init = pp_early_init, 291 .early_init = pp_early_init,
293 .late_init = pp_late_init, 292 .late_init = pp_late_init,
@@ -305,6 +304,15 @@ const struct amd_ip_funcs pp_ip_funcs = {
305 .set_powergating_state = pp_set_powergating_state, 304 .set_powergating_state = pp_set_powergating_state,
306}; 305};
307 306
307const struct amdgpu_ip_block_version pp_smu_ip_block =
308{
309 .type = AMD_IP_BLOCK_TYPE_SMC,
310 .major = 1,
311 .minor = 0,
312 .rev = 0,
313 .funcs = &pp_ip_funcs,
314};
315
308static int pp_dpm_load_fw(void *handle) 316static int pp_dpm_load_fw(void *handle)
309{ 317{
310 return 0; 318 return 0;
@@ -317,17 +325,14 @@ static int pp_dpm_fw_loading_complete(void *handle)
317 325
318static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id) 326static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
319{ 327{
320 struct pp_hwmgr *hwmgr; 328 struct pp_hwmgr *hwmgr = handle;
321 struct pp_instance *pp_handle = (struct pp_instance *)handle;
322 int ret = 0; 329 int ret = 0;
323 330
324 ret = pp_check(pp_handle); 331 ret = pp_check(hwmgr);
325 332
326 if (ret) 333 if (ret)
327 return ret; 334 return ret;
328 335
329 hwmgr = pp_handle->hwmgr;
330
331 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { 336 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
332 pr_info("%s was not implemented.\n", __func__); 337 pr_info("%s was not implemented.\n", __func__);
333 return 0; 338 return 0;
@@ -375,25 +380,22 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
375static int pp_dpm_force_performance_level(void *handle, 380static int pp_dpm_force_performance_level(void *handle,
376 enum amd_dpm_forced_level level) 381 enum amd_dpm_forced_level level)
377{ 382{
378 struct pp_hwmgr *hwmgr; 383 struct pp_hwmgr *hwmgr = handle;
379 struct pp_instance *pp_handle = (struct pp_instance *)handle;
380 int ret = 0; 384 int ret = 0;
381 385
382 ret = pp_check(pp_handle); 386 ret = pp_check(hwmgr);
383 387
384 if (ret) 388 if (ret)
385 return ret; 389 return ret;
386 390
387 hwmgr = pp_handle->hwmgr;
388
389 if (level == hwmgr->dpm_level) 391 if (level == hwmgr->dpm_level)
390 return 0; 392 return 0;
391 393
392 mutex_lock(&pp_handle->pp_lock); 394 mutex_lock(&hwmgr->smu_lock);
393 pp_dpm_en_umd_pstate(hwmgr, &level); 395 pp_dpm_en_umd_pstate(hwmgr, &level);
394 hwmgr->request_dpm_level = level; 396 hwmgr->request_dpm_level = level;
395 hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); 397 hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
396 mutex_unlock(&pp_handle->pp_lock); 398 mutex_unlock(&hwmgr->smu_lock);
397 399
398 return 0; 400 return 0;
399} 401}
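From this point on, nearly every pp_dpm_* entry point in the new code follows the same shape: cast the handle directly to struct pp_hwmgr, validate it with pp_check(), verify the backend callback exists, then serialise the hwmgr_func call with hwmgr->smu_lock, which replaces the old pp_instance::pp_lock. The condensed template below mirrors that pattern from the diff; do_something is a hypothetical callback name used only for illustration.

/* Template sketch of the repeated pp_dpm_* pattern after the rework. */
static int pp_dpm_do_something(void *handle, uint32_t arg)
{
	struct pp_hwmgr *hwmgr = handle;   /* handle is now the hwmgr itself */
	int ret;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->do_something == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);      /* replaces pp_instance::pp_lock */
	ret = hwmgr->hwmgr_func->do_something(hwmgr, arg);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}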
@@ -401,152 +403,135 @@ static int pp_dpm_force_performance_level(void *handle,
401static enum amd_dpm_forced_level pp_dpm_get_performance_level( 403static enum amd_dpm_forced_level pp_dpm_get_performance_level(
402 void *handle) 404 void *handle)
403{ 405{
404 struct pp_hwmgr *hwmgr; 406 struct pp_hwmgr *hwmgr = handle;
405 struct pp_instance *pp_handle = (struct pp_instance *)handle;
406 int ret = 0; 407 int ret = 0;
407 enum amd_dpm_forced_level level; 408 enum amd_dpm_forced_level level;
408 409
409 ret = pp_check(pp_handle); 410 ret = pp_check(hwmgr);
410 411
411 if (ret) 412 if (ret)
412 return ret; 413 return ret;
413 414
414 hwmgr = pp_handle->hwmgr; 415 mutex_lock(&hwmgr->smu_lock);
415 mutex_lock(&pp_handle->pp_lock);
416 level = hwmgr->dpm_level; 416 level = hwmgr->dpm_level;
417 mutex_unlock(&pp_handle->pp_lock); 417 mutex_unlock(&hwmgr->smu_lock);
418 return level; 418 return level;
419} 419}
420 420
421static uint32_t pp_dpm_get_sclk(void *handle, bool low) 421static uint32_t pp_dpm_get_sclk(void *handle, bool low)
422{ 422{
423 struct pp_hwmgr *hwmgr; 423 struct pp_hwmgr *hwmgr = handle;
424 struct pp_instance *pp_handle = (struct pp_instance *)handle;
425 int ret = 0; 424 int ret = 0;
426 uint32_t clk = 0; 425 uint32_t clk = 0;
427 426
428 ret = pp_check(pp_handle); 427 ret = pp_check(hwmgr);
429 428
430 if (ret) 429 if (ret)
431 return ret; 430 return ret;
432 431
433 hwmgr = pp_handle->hwmgr;
434
435 if (hwmgr->hwmgr_func->get_sclk == NULL) { 432 if (hwmgr->hwmgr_func->get_sclk == NULL) {
436 pr_info("%s was not implemented.\n", __func__); 433 pr_info("%s was not implemented.\n", __func__);
437 return 0; 434 return 0;
438 } 435 }
439 mutex_lock(&pp_handle->pp_lock); 436 mutex_lock(&hwmgr->smu_lock);
440 clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low); 437 clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
441 mutex_unlock(&pp_handle->pp_lock); 438 mutex_unlock(&hwmgr->smu_lock);
442 return clk; 439 return clk;
443} 440}
444 441
445static uint32_t pp_dpm_get_mclk(void *handle, bool low) 442static uint32_t pp_dpm_get_mclk(void *handle, bool low)
446{ 443{
447 struct pp_hwmgr *hwmgr; 444 struct pp_hwmgr *hwmgr = handle;
448 struct pp_instance *pp_handle = (struct pp_instance *)handle;
449 int ret = 0; 445 int ret = 0;
450 uint32_t clk = 0; 446 uint32_t clk = 0;
451 447
452 ret = pp_check(pp_handle); 448 ret = pp_check(hwmgr);
453 449
454 if (ret) 450 if (ret)
455 return ret; 451 return ret;
456 452
457 hwmgr = pp_handle->hwmgr;
458
459 if (hwmgr->hwmgr_func->get_mclk == NULL) { 453 if (hwmgr->hwmgr_func->get_mclk == NULL) {
460 pr_info("%s was not implemented.\n", __func__); 454 pr_info("%s was not implemented.\n", __func__);
461 return 0; 455 return 0;
462 } 456 }
463 mutex_lock(&pp_handle->pp_lock); 457 mutex_lock(&hwmgr->smu_lock);
464 clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low); 458 clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
465 mutex_unlock(&pp_handle->pp_lock); 459 mutex_unlock(&hwmgr->smu_lock);
466 return clk; 460 return clk;
467} 461}
468 462
469static void pp_dpm_powergate_vce(void *handle, bool gate) 463static void pp_dpm_powergate_vce(void *handle, bool gate)
470{ 464{
471 struct pp_hwmgr *hwmgr; 465 struct pp_hwmgr *hwmgr = handle;
472 struct pp_instance *pp_handle = (struct pp_instance *)handle;
473 int ret = 0; 466 int ret = 0;
474 467
475 ret = pp_check(pp_handle); 468 ret = pp_check(hwmgr);
476 469
477 if (ret) 470 if (ret)
478 return; 471 return;
479 472
480 hwmgr = pp_handle->hwmgr;
481
482 if (hwmgr->hwmgr_func->powergate_vce == NULL) { 473 if (hwmgr->hwmgr_func->powergate_vce == NULL) {
483 pr_info("%s was not implemented.\n", __func__); 474 pr_info("%s was not implemented.\n", __func__);
484 return; 475 return;
485 } 476 }
486 mutex_lock(&pp_handle->pp_lock); 477 mutex_lock(&hwmgr->smu_lock);
487 hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); 478 hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
488 mutex_unlock(&pp_handle->pp_lock); 479 mutex_unlock(&hwmgr->smu_lock);
489} 480}
490 481
491static void pp_dpm_powergate_uvd(void *handle, bool gate) 482static void pp_dpm_powergate_uvd(void *handle, bool gate)
492{ 483{
493 struct pp_hwmgr *hwmgr; 484 struct pp_hwmgr *hwmgr = handle;
494 struct pp_instance *pp_handle = (struct pp_instance *)handle;
495 int ret = 0; 485 int ret = 0;
496 486
497 ret = pp_check(pp_handle); 487 ret = pp_check(hwmgr);
498 488
499 if (ret) 489 if (ret)
500 return; 490 return;
501 491
502 hwmgr = pp_handle->hwmgr;
503
504 if (hwmgr->hwmgr_func->powergate_uvd == NULL) { 492 if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
505 pr_info("%s was not implemented.\n", __func__); 493 pr_info("%s was not implemented.\n", __func__);
506 return; 494 return;
507 } 495 }
508 mutex_lock(&pp_handle->pp_lock); 496 mutex_lock(&hwmgr->smu_lock);
509 hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); 497 hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
510 mutex_unlock(&pp_handle->pp_lock); 498 mutex_unlock(&hwmgr->smu_lock);
511} 499}
512 500
513static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, 501static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
514 void *input, void *output) 502 enum amd_pm_state_type *user_state)
515{ 503{
516 int ret = 0; 504 int ret = 0;
517 struct pp_instance *pp_handle = (struct pp_instance *)handle; 505 struct pp_hwmgr *hwmgr = handle;
518 506
519 ret = pp_check(pp_handle); 507 ret = pp_check(hwmgr);
520 508
521 if (ret) 509 if (ret)
522 return ret; 510 return ret;
523 511
524 mutex_lock(&pp_handle->pp_lock); 512 mutex_lock(&hwmgr->smu_lock);
525 ret = hwmgr_handle_task(pp_handle, task_id, input, output); 513 ret = hwmgr_handle_task(hwmgr, task_id, user_state);
526 mutex_unlock(&pp_handle->pp_lock); 514 mutex_unlock(&hwmgr->smu_lock);
527 515
528 return ret; 516 return ret;
529} 517}
530 518
531static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) 519static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
532{ 520{
533 struct pp_hwmgr *hwmgr; 521 struct pp_hwmgr *hwmgr = handle;
534 struct pp_power_state *state; 522 struct pp_power_state *state;
535 struct pp_instance *pp_handle = (struct pp_instance *)handle;
536 int ret = 0; 523 int ret = 0;
537 enum amd_pm_state_type pm_type; 524 enum amd_pm_state_type pm_type;
538 525
539 ret = pp_check(pp_handle); 526 ret = pp_check(hwmgr);
540 527
541 if (ret) 528 if (ret)
542 return ret; 529 return ret;
543 530
544 hwmgr = pp_handle->hwmgr;
545
546 if (hwmgr->current_ps == NULL) 531 if (hwmgr->current_ps == NULL)
547 return -EINVAL; 532 return -EINVAL;
548 533
549 mutex_lock(&pp_handle->pp_lock); 534 mutex_lock(&hwmgr->smu_lock);
550 535
551 state = hwmgr->current_ps; 536 state = hwmgr->current_ps;
552 537
@@ -567,170 +552,129 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
567 pm_type = POWER_STATE_TYPE_DEFAULT; 552 pm_type = POWER_STATE_TYPE_DEFAULT;
568 break; 553 break;
569 } 554 }
570 mutex_unlock(&pp_handle->pp_lock); 555 mutex_unlock(&hwmgr->smu_lock);
571 556
572 return pm_type; 557 return pm_type;
573} 558}
574 559
575static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) 560static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
576{ 561{
577 struct pp_hwmgr *hwmgr; 562 struct pp_hwmgr *hwmgr = handle;
578 struct pp_instance *pp_handle = (struct pp_instance *)handle;
579 int ret = 0; 563 int ret = 0;
580 564
581 ret = pp_check(pp_handle); 565 ret = pp_check(hwmgr);
582 566
583 if (ret) 567 if (ret)
584 return; 568 return;
585 569
586 hwmgr = pp_handle->hwmgr;
587
588 if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { 570 if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
589 pr_info("%s was not implemented.\n", __func__); 571 pr_info("%s was not implemented.\n", __func__);
590 return; 572 return;
591 } 573 }
592 mutex_lock(&pp_handle->pp_lock); 574 mutex_lock(&hwmgr->smu_lock);
593 hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); 575 hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
594 mutex_unlock(&pp_handle->pp_lock); 576 mutex_unlock(&hwmgr->smu_lock);
595} 577}
596 578
597static uint32_t pp_dpm_get_fan_control_mode(void *handle) 579static uint32_t pp_dpm_get_fan_control_mode(void *handle)
598{ 580{
599 struct pp_hwmgr *hwmgr; 581 struct pp_hwmgr *hwmgr = handle;
600 struct pp_instance *pp_handle = (struct pp_instance *)handle;
601 int ret = 0; 582 int ret = 0;
602 uint32_t mode = 0; 583 uint32_t mode = 0;
603 584
604 ret = pp_check(pp_handle); 585 ret = pp_check(hwmgr);
605 586
606 if (ret) 587 if (ret)
607 return ret; 588 return ret;
608 589
609 hwmgr = pp_handle->hwmgr;
610
611 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { 590 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
612 pr_info("%s was not implemented.\n", __func__); 591 pr_info("%s was not implemented.\n", __func__);
613 return 0; 592 return 0;
614 } 593 }
615 mutex_lock(&pp_handle->pp_lock); 594 mutex_lock(&hwmgr->smu_lock);
616 mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); 595 mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
617 mutex_unlock(&pp_handle->pp_lock); 596 mutex_unlock(&hwmgr->smu_lock);
618 return mode; 597 return mode;
619} 598}
620 599
621static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) 600static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
622{ 601{
623 struct pp_hwmgr *hwmgr; 602 struct pp_hwmgr *hwmgr = handle;
624 struct pp_instance *pp_handle = (struct pp_instance *)handle;
625 int ret = 0; 603 int ret = 0;
626 604
627 ret = pp_check(pp_handle); 605 ret = pp_check(hwmgr);
628 606
629 if (ret) 607 if (ret)
630 return ret; 608 return ret;
631 609
632 hwmgr = pp_handle->hwmgr;
633
634 if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) { 610 if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
635 pr_info("%s was not implemented.\n", __func__); 611 pr_info("%s was not implemented.\n", __func__);
636 return 0; 612 return 0;
637 } 613 }
638 mutex_lock(&pp_handle->pp_lock); 614 mutex_lock(&hwmgr->smu_lock);
639 ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent); 615 ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
640 mutex_unlock(&pp_handle->pp_lock); 616 mutex_unlock(&hwmgr->smu_lock);
641 return ret; 617 return ret;
642} 618}
643 619
644static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) 620static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
645{ 621{
646 struct pp_hwmgr *hwmgr; 622 struct pp_hwmgr *hwmgr = handle;
647 struct pp_instance *pp_handle = (struct pp_instance *)handle;
648 int ret = 0; 623 int ret = 0;
649 624
650 ret = pp_check(pp_handle); 625 ret = pp_check(hwmgr);
651 626
652 if (ret) 627 if (ret)
653 return ret; 628 return ret;
654 629
655 hwmgr = pp_handle->hwmgr;
656
657 if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) { 630 if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
658 pr_info("%s was not implemented.\n", __func__); 631 pr_info("%s was not implemented.\n", __func__);
659 return 0; 632 return 0;
660 } 633 }
661 634
662 mutex_lock(&pp_handle->pp_lock); 635 mutex_lock(&hwmgr->smu_lock);
663 ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed); 636 ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
664 mutex_unlock(&pp_handle->pp_lock); 637 mutex_unlock(&hwmgr->smu_lock);
665 return ret; 638 return ret;
666} 639}
667 640
668static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) 641static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
669{ 642{
670 struct pp_hwmgr *hwmgr; 643 struct pp_hwmgr *hwmgr = handle;
671 struct pp_instance *pp_handle = (struct pp_instance *)handle;
672 int ret = 0; 644 int ret = 0;
673 645
674 ret = pp_check(pp_handle); 646 ret = pp_check(hwmgr);
675 647
676 if (ret) 648 if (ret)
677 return ret; 649 return ret;
678 650
679 hwmgr = pp_handle->hwmgr;
680
681 if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL) 651 if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
682 return -EINVAL; 652 return -EINVAL;
683 653
684 mutex_lock(&pp_handle->pp_lock); 654 mutex_lock(&hwmgr->smu_lock);
685 ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm); 655 ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
686 mutex_unlock(&pp_handle->pp_lock); 656 mutex_unlock(&hwmgr->smu_lock);
687 return ret;
688}
689
690static int pp_dpm_get_temperature(void *handle)
691{
692 struct pp_hwmgr *hwmgr;
693 struct pp_instance *pp_handle = (struct pp_instance *)handle;
694 int ret = 0;
695
696 ret = pp_check(pp_handle);
697
698 if (ret)
699 return ret;
700
701 hwmgr = pp_handle->hwmgr;
702
703 if (hwmgr->hwmgr_func->get_temperature == NULL) {
704 pr_info("%s was not implemented.\n", __func__);
705 return 0;
706 }
707 mutex_lock(&pp_handle->pp_lock);
708 ret = hwmgr->hwmgr_func->get_temperature(hwmgr);
709 mutex_unlock(&pp_handle->pp_lock);
710 return ret; 657 return ret;
711} 658}
712 659
713static int pp_dpm_get_pp_num_states(void *handle, 660static int pp_dpm_get_pp_num_states(void *handle,
714 struct pp_states_info *data) 661 struct pp_states_info *data)
715{ 662{
716 struct pp_hwmgr *hwmgr; 663 struct pp_hwmgr *hwmgr = handle;
717 int i; 664 int i;
718 struct pp_instance *pp_handle = (struct pp_instance *)handle;
719 int ret = 0; 665 int ret = 0;
720 666
721 memset(data, 0, sizeof(*data)); 667 memset(data, 0, sizeof(*data));
722 668
723 ret = pp_check(pp_handle); 669 ret = pp_check(hwmgr);
724 670
725 if (ret) 671 if (ret)
726 return ret; 672 return ret;
727 673
728 hwmgr = pp_handle->hwmgr;
729
730 if (hwmgr->ps == NULL) 674 if (hwmgr->ps == NULL)
731 return -EINVAL; 675 return -EINVAL;
732 676
733 mutex_lock(&pp_handle->pp_lock); 677 mutex_lock(&hwmgr->smu_lock);
734 678
735 data->nums = hwmgr->num_ps; 679 data->nums = hwmgr->num_ps;
736 680
@@ -754,73 +698,68 @@ static int pp_dpm_get_pp_num_states(void *handle,
754 data->states[i] = POWER_STATE_TYPE_DEFAULT; 698 data->states[i] = POWER_STATE_TYPE_DEFAULT;
755 } 699 }
756 } 700 }
757 mutex_unlock(&pp_handle->pp_lock); 701 mutex_unlock(&hwmgr->smu_lock);
758 return 0; 702 return 0;
759} 703}
760 704
761static int pp_dpm_get_pp_table(void *handle, char **table) 705static int pp_dpm_get_pp_table(void *handle, char **table)
762{ 706{
763 struct pp_hwmgr *hwmgr; 707 struct pp_hwmgr *hwmgr = handle;
764 struct pp_instance *pp_handle = (struct pp_instance *)handle;
765 int ret = 0; 708 int ret = 0;
766 int size = 0; 709 int size = 0;
767 710
768 ret = pp_check(pp_handle); 711 ret = pp_check(hwmgr);
769 712
770 if (ret) 713 if (ret)
771 return ret; 714 return ret;
772 715
773 hwmgr = pp_handle->hwmgr;
774
775 if (!hwmgr->soft_pp_table) 716 if (!hwmgr->soft_pp_table)
776 return -EINVAL; 717 return -EINVAL;
777 718
778 mutex_lock(&pp_handle->pp_lock); 719 mutex_lock(&hwmgr->smu_lock);
779 *table = (char *)hwmgr->soft_pp_table; 720 *table = (char *)hwmgr->soft_pp_table;
780 size = hwmgr->soft_pp_table_size; 721 size = hwmgr->soft_pp_table_size;
781 mutex_unlock(&pp_handle->pp_lock); 722 mutex_unlock(&hwmgr->smu_lock);
782 return size; 723 return size;
783} 724}
784 725
785static int amd_powerplay_reset(void *handle) 726static int amd_powerplay_reset(void *handle)
786{ 727{
787 struct pp_instance *instance = (struct pp_instance *)handle; 728 struct pp_hwmgr *hwmgr = handle;
788 int ret; 729 int ret;
789 730
790 ret = pp_check(instance); 731 ret = pp_check(hwmgr);
791 if (ret) 732 if (ret)
792 return ret; 733 return ret;
793 734
794 ret = pp_hw_fini(instance); 735 ret = pp_hw_fini(hwmgr);
795 if (ret) 736 if (ret)
796 return ret; 737 return ret;
797 738
798 ret = hwmgr_hw_init(instance); 739 ret = hwmgr_hw_init(hwmgr);
799 if (ret) 740 if (ret)
800 return ret; 741 return ret;
801 742
802 return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL); 743 return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
803} 744}
804 745
805static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) 746static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
806{ 747{
807 struct pp_hwmgr *hwmgr; 748 struct pp_hwmgr *hwmgr = handle;
808 struct pp_instance *pp_handle = (struct pp_instance *)handle;
809 int ret = 0; 749 int ret = 0;
810 750
811 ret = pp_check(pp_handle); 751 ret = pp_check(hwmgr);
812 752
813 if (ret) 753 if (ret)
814 return ret; 754 return ret;
815 755
816 hwmgr = pp_handle->hwmgr; 756 mutex_lock(&hwmgr->smu_lock);
817 mutex_lock(&pp_handle->pp_lock);
818 if (!hwmgr->hardcode_pp_table) { 757 if (!hwmgr->hardcode_pp_table) {
819 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table, 758 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
820 hwmgr->soft_pp_table_size, 759 hwmgr->soft_pp_table_size,
821 GFP_KERNEL); 760 GFP_KERNEL);
822 if (!hwmgr->hardcode_pp_table) { 761 if (!hwmgr->hardcode_pp_table) {
823 mutex_unlock(&pp_handle->pp_lock); 762 mutex_unlock(&hwmgr->smu_lock);
824 return -ENOMEM; 763 return -ENOMEM;
825 } 764 }
826 } 765 }
@@ -828,7 +767,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
828 memcpy(hwmgr->hardcode_pp_table, buf, size); 767 memcpy(hwmgr->hardcode_pp_table, buf, size);
829 768
830 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table; 769 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
831 mutex_unlock(&pp_handle->pp_lock); 770 mutex_unlock(&hwmgr->smu_lock);
832 771
833 ret = amd_powerplay_reset(handle); 772 ret = amd_powerplay_reset(handle);
834 if (ret) 773 if (ret)
@@ -846,317 +785,258 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
846static int pp_dpm_force_clock_level(void *handle, 785static int pp_dpm_force_clock_level(void *handle,
847 enum pp_clock_type type, uint32_t mask) 786 enum pp_clock_type type, uint32_t mask)
848{ 787{
849 struct pp_hwmgr *hwmgr; 788 struct pp_hwmgr *hwmgr = handle;
850 struct pp_instance *pp_handle = (struct pp_instance *)handle;
851 int ret = 0; 789 int ret = 0;
852 790
853 ret = pp_check(pp_handle); 791 ret = pp_check(hwmgr);
854 792
855 if (ret) 793 if (ret)
856 return ret; 794 return ret;
857 795
858 hwmgr = pp_handle->hwmgr;
859
860 if (hwmgr->hwmgr_func->force_clock_level == NULL) { 796 if (hwmgr->hwmgr_func->force_clock_level == NULL) {
861 pr_info("%s was not implemented.\n", __func__); 797 pr_info("%s was not implemented.\n", __func__);
862 return 0; 798 return 0;
863 } 799 }
864 mutex_lock(&pp_handle->pp_lock); 800 mutex_lock(&hwmgr->smu_lock);
865 hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); 801 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
866 mutex_unlock(&pp_handle->pp_lock); 802 ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
803 else
804 ret = -EINVAL;
805 mutex_unlock(&hwmgr->smu_lock);
867 return ret; 806 return ret;
868} 807}
869 808
870static int pp_dpm_print_clock_levels(void *handle, 809static int pp_dpm_print_clock_levels(void *handle,
871 enum pp_clock_type type, char *buf) 810 enum pp_clock_type type, char *buf)
872{ 811{
873 struct pp_hwmgr *hwmgr; 812 struct pp_hwmgr *hwmgr = handle;
874 struct pp_instance *pp_handle = (struct pp_instance *)handle;
875 int ret = 0; 813 int ret = 0;
876 814
877 ret = pp_check(pp_handle); 815 ret = pp_check(hwmgr);
878 816
879 if (ret) 817 if (ret)
880 return ret; 818 return ret;
881 819
882 hwmgr = pp_handle->hwmgr;
883
884 if (hwmgr->hwmgr_func->print_clock_levels == NULL) { 820 if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
885 pr_info("%s was not implemented.\n", __func__); 821 pr_info("%s was not implemented.\n", __func__);
886 return 0; 822 return 0;
887 } 823 }
888 mutex_lock(&pp_handle->pp_lock); 824 mutex_lock(&hwmgr->smu_lock);
889 ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); 825 ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
890 mutex_unlock(&pp_handle->pp_lock); 826 mutex_unlock(&hwmgr->smu_lock);
891 return ret; 827 return ret;
892} 828}
893 829
894static int pp_dpm_get_sclk_od(void *handle) 830static int pp_dpm_get_sclk_od(void *handle)
895{ 831{
896 struct pp_hwmgr *hwmgr; 832 struct pp_hwmgr *hwmgr = handle;
897 struct pp_instance *pp_handle = (struct pp_instance *)handle;
898 int ret = 0; 833 int ret = 0;
899 834
900 ret = pp_check(pp_handle); 835 ret = pp_check(hwmgr);
901 836
902 if (ret) 837 if (ret)
903 return ret; 838 return ret;
904 839
905 hwmgr = pp_handle->hwmgr;
906
907 if (hwmgr->hwmgr_func->get_sclk_od == NULL) { 840 if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
908 pr_info("%s was not implemented.\n", __func__); 841 pr_info("%s was not implemented.\n", __func__);
909 return 0; 842 return 0;
910 } 843 }
911 mutex_lock(&pp_handle->pp_lock); 844 mutex_lock(&hwmgr->smu_lock);
912 ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr); 845 ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
913 mutex_unlock(&pp_handle->pp_lock); 846 mutex_unlock(&hwmgr->smu_lock);
914 return ret; 847 return ret;
915} 848}
916 849
917static int pp_dpm_set_sclk_od(void *handle, uint32_t value) 850static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
918{ 851{
919 struct pp_hwmgr *hwmgr; 852 struct pp_hwmgr *hwmgr = handle;
920 struct pp_instance *pp_handle = (struct pp_instance *)handle;
921 int ret = 0; 853 int ret = 0;
922 854
923 ret = pp_check(pp_handle); 855 ret = pp_check(hwmgr);
924 856
925 if (ret) 857 if (ret)
926 return ret; 858 return ret;
927 859
928 hwmgr = pp_handle->hwmgr;
929
930 if (hwmgr->hwmgr_func->set_sclk_od == NULL) { 860 if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
931 pr_info("%s was not implemented.\n", __func__); 861 pr_info("%s was not implemented.\n", __func__);
932 return 0; 862 return 0;
933 } 863 }
934 864
935 mutex_lock(&pp_handle->pp_lock); 865 mutex_lock(&hwmgr->smu_lock);
936 ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); 866 ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
937 mutex_unlock(&pp_handle->pp_lock); 867 mutex_unlock(&hwmgr->smu_lock);
938 return ret; 868 return ret;
939} 869}
940 870
941static int pp_dpm_get_mclk_od(void *handle) 871static int pp_dpm_get_mclk_od(void *handle)
942{ 872{
943 struct pp_hwmgr *hwmgr; 873 struct pp_hwmgr *hwmgr = handle;
944 struct pp_instance *pp_handle = (struct pp_instance *)handle;
945 int ret = 0; 874 int ret = 0;
946 875
947 ret = pp_check(pp_handle); 876 ret = pp_check(hwmgr);
948 877
949 if (ret) 878 if (ret)
950 return ret; 879 return ret;
951 880
952 hwmgr = pp_handle->hwmgr;
953
954 if (hwmgr->hwmgr_func->get_mclk_od == NULL) { 881 if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
955 pr_info("%s was not implemented.\n", __func__); 882 pr_info("%s was not implemented.\n", __func__);
956 return 0; 883 return 0;
957 } 884 }
958 mutex_lock(&pp_handle->pp_lock); 885 mutex_lock(&hwmgr->smu_lock);
959 ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr); 886 ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
960 mutex_unlock(&pp_handle->pp_lock); 887 mutex_unlock(&hwmgr->smu_lock);
961 return ret; 888 return ret;
962} 889}
963 890
964static int pp_dpm_set_mclk_od(void *handle, uint32_t value) 891static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
965{ 892{
966 struct pp_hwmgr *hwmgr; 893 struct pp_hwmgr *hwmgr = handle;
967 struct pp_instance *pp_handle = (struct pp_instance *)handle;
968 int ret = 0; 894 int ret = 0;
969 895
970 ret = pp_check(pp_handle); 896 ret = pp_check(hwmgr);
971 897
972 if (ret) 898 if (ret)
973 return ret; 899 return ret;
974 900
975 hwmgr = pp_handle->hwmgr;
976
977 if (hwmgr->hwmgr_func->set_mclk_od == NULL) { 901 if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
978 pr_info("%s was not implemented.\n", __func__); 902 pr_info("%s was not implemented.\n", __func__);
979 return 0; 903 return 0;
980 } 904 }
981 mutex_lock(&pp_handle->pp_lock); 905 mutex_lock(&hwmgr->smu_lock);
982 ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); 906 ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
983 mutex_unlock(&pp_handle->pp_lock); 907 mutex_unlock(&hwmgr->smu_lock);
984 return ret; 908 return ret;
985} 909}
986 910
987static int pp_dpm_read_sensor(void *handle, int idx, 911static int pp_dpm_read_sensor(void *handle, int idx,
988 void *value, int *size) 912 void *value, int *size)
989{ 913{
990 struct pp_hwmgr *hwmgr; 914 struct pp_hwmgr *hwmgr = handle;
991 struct pp_instance *pp_handle = (struct pp_instance *)handle;
992 int ret = 0; 915 int ret = 0;
993 916
994 ret = pp_check(pp_handle); 917 ret = pp_check(hwmgr);
995
996 if (ret) 918 if (ret)
997 return ret; 919 return ret;
998 920
999 hwmgr = pp_handle->hwmgr; 921 if (value == NULL)
922 return -EINVAL;
1000 923
1001 if (hwmgr->hwmgr_func->read_sensor == NULL) { 924 switch (idx) {
1002 pr_info("%s was not implemented.\n", __func__); 925 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
926 *((uint32_t *)value) = hwmgr->pstate_sclk;
927 return 0;
928 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
929 *((uint32_t *)value) = hwmgr->pstate_mclk;
1003 return 0; 930 return 0;
931 default:
932 mutex_lock(&hwmgr->smu_lock);
933 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
934 mutex_unlock(&hwmgr->smu_lock);
935 return ret;
1004 } 936 }
1005
1006 mutex_lock(&pp_handle->pp_lock);
1007 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
1008 mutex_unlock(&pp_handle->pp_lock);
1009
1010 return ret;
1011} 937}
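pp_dpm_read_sensor() above now answers the stable-pstate clock queries straight from the cached hwmgr->pstate_sclk/pstate_mclk fields without taking smu_lock, and only routes other sensor IDs through the backend read_sensor callback under the lock. The fragment below is an illustrative caller going through the amd_pm_funcs table filled in at the end of this file; adev is assumed to be an initialised amdgpu_device, and the local variable names are made up.

/* Illustrative caller: read the cached stable-pstate SCLK via pp_funcs. */
uint32_t sclk = 0;
int size = sizeof(sclk);
int ret = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle,
		AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, &sclk, &size);
if (!ret)
	pr_debug("stable pstate sclk: %u\n", sclk);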
1012 938
1013static struct amd_vce_state* 939static struct amd_vce_state*
1014pp_dpm_get_vce_clock_state(void *handle, unsigned idx) 940pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
1015{ 941{
1016 struct pp_hwmgr *hwmgr; 942 struct pp_hwmgr *hwmgr = handle;
1017 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1018 int ret = 0; 943 int ret = 0;
1019 944
1020 ret = pp_check(pp_handle); 945 ret = pp_check(hwmgr);
1021 946
1022 if (ret) 947 if (ret)
1023 return NULL; 948 return NULL;
1024 949
1025 hwmgr = pp_handle->hwmgr;
1026
1027 if (hwmgr && idx < hwmgr->num_vce_state_tables) 950 if (hwmgr && idx < hwmgr->num_vce_state_tables)
1028 return &hwmgr->vce_states[idx]; 951 return &hwmgr->vce_states[idx];
1029 return NULL; 952 return NULL;
1030} 953}
1031 954
1032static int pp_dpm_reset_power_profile_state(void *handle, 955static int pp_get_power_profile_mode(void *handle, char *buf)
1033 struct amd_pp_profile *request)
1034{ 956{
1035 struct pp_hwmgr *hwmgr; 957 struct pp_hwmgr *hwmgr = handle;
1036 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1037 958
1038 if (!request || pp_check(pp_handle)) 959 if (!buf || pp_check(hwmgr))
1039 return -EINVAL; 960 return -EINVAL;
1040 961
1041 hwmgr = pp_handle->hwmgr; 962 if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
1042
1043 if (hwmgr->hwmgr_func->set_power_profile_state == NULL) {
1044 pr_info("%s was not implemented.\n", __func__); 963 pr_info("%s was not implemented.\n", __func__);
1045 return 0; 964 return snprintf(buf, PAGE_SIZE, "\n");
1046 } 965 }
1047 966
1048 if (request->type == AMD_PP_GFX_PROFILE) { 967 return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
1049 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
1050 return hwmgr->hwmgr_func->set_power_profile_state(hwmgr,
1051 &hwmgr->gfx_power_profile);
1052 } else if (request->type == AMD_PP_COMPUTE_PROFILE) {
1053 hwmgr->compute_power_profile =
1054 hwmgr->default_compute_power_profile;
1055 return hwmgr->hwmgr_func->set_power_profile_state(hwmgr,
1056 &hwmgr->compute_power_profile);
1057 } else
1058 return -EINVAL;
1059} 968}
1060 969
1061static int pp_dpm_get_power_profile_state(void *handle, 970static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
1062 struct amd_pp_profile *query)
1063{ 971{
1064 struct pp_hwmgr *hwmgr; 972 struct pp_hwmgr *hwmgr = handle;
1065 struct pp_instance *pp_handle = (struct pp_instance *)handle; 973 int ret = -EINVAL;
1066 974
1067 if (!query || pp_check(pp_handle)) 975 if (pp_check(hwmgr))
1068 return -EINVAL; 976 return -EINVAL;
1069 977
1070 hwmgr = pp_handle->hwmgr; 978 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
1071 979 pr_info("%s was not implemented.\n", __func__);
1072 if (query->type == AMD_PP_GFX_PROFILE)
1073 memcpy(query, &hwmgr->gfx_power_profile,
1074 sizeof(struct amd_pp_profile));
1075 else if (query->type == AMD_PP_COMPUTE_PROFILE)
1076 memcpy(query, &hwmgr->compute_power_profile,
1077 sizeof(struct amd_pp_profile));
1078 else
1079 return -EINVAL; 980 return -EINVAL;
1080 981 }
1081 return 0; 982 mutex_lock(&hwmgr->smu_lock);
983 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
984 ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
985 mutex_unlock(&hwmgr->smu_lock);
986 return ret;
1082} 987}
1083 988
1084static int pp_dpm_set_power_profile_state(void *handle, 989static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
1085 struct amd_pp_profile *request)
1086{ 990{
1087 struct pp_hwmgr *hwmgr; 991 struct pp_hwmgr *hwmgr = handle;
1088 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1089 int ret = -1;
1090 992
1091 if (!request || pp_check(pp_handle)) 993 if (pp_check(hwmgr))
1092 return -EINVAL; 994 return -EINVAL;
1093 995
1094 hwmgr = pp_handle->hwmgr; 996 if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
1095
1096 if (hwmgr->hwmgr_func->set_power_profile_state == NULL) {
1097 pr_info("%s was not implemented.\n", __func__); 997 pr_info("%s was not implemented.\n", __func__);
1098 return 0; 998 return -EINVAL;
1099 }
1100
1101 if (request->min_sclk ||
1102 request->min_mclk ||
1103 request->activity_threshold ||
1104 request->up_hyst ||
1105 request->down_hyst) {
1106 if (request->type == AMD_PP_GFX_PROFILE)
1107 memcpy(&hwmgr->gfx_power_profile, request,
1108 sizeof(struct amd_pp_profile));
1109 else if (request->type == AMD_PP_COMPUTE_PROFILE)
1110 memcpy(&hwmgr->compute_power_profile, request,
1111 sizeof(struct amd_pp_profile));
1112 else
1113 return -EINVAL;
1114
1115 if (request->type == hwmgr->current_power_profile)
1116 ret = hwmgr->hwmgr_func->set_power_profile_state(
1117 hwmgr,
1118 request);
1119 } else {
1120 /* set power profile if it exists */
1121 switch (request->type) {
1122 case AMD_PP_GFX_PROFILE:
1123 ret = hwmgr->hwmgr_func->set_power_profile_state(
1124 hwmgr,
1125 &hwmgr->gfx_power_profile);
1126 break;
1127 case AMD_PP_COMPUTE_PROFILE:
1128 ret = hwmgr->hwmgr_func->set_power_profile_state(
1129 hwmgr,
1130 &hwmgr->compute_power_profile);
1131 break;
1132 default:
1133 return -EINVAL;
1134 }
1135 } 999 }
1136 1000
1137 if (!ret) 1001 return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
1138 hwmgr->current_power_profile = request->type;
1139
1140 return 0;
1141} 1002}
1142 1003
1143static int pp_dpm_switch_power_profile(void *handle, 1004static int pp_dpm_switch_power_profile(void *handle,
1144 enum amd_pp_profile_type type) 1005 enum PP_SMC_POWER_PROFILE type, bool en)
1145{ 1006{
1146 struct pp_hwmgr *hwmgr; 1007 struct pp_hwmgr *hwmgr = handle;
1147 struct amd_pp_profile request = {0}; 1008 long workload;
1148 struct pp_instance *pp_handle = (struct pp_instance *)handle; 1009 uint32_t index;
1010
1011 if (pp_check(hwmgr))
1012 return -EINVAL;
1013
1014 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
1015 pr_info("%s was not implemented.\n", __func__);
1016 return -EINVAL;
1017 }
1149 1018
1150 if (pp_check(pp_handle)) 1019 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1151 return -EINVAL; 1020 return -EINVAL;
1152 1021
1153 hwmgr = pp_handle->hwmgr; 1022 mutex_lock(&hwmgr->smu_lock);
1154 1023
1155 if (hwmgr->current_power_profile != type) { 1024 if (!en) {
1156 request.type = type; 1025 hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
1157 pp_dpm_set_power_profile_state(handle, &request); 1026 index = fls(hwmgr->workload_mask);
1027 index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
1028 workload = hwmgr->workload_setting[index];
1029 } else {
1030 hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
1031 index = fls(hwmgr->workload_mask);
1032 index = index <= Workload_Policy_Max ? index - 1 : 0;
1033 workload = hwmgr->workload_setting[index];
1158 } 1034 }
1159 1035
1036 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1037 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
1038 mutex_unlock(&hwmgr->smu_lock);
1039
1160 return 0; 1040 return 0;
1161} 1041}
1162 1042
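pp_dpm_switch_power_profile() above tracks requested profiles in a bitmask whose bit positions come from hwmgr->workload_prority[], so fls() on the mask picks the highest-priority profile that is still enabled after a set or clear. The self-contained example below only illustrates that selection step; the priorities, settings table and highest_bit() helper are local to the example (highest_bit stands in for the kernel's fls()).

/* Illustration of the bitmask-based workload selection used above. */
#include <stdint.h>
#include <stdio.h>

#define WORKLOAD_POLICY_MAX 5

static int highest_bit(uint32_t mask)    /* stand-in for the kernel's fls() */
{
	int pos = 0;

	while (mask) {
		pos++;
		mask >>= 1;
	}
	return pos;                       /* 0 when no bit is set */
}

int main(void)
{
	/* workload_setting[priority] -> profile id handed to the SMU */
	const long workload_setting[WORKLOAD_POLICY_MAX] = { 0, 1, 2, 3, 4 };
	uint32_t workload_mask = 0;
	int index;

	workload_mask |= 1u << 1;         /* enable a low-priority profile */
	workload_mask |= 1u << 3;         /* enable a higher-priority one  */
	workload_mask &= ~(1u << 3);      /* ...then disable it again      */

	index = highest_bit(workload_mask);
	index = (index > 0 && index <= WORKLOAD_POLICY_MAX) ? index - 1 : 0;

	printf("selected workload: %ld\n", workload_setting[index]);
	return 0;
}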
@@ -1167,29 +1047,79 @@ static int pp_dpm_notify_smu_memory_info(void *handle,
1167 uint32_t mc_addr_hi, 1047 uint32_t mc_addr_hi,
1168 uint32_t size) 1048 uint32_t size)
1169{ 1049{
1170 struct pp_hwmgr *hwmgr; 1050 struct pp_hwmgr *hwmgr = handle;
1171 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1172 int ret = 0; 1051 int ret = 0;
1173 1052
1174 ret = pp_check(pp_handle); 1053 ret = pp_check(hwmgr);
1175 1054
1176 if (ret) 1055 if (ret)
1177 return ret; 1056 return ret;
1178 1057
1179 hwmgr = pp_handle->hwmgr;
1180
1181 if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) { 1058 if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
1182 pr_info("%s was not implemented.\n", __func__); 1059 pr_info("%s was not implemented.\n", __func__);
1183 return -EINVAL; 1060 return -EINVAL;
1184 } 1061 }
1185 1062
1186 mutex_lock(&pp_handle->pp_lock); 1063 mutex_lock(&hwmgr->smu_lock);
1187 1064
1188 ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low, 1065 ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
1189 virtual_addr_hi, mc_addr_low, mc_addr_hi, 1066 virtual_addr_hi, mc_addr_low, mc_addr_hi,
1190 size); 1067 size);
1191 1068
1192 mutex_unlock(&pp_handle->pp_lock); 1069 mutex_unlock(&hwmgr->smu_lock);
1070
1071 return ret;
1072}
1073
1074static int pp_set_power_limit(void *handle, uint32_t limit)
1075{
1076 struct pp_hwmgr *hwmgr = handle;
1077 int ret = 0;
1078
1079 ret = pp_check(hwmgr);
1080
1081 if (ret)
1082 return ret;
1083
1084 if (hwmgr->hwmgr_func->set_power_limit == NULL) {
1085 pr_info("%s was not implemented.\n", __func__);
1086 return -EINVAL;
1087 }
1088
1089 if (limit == 0)
1090 limit = hwmgr->default_power_limit;
1091
1092 if (limit > hwmgr->default_power_limit)
1093 return -EINVAL;
1094
1095 mutex_lock(&hwmgr->smu_lock);
1096 hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1097 hwmgr->power_limit = limit;
1098 mutex_unlock(&hwmgr->smu_lock);
1099 return ret;
1100}
1101
1102static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1103{
1104 struct pp_hwmgr *hwmgr = handle;
1105 int ret = 0;
1106
1107 ret = pp_check(hwmgr);
1108
1109 if (ret)
1110 return ret;
1111
1112 if (limit == NULL)
1113 return -EINVAL;
1114
1115 mutex_lock(&hwmgr->smu_lock);
1116
1117 if (default_limit)
1118 *limit = hwmgr->default_power_limit;
1119 else
1120 *limit = hwmgr->power_limit;
1121
1122 mutex_unlock(&hwmgr->smu_lock);
1193 1123
1194 return ret; 1124 return ret;
1195} 1125}
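The new pp_set_power_limit()/pp_get_power_limit() pair above treats a requested limit of 0 as "reset to the board default" and rejects anything above the default, so userspace can only lower the limit. The short sketch below restates that clamping rule; clamp_power_limit() and its parameters are example names, and the driver returns -EINVAL where the sketch returns -1.

/* Sketch of the power-limit clamping rule implemented above. */
static int clamp_power_limit(uint32_t requested, uint32_t default_limit,
			     uint32_t *out_limit)
{
	if (requested == 0)
		requested = default_limit; /* 0 means "back to default" */

	if (requested > default_limit)
		return -1;                 /* raising above default is refused */

	*out_limit = requested;
	return 0;
}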
@@ -1197,42 +1127,37 @@ static int pp_dpm_notify_smu_memory_info(void *handle,
1197static int pp_display_configuration_change(void *handle, 1127static int pp_display_configuration_change(void *handle,
1198 const struct amd_pp_display_configuration *display_config) 1128 const struct amd_pp_display_configuration *display_config)
1199{ 1129{
1200 struct pp_hwmgr *hwmgr; 1130 struct pp_hwmgr *hwmgr = handle;
1201 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1202 int ret = 0; 1131 int ret = 0;
1203 1132
1204 ret = pp_check(pp_handle); 1133 ret = pp_check(hwmgr);
1205 1134
1206 if (ret) 1135 if (ret)
1207 return ret; 1136 return ret;
1208 1137
1209 hwmgr = pp_handle->hwmgr; 1138 mutex_lock(&hwmgr->smu_lock);
1210 mutex_lock(&pp_handle->pp_lock);
1211 phm_store_dal_configuration_data(hwmgr, display_config); 1139 phm_store_dal_configuration_data(hwmgr, display_config);
1212 mutex_unlock(&pp_handle->pp_lock); 1140 mutex_unlock(&hwmgr->smu_lock);
1213 return 0; 1141 return 0;
1214} 1142}
1215 1143
1216static int pp_get_display_power_level(void *handle, 1144static int pp_get_display_power_level(void *handle,
1217 struct amd_pp_simple_clock_info *output) 1145 struct amd_pp_simple_clock_info *output)
1218{ 1146{
1219 struct pp_hwmgr *hwmgr; 1147 struct pp_hwmgr *hwmgr = handle;
1220 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1221 int ret = 0; 1148 int ret = 0;
1222 1149
1223 ret = pp_check(pp_handle); 1150 ret = pp_check(hwmgr);
1224 1151
1225 if (ret) 1152 if (ret)
1226 return ret; 1153 return ret;
1227 1154
1228 hwmgr = pp_handle->hwmgr;
1229
1230 if (output == NULL) 1155 if (output == NULL)
1231 return -EINVAL; 1156 return -EINVAL;
1232 1157
1233 mutex_lock(&pp_handle->pp_lock); 1158 mutex_lock(&hwmgr->smu_lock);
1234 ret = phm_get_dal_power_level(hwmgr, output); 1159 ret = phm_get_dal_power_level(hwmgr, output);
1235 mutex_unlock(&pp_handle->pp_lock); 1160 mutex_unlock(&hwmgr->smu_lock);
1236 return ret; 1161 return ret;
1237} 1162}
1238 1163
@@ -1241,18 +1166,15 @@ static int pp_get_current_clocks(void *handle,
1241{ 1166{
1242 struct amd_pp_simple_clock_info simple_clocks; 1167 struct amd_pp_simple_clock_info simple_clocks;
1243 struct pp_clock_info hw_clocks; 1168 struct pp_clock_info hw_clocks;
1244 struct pp_hwmgr *hwmgr; 1169 struct pp_hwmgr *hwmgr = handle;
1245 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1246 int ret = 0; 1170 int ret = 0;
1247 1171
1248 ret = pp_check(pp_handle); 1172 ret = pp_check(hwmgr);
1249 1173
1250 if (ret) 1174 if (ret)
1251 return ret; 1175 return ret;
1252 1176
1253 hwmgr = pp_handle->hwmgr; 1177 mutex_lock(&hwmgr->smu_lock);
1254
1255 mutex_lock(&pp_handle->pp_lock);
1256 1178
1257 phm_get_dal_power_level(hwmgr, &simple_clocks); 1179 phm_get_dal_power_level(hwmgr, &simple_clocks);
1258 1180
@@ -1266,7 +1188,7 @@ static int pp_get_current_clocks(void *handle,
1266 1188
1267 if (ret) { 1189 if (ret) {
1268 pr_info("Error in phm_get_clock_info \n"); 1190 pr_info("Error in phm_get_clock_info \n");
1269 mutex_unlock(&pp_handle->pp_lock); 1191 mutex_unlock(&hwmgr->smu_lock);
1270 return -EINVAL; 1192 return -EINVAL;
1271 } 1193 }
1272 1194
@@ -1286,29 +1208,26 @@ static int pp_get_current_clocks(void *handle,
1286 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; 1208 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1287 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; 1209 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1288 } 1210 }
1289 mutex_unlock(&pp_handle->pp_lock); 1211 mutex_unlock(&hwmgr->smu_lock);
1290 return 0; 1212 return 0;
1291} 1213}
1292 1214
1293static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) 1215static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1294{ 1216{
1295 struct pp_hwmgr *hwmgr; 1217 struct pp_hwmgr *hwmgr = handle;
1296 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1297 int ret = 0; 1218 int ret = 0;
1298 1219
1299 ret = pp_check(pp_handle); 1220 ret = pp_check(hwmgr);
1300 1221
1301 if (ret) 1222 if (ret)
1302 return ret; 1223 return ret;
1303 1224
1304 hwmgr = pp_handle->hwmgr;
1305
1306 if (clocks == NULL) 1225 if (clocks == NULL)
1307 return -EINVAL; 1226 return -EINVAL;
1308 1227
1309 mutex_lock(&pp_handle->pp_lock); 1228 mutex_lock(&hwmgr->smu_lock);
1310 ret = phm_get_clock_by_type(hwmgr, type, clocks); 1229 ret = phm_get_clock_by_type(hwmgr, type, clocks);
1311 mutex_unlock(&pp_handle->pp_lock); 1230 mutex_unlock(&hwmgr->smu_lock);
1312 return ret; 1231 return ret;
1313} 1232}
1314 1233
@@ -1316,21 +1235,19 @@ static int pp_get_clock_by_type_with_latency(void *handle,
1316 enum amd_pp_clock_type type, 1235 enum amd_pp_clock_type type,
1317 struct pp_clock_levels_with_latency *clocks) 1236 struct pp_clock_levels_with_latency *clocks)
1318{ 1237{
1319 struct pp_hwmgr *hwmgr; 1238 struct pp_hwmgr *hwmgr = handle;
1320 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1321 int ret = 0; 1239 int ret = 0;
1322 1240
1323 ret = pp_check(pp_handle); 1241 ret = pp_check(hwmgr);
1324 if (ret) 1242 if (ret)
1325 return ret; 1243 return ret;
1326 1244
1327 if (!clocks) 1245 if (!clocks)
1328 return -EINVAL; 1246 return -EINVAL;
1329 1247
1330 mutex_lock(&pp_handle->pp_lock); 1248 mutex_lock(&hwmgr->smu_lock);
1331 hwmgr = ((struct pp_instance *)handle)->hwmgr;
1332 ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks); 1249 ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1333 mutex_unlock(&pp_handle->pp_lock); 1250 mutex_unlock(&hwmgr->smu_lock);
1334 return ret; 1251 return ret;
1335} 1252}
1336 1253
@@ -1338,47 +1255,41 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
1338 enum amd_pp_clock_type type, 1255 enum amd_pp_clock_type type,
1339 struct pp_clock_levels_with_voltage *clocks) 1256 struct pp_clock_levels_with_voltage *clocks)
1340{ 1257{
1341 struct pp_hwmgr *hwmgr; 1258 struct pp_hwmgr *hwmgr = handle;
1342 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1343 int ret = 0; 1259 int ret = 0;
1344 1260
1345 ret = pp_check(pp_handle); 1261 ret = pp_check(hwmgr);
1346 if (ret) 1262 if (ret)
1347 return ret; 1263 return ret;
1348 1264
1349 if (!clocks) 1265 if (!clocks)
1350 return -EINVAL; 1266 return -EINVAL;
1351 1267
1352 hwmgr = ((struct pp_instance *)handle)->hwmgr; 1268 mutex_lock(&hwmgr->smu_lock);
1353
1354 mutex_lock(&pp_handle->pp_lock);
1355 1269
1356 ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks); 1270 ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1357 1271
1358 mutex_unlock(&pp_handle->pp_lock); 1272 mutex_unlock(&hwmgr->smu_lock);
1359 return ret; 1273 return ret;
1360} 1274}
1361 1275
1362static int pp_set_watermarks_for_clocks_ranges(void *handle, 1276static int pp_set_watermarks_for_clocks_ranges(void *handle,
1363 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) 1277 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
1364{ 1278{
1365 struct pp_hwmgr *hwmgr; 1279 struct pp_hwmgr *hwmgr = handle;
1366 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1367 int ret = 0; 1280 int ret = 0;
1368 1281
1369 ret = pp_check(pp_handle); 1282 ret = pp_check(hwmgr);
1370 if (ret) 1283 if (ret)
1371 return ret; 1284 return ret;
1372 1285
1373 if (!wm_with_clock_ranges) 1286 if (!wm_with_clock_ranges)
1374 return -EINVAL; 1287 return -EINVAL;
1375 1288
1376 hwmgr = ((struct pp_instance *)handle)->hwmgr; 1289 mutex_lock(&hwmgr->smu_lock);
1377
1378 mutex_lock(&pp_handle->pp_lock);
1379 ret = phm_set_watermarks_for_clocks_ranges(hwmgr, 1290 ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1380 wm_with_clock_ranges); 1291 wm_with_clock_ranges);
1381 mutex_unlock(&pp_handle->pp_lock); 1292 mutex_unlock(&hwmgr->smu_lock);
1382 1293
1383 return ret; 1294 return ret;
1384} 1295}
@@ -1386,22 +1297,19 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
1386static int pp_display_clock_voltage_request(void *handle, 1297static int pp_display_clock_voltage_request(void *handle,
1387 struct pp_display_clock_request *clock) 1298 struct pp_display_clock_request *clock)
1388{ 1299{
1389 struct pp_hwmgr *hwmgr; 1300 struct pp_hwmgr *hwmgr = handle;
1390 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1391 int ret = 0; 1301 int ret = 0;
1392 1302
1393 ret = pp_check(pp_handle); 1303 ret = pp_check(hwmgr);
1394 if (ret) 1304 if (ret)
1395 return ret; 1305 return ret;
1396 1306
1397 if (!clock) 1307 if (!clock)
1398 return -EINVAL; 1308 return -EINVAL;
1399 1309
1400 hwmgr = ((struct pp_instance *)handle)->hwmgr; 1310 mutex_lock(&hwmgr->smu_lock);
1401
1402 mutex_lock(&pp_handle->pp_lock);
1403 ret = phm_display_clock_voltage_request(hwmgr, clock); 1311 ret = phm_display_clock_voltage_request(hwmgr, clock);
1404 mutex_unlock(&pp_handle->pp_lock); 1312 mutex_unlock(&hwmgr->smu_lock);
1405 1313
1406 return ret; 1314 return ret;
1407} 1315}
@@ -1409,31 +1317,45 @@ static int pp_display_clock_voltage_request(void *handle,
1409static int pp_get_display_mode_validation_clocks(void *handle, 1317static int pp_get_display_mode_validation_clocks(void *handle,
1410 struct amd_pp_simple_clock_info *clocks) 1318 struct amd_pp_simple_clock_info *clocks)
1411{ 1319{
1412 struct pp_hwmgr *hwmgr; 1320 struct pp_hwmgr *hwmgr = handle;
1413 struct pp_instance *pp_handle = (struct pp_instance *)handle;
1414 int ret = 0; 1321 int ret = 0;
1415 1322
1416 ret = pp_check(pp_handle); 1323 ret = pp_check(hwmgr);
1417 1324
1418 if (ret) 1325 if (ret)
1419 return ret; 1326 return ret;
1420 1327
1421 hwmgr = pp_handle->hwmgr;
1422
1423 if (clocks == NULL) 1328 if (clocks == NULL)
1424 return -EINVAL; 1329 return -EINVAL;
1425 1330
1426 mutex_lock(&pp_handle->pp_lock); 1331 mutex_lock(&hwmgr->smu_lock);
1427 1332
1428 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) 1333 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1429 ret = phm_get_max_high_clocks(hwmgr, clocks); 1334 ret = phm_get_max_high_clocks(hwmgr, clocks);
1430 1335
1431 mutex_unlock(&pp_handle->pp_lock); 1336 mutex_unlock(&hwmgr->smu_lock);
1432 return ret; 1337 return ret;
1433} 1338}
1434 1339
1435const struct amd_pm_funcs pp_dpm_funcs = { 1340static int pp_set_mmhub_powergating_by_smu(void *handle)
1436 .get_temperature = pp_dpm_get_temperature, 1341{
1342 struct pp_hwmgr *hwmgr = handle;
1343 int ret = 0;
1344
1345 ret = pp_check(hwmgr);
1346
1347 if (ret)
1348 return ret;
1349
1350 if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
1351 pr_info("%s was not implemented.\n", __func__);
1352 return 0;
1353 }
1354
1355 return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
1356}
1357
1358static const struct amd_pm_funcs pp_dpm_funcs = {
1437 .load_firmware = pp_dpm_load_fw, 1359 .load_firmware = pp_dpm_load_fw,
1438 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, 1360 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1439 .force_performance_level = pp_dpm_force_performance_level, 1361 .force_performance_level = pp_dpm_force_performance_level,
@@ -1458,12 +1380,14 @@ const struct amd_pm_funcs pp_dpm_funcs = {
1458 .set_mclk_od = pp_dpm_set_mclk_od, 1380 .set_mclk_od = pp_dpm_set_mclk_od,
1459 .read_sensor = pp_dpm_read_sensor, 1381 .read_sensor = pp_dpm_read_sensor,
1460 .get_vce_clock_state = pp_dpm_get_vce_clock_state, 1382 .get_vce_clock_state = pp_dpm_get_vce_clock_state,
1461 .reset_power_profile_state = pp_dpm_reset_power_profile_state,
1462 .get_power_profile_state = pp_dpm_get_power_profile_state,
1463 .set_power_profile_state = pp_dpm_set_power_profile_state,
1464 .switch_power_profile = pp_dpm_switch_power_profile, 1383 .switch_power_profile = pp_dpm_switch_power_profile,
1465 .set_clockgating_by_smu = pp_set_clockgating_by_smu, 1384 .set_clockgating_by_smu = pp_set_clockgating_by_smu,
1466 .notify_smu_memory_info = pp_dpm_notify_smu_memory_info, 1385 .notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
1386 .get_power_profile_mode = pp_get_power_profile_mode,
1387 .set_power_profile_mode = pp_set_power_profile_mode,
1388 .odn_edit_dpm_table = pp_odn_edit_dpm_table,
1389 .set_power_limit = pp_set_power_limit,
1390 .get_power_limit = pp_get_power_limit,
1467/* export to DC */ 1391/* export to DC */
1468 .get_sclk = pp_dpm_get_sclk, 1392 .get_sclk = pp_dpm_get_sclk,
1469 .get_mclk = pp_dpm_get_mclk, 1393 .get_mclk = pp_dpm_get_mclk,
@@ -1476,4 +1400,5 @@ const struct amd_pm_funcs pp_dpm_funcs = {
1476 .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges, 1400 .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1477 .display_clock_voltage_request = pp_display_clock_voltage_request, 1401 .display_clock_voltage_request = pp_display_clock_voltage_request,
1478 .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks, 1402 .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1403 .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
1479}; 1404};
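
Editor's note: the amd_powerplay.c hunks above all follow one pattern — the opaque void *handle handed in by the driver is now the struct pp_hwmgr itself rather than a pp_instance wrapper, and serialization moves from pp_handle->pp_lock to hwmgr->smu_lock. A minimal sketch of the resulting wrapper shape, assuming the hwmgr.h context of the file above; pp_check(), smu_lock and the parameter type are taken from the hunks, while pp_example_get_clocks() and the phm_example_get_clocks() callee are illustrative names only:

static int pp_example_get_clocks(void *handle,
		struct pp_clock_levels_with_latency *clocks)
{
	struct pp_hwmgr *hwmgr = handle;	/* handle is the hwmgr, no pp_instance indirection */
	int ret;

	ret = pp_check(hwmgr);			/* rejects a NULL or uninitialized hwmgr */
	if (ret)
		return ret;

	if (!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);		/* replaces pp_handle->pp_lock */
	ret = phm_example_get_clocks(hwmgr, clocks);	/* hypothetical phm_* callee */
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}
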
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index a212c27f2e17..f868b955da92 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -24,14 +24,14 @@
24# It provides the hardware management services for the driver. 24# It provides the hardware management services for the driver.
25 25
26HARDWARE_MGR = hwmgr.o processpptables.o \ 26HARDWARE_MGR = hwmgr.o processpptables.o \
27 hardwaremanager.o pp_acpi.o cz_hwmgr.o \ 27 hardwaremanager.o smu8_hwmgr.o \
28 cz_clockpowergating.o pppcielanes.o\ 28 pppcielanes.o\
29 process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \ 29 process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \
30 smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ 30 smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
31 smu7_clockpowergating.o \ 31 smu7_clockpowergating.o \
32 vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \ 32 vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
33 vega10_thermal.o rv_hwmgr.o pp_psm.o\ 33 vega10_thermal.o smu10_hwmgr.o pp_psm.o\
34 pp_overdriver.o 34 pp_overdriver.o smu_helper.o
35 35
36AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) 36AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
37 37
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
deleted file mode 100644
index 44de0874629f..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ /dev/null
@@ -1,209 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "cz_clockpowergating.h"
26#include "cz_ppsmc.h"
27
28/* PhyID -> Status Mapping in DDI_PHY_GEN_STATUS
29 0 GFX0L (3:0), (27:24),
30 1 GFX0H (7:4), (31:28),
31 2 GFX1L (3:0), (19:16),
32 3 GFX1H (7:4), (23:20),
33 4 DDIL (3:0), (11: 8),
34 5 DDIH (7:4), (15:12),
35 6 DDI2L (3:0), ( 3: 0),
36 7 DDI2H (7:4), ( 7: 4),
37*/
38#define DDI_PHY_GEN_STATUS_VAL(phyID) (1 << ((3 - ((phyID & 0x07)/2))*8 + (phyID & 0x01)*4))
39#define IS_PHY_ID_USED_BY_PLL(PhyID) (((0xF3 & (1 << PhyID)) & 0xFF) ? true : false)
40
41
42int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
43{
44 int ret = 0;
45
46 switch (block) {
47 case PHM_AsicBlock_UVD_MVC:
48 case PHM_AsicBlock_UVD:
49 case PHM_AsicBlock_UVD_HD:
50 case PHM_AsicBlock_UVD_SD:
51 if (gating == PHM_ClockGateSetting_StaticOff)
52 ret = cz_dpm_powerdown_uvd(hwmgr);
53 else
54 ret = cz_dpm_powerup_uvd(hwmgr);
55 break;
56 case PHM_AsicBlock_GFX:
57 default:
58 break;
59 }
60
61 return ret;
62}
63
64
65bool cz_phm_is_safe_for_asic_block(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, enum PHM_AsicBlock block)
66{
67 return true;
68}
69
70
71int cz_phm_enable_disable_gfx_power_gating(struct pp_hwmgr *hwmgr, bool enable)
72{
73 return 0;
74}
75
76int cz_phm_smu_power_up_down_pcie(struct pp_hwmgr *hwmgr, uint32_t target, bool up, uint32_t args)
77{
78 /* TODO */
79 return 0;
80}
81
82int cz_phm_initialize_display_phy_access(struct pp_hwmgr *hwmgr, bool initialize, bool accesshw)
83{
84 /* TODO */
85 return 0;
86}
87
88int cz_phm_get_display_phy_access_info(struct pp_hwmgr *hwmgr)
89{
90 /* TODO */
91 return 0;
92}
93
94int cz_phm_gate_unused_display_phys(struct pp_hwmgr *hwmgr)
95{
96 /* TODO */
97 return 0;
98}
99
100int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
101{
102 /* TODO */
103 return 0;
104}
105
106int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
107{
108 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
109 uint32_t dpm_features = 0;
110
111 if (enable &&
112 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
113 PHM_PlatformCaps_UVDDPM)) {
114 cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
115 dpm_features |= UVD_DPM_MASK;
116 smum_send_msg_to_smc_with_parameter(hwmgr,
117 PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
118 } else {
119 dpm_features |= UVD_DPM_MASK;
120 cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
121 smum_send_msg_to_smc_with_parameter(hwmgr,
122 PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
123 }
124 return 0;
125}
126
127int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
128{
129 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
130 uint32_t dpm_features = 0;
131
132 if (enable && phm_cap_enabled(
133 hwmgr->platform_descriptor.platformCaps,
134 PHM_PlatformCaps_VCEDPM)) {
135 cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
136 dpm_features |= VCE_DPM_MASK;
137 smum_send_msg_to_smc_with_parameter(hwmgr,
138 PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
139 } else {
140 dpm_features |= VCE_DPM_MASK;
141 cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
142 smum_send_msg_to_smc_with_parameter(hwmgr,
143 PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
144 }
145
146 return 0;
147}
148
149
150void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
151{
152 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
153
154 cz_hwmgr->uvd_power_gated = bgate;
155
156 if (bgate) {
157 cgs_set_powergating_state(hwmgr->device,
158 AMD_IP_BLOCK_TYPE_UVD,
159 AMD_PG_STATE_GATE);
160 cgs_set_clockgating_state(hwmgr->device,
161 AMD_IP_BLOCK_TYPE_UVD,
162 AMD_CG_STATE_GATE);
163 cz_dpm_update_uvd_dpm(hwmgr, true);
164 cz_dpm_powerdown_uvd(hwmgr);
165 } else {
166 cz_dpm_powerup_uvd(hwmgr);
167 cgs_set_clockgating_state(hwmgr->device,
168 AMD_IP_BLOCK_TYPE_UVD,
169 AMD_PG_STATE_UNGATE);
170 cgs_set_powergating_state(hwmgr->device,
171 AMD_IP_BLOCK_TYPE_UVD,
172 AMD_CG_STATE_UNGATE);
173 cz_dpm_update_uvd_dpm(hwmgr, false);
174 }
175
176}
177
178void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
179{
180 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
181
182 if (bgate) {
183 cgs_set_powergating_state(
184 hwmgr->device,
185 AMD_IP_BLOCK_TYPE_VCE,
186 AMD_PG_STATE_GATE);
187 cgs_set_clockgating_state(
188 hwmgr->device,
189 AMD_IP_BLOCK_TYPE_VCE,
190 AMD_CG_STATE_GATE);
191 cz_enable_disable_vce_dpm(hwmgr, false);
192 cz_dpm_powerdown_vce(hwmgr);
193 cz_hwmgr->vce_power_gated = true;
194 } else {
195 cz_dpm_powerup_vce(hwmgr);
196 cz_hwmgr->vce_power_gated = false;
197 cgs_set_clockgating_state(
198 hwmgr->device,
199 AMD_IP_BLOCK_TYPE_VCE,
200 AMD_PG_STATE_UNGATE);
201 cgs_set_powergating_state(
202 hwmgr->device,
203 AMD_IP_BLOCK_TYPE_VCE,
204 AMD_CG_STATE_UNGATE);
205 cz_dpm_update_vce_dpm(hwmgr);
206 cz_enable_disable_vce_dpm(hwmgr, true);
207 }
208}
209
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
deleted file mode 100644
index 92f707bc46e7..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _CZ_CLOCK_POWER_GATING_H_
25#define _CZ_CLOCK_POWER_GATING_H_
26
27#include "cz_hwmgr.h"
28#include "pp_asicblocks.h"
29
30extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
31extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
32extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
33extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
34extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
35extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
36#endif /* _CZ_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 2b0c53fe4c8d..b784131d0f87 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -79,6 +79,11 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
79 bool enabled; 79 bool enabled;
80 PHM_FUNC_CHECK(hwmgr); 80 PHM_FUNC_CHECK(hwmgr);
81 81
82 if (smum_is_dpm_running(hwmgr)) {
83 pr_info("dpm has been enabled\n");
84 return 0;
85 }
86
82 if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) 87 if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
83 ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); 88 ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
84 89
@@ -96,6 +101,11 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
96 101
97 PHM_FUNC_CHECK(hwmgr); 102 PHM_FUNC_CHECK(hwmgr);
98 103
104 if (!smum_is_dpm_running(hwmgr)) {
105 pr_info("dpm has been disabled\n");
106 return 0;
107 }
108
99 if (hwmgr->hwmgr_func->dynamic_state_management_disable) 109 if (hwmgr->hwmgr_func->dynamic_state_management_disable)
100 ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr); 110 ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
101 111
@@ -118,23 +128,6 @@ int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level
118 return ret; 128 return ret;
119} 129}
120 130
121int phm_reset_power_profile_state(struct pp_hwmgr *hwmgr)
122{
123 int ret = 0;
124
125 if (hwmgr->hwmgr_func->set_power_profile_state) {
126 if (hwmgr->current_power_profile == AMD_PP_GFX_PROFILE)
127 ret = hwmgr->hwmgr_func->set_power_profile_state(
128 hwmgr,
129 &hwmgr->gfx_power_profile);
130 else if (hwmgr->current_power_profile == AMD_PP_COMPUTE_PROFILE)
131 ret = hwmgr->hwmgr_func->set_power_profile_state(
132 hwmgr,
133 &hwmgr->compute_power_profile);
134 }
135 return ret;
136}
137
138int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 131int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
139 struct pp_power_state *adjusted_ps, 132 struct pp_power_state *adjusted_ps,
140 const struct pp_power_state *current_ps) 133 const struct pp_power_state *current_ps)
@@ -223,26 +216,27 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info)
223* Initializes the thermal controller subsystem. 216* Initializes the thermal controller subsystem.
224* 217*
225* @param pHwMgr the address of the powerplay hardware manager. 218* @param pHwMgr the address of the powerplay hardware manager.
226* @param pTemperatureRange the address of the structure holding the temperature range.
227* @exception PP_Result_Failed if any of the paramters is NULL, otherwise the return value from the dispatcher. 219* @exception PP_Result_Failed if any of the paramters is NULL, otherwise the return value from the dispatcher.
228*/ 220*/
229int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range) 221int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
230{ 222{
231 struct PP_TemperatureRange range; 223 int ret = 0;
232 224 struct PP_TemperatureRange range = {TEMP_RANGE_MIN, TEMP_RANGE_MAX};
233 if (temperature_range == NULL) { 225 struct amdgpu_device *adev = hwmgr->adev;
234 range.max = TEMP_RANGE_MAX; 226
235 range.min = TEMP_RANGE_MIN; 227 if (hwmgr->hwmgr_func->get_thermal_temperature_range)
236 } else { 228 hwmgr->hwmgr_func->get_thermal_temperature_range(
237 range.max = temperature_range->max; 229 hwmgr, &range);
238 range.min = temperature_range->min; 230
239 }
240 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 231 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
241 PHM_PlatformCaps_ThermalController) 232 PHM_PlatformCaps_ThermalController)
242 && hwmgr->hwmgr_func->start_thermal_controller != NULL) 233 && hwmgr->hwmgr_func->start_thermal_controller != NULL)
243 return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range); 234 ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);
244 235
245 return 0; 236 adev->pm.dpm.thermal.min_temp = range.min;
237 adev->pm.dpm.thermal.max_temp = range.max;
238
239 return ret;
246} 240}
247 241
248 242
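
Editor's note: phm_start_thermal_controller() no longer takes a caller-supplied range — it seeds {TEMP_RANGE_MIN, TEMP_RANGE_MAX}, lets the ASIC hwmgr narrow it through get_thermal_temperature_range(), and mirrors the result into adev->pm.dpm.thermal. A hedged sketch of what such a callback might look like; the struct and callback names come from the hunk above, the limits are illustrative and not taken from any real thermal table:

static int example_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *range)
{
	range->min = 0;			/* illustrative; same units as TEMP_RANGE_MIN */
	range->max = 100 * 1000;	/* illustrative; same units as TEMP_RANGE_MAX */
	return 0;
}
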
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 0229f774f7a9..229030027f3e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -30,23 +30,24 @@
30#include <drm/amdgpu_drm.h> 30#include <drm/amdgpu_drm.h>
31#include "power_state.h" 31#include "power_state.h"
32#include "hwmgr.h" 32#include "hwmgr.h"
33#include "pppcielanes.h"
34#include "ppatomctrl.h"
35#include "ppsmc.h" 33#include "ppsmc.h"
36#include "pp_acpi.h"
37#include "amd_acpi.h" 34#include "amd_acpi.h"
38#include "pp_psm.h" 35#include "pp_psm.h"
39 36
40extern const struct pp_smumgr_func ci_smu_funcs; 37extern const struct pp_smumgr_func ci_smu_funcs;
41extern const struct pp_smumgr_func cz_smu_funcs; 38extern const struct pp_smumgr_func smu8_smu_funcs;
42extern const struct pp_smumgr_func iceland_smu_funcs; 39extern const struct pp_smumgr_func iceland_smu_funcs;
43extern const struct pp_smumgr_func tonga_smu_funcs; 40extern const struct pp_smumgr_func tonga_smu_funcs;
44extern const struct pp_smumgr_func fiji_smu_funcs; 41extern const struct pp_smumgr_func fiji_smu_funcs;
45extern const struct pp_smumgr_func polaris10_smu_funcs; 42extern const struct pp_smumgr_func polaris10_smu_funcs;
46extern const struct pp_smumgr_func vega10_smu_funcs; 43extern const struct pp_smumgr_func vega10_smu_funcs;
47extern const struct pp_smumgr_func rv_smu_funcs; 44extern const struct pp_smumgr_func smu10_smu_funcs;
45
46extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
47extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
48extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
49extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
48 50
49extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
50static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); 51static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
51static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); 52static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
52static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); 53static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
@@ -55,35 +56,16 @@ static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
55static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr); 56static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
56static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr); 57static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
57 58
58uint8_t convert_to_vid(uint16_t vddc)
59{
60 return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
61}
62
63static int phm_get_pci_bus_devfn(struct pp_hwmgr *hwmgr,
64 struct cgs_system_info *sys_info)
65{
66 sys_info->size = sizeof(struct cgs_system_info);
67 sys_info->info_id = CGS_SYSTEM_INFO_PCIE_BUS_DEVFN;
68
69 return cgs_query_system_info(hwmgr->device, sys_info);
70}
71
72static int phm_thermal_l2h_irq(void *private_data, 59static int phm_thermal_l2h_irq(void *private_data,
73 unsigned src_id, const uint32_t *iv_entry) 60 unsigned src_id, const uint32_t *iv_entry)
74{ 61{
75 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data; 62 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
76 struct cgs_system_info sys_info = {0}; 63 struct amdgpu_device *adev = hwmgr->adev;
77 int result;
78
79 result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
80 if (result)
81 return -EINVAL;
82 64
83 pr_warn("GPU over temperature range detected on PCIe %lld:%lld.%lld!\n", 65 pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
84 PCI_BUS_NUM(sys_info.value), 66 PCI_BUS_NUM(adev->pdev->devfn),
85 PCI_SLOT(sys_info.value), 67 PCI_SLOT(adev->pdev->devfn),
86 PCI_FUNC(sys_info.value)); 68 PCI_FUNC(adev->pdev->devfn));
87 return 0; 69 return 0;
88} 70}
89 71
@@ -91,17 +73,12 @@ static int phm_thermal_h2l_irq(void *private_data,
91 unsigned src_id, const uint32_t *iv_entry) 73 unsigned src_id, const uint32_t *iv_entry)
92{ 74{
93 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data; 75 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
94 struct cgs_system_info sys_info = {0}; 76 struct amdgpu_device *adev = hwmgr->adev;
95 int result;
96 77
97 result = phm_get_pci_bus_devfn(hwmgr, &sys_info); 78 pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
98 if (result) 79 PCI_BUS_NUM(adev->pdev->devfn),
99 return -EINVAL; 80 PCI_SLOT(adev->pdev->devfn),
100 81 PCI_FUNC(adev->pdev->devfn));
101 pr_warn("GPU under temperature range detected on PCIe %lld:%lld.%lld!\n",
102 PCI_BUS_NUM(sys_info.value),
103 PCI_SLOT(sys_info.value),
104 PCI_FUNC(sys_info.value));
105 return 0; 82 return 0;
106} 83}
107 84
@@ -109,17 +86,12 @@ static int phm_ctf_irq(void *private_data,
109 unsigned src_id, const uint32_t *iv_entry) 86 unsigned src_id, const uint32_t *iv_entry)
110{ 87{
111 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data; 88 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
112 struct cgs_system_info sys_info = {0}; 89 struct amdgpu_device *adev = hwmgr->adev;
113 int result;
114
115 result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
116 if (result)
117 return -EINVAL;
118 90
119 pr_warn("GPU Critical Temperature Fault detected on PCIe %lld:%lld.%lld!\n", 91 pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
120 PCI_BUS_NUM(sys_info.value), 92 PCI_BUS_NUM(adev->pdev->devfn),
121 PCI_SLOT(sys_info.value), 93 PCI_SLOT(adev->pdev->devfn),
122 PCI_FUNC(sys_info.value)); 94 PCI_FUNC(adev->pdev->devfn));
123 return 0; 95 return 0;
124} 96}
125 97
@@ -129,22 +101,26 @@ static const struct cgs_irq_src_funcs thermal_irq_src[3] = {
129 { .handler = phm_ctf_irq } 101 { .handler = phm_ctf_irq }
130}; 102};
131 103
132int hwmgr_early_init(struct pp_instance *handle) 104static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
133{ 105{
134 struct pp_hwmgr *hwmgr; 106 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2;
107 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0;
108 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1;
109 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3;
110 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4;
135 111
136 if (handle == NULL) 112 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING;
137 return -EINVAL; 113 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO;
114 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
115 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR;
116 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
117}
138 118
139 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL); 119int hwmgr_early_init(struct pp_hwmgr *hwmgr)
120{
140 if (hwmgr == NULL) 121 if (hwmgr == NULL)
141 return -ENOMEM; 122 return -EINVAL;
142 123
143 handle->hwmgr = hwmgr;
144 hwmgr->device = handle->device;
145 hwmgr->chip_family = handle->chip_family;
146 hwmgr->chip_id = handle->chip_id;
147 hwmgr->feature_mask = handle->feature_mask;
148 hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; 124 hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
149 hwmgr->power_source = PP_PowerSource_AC; 125 hwmgr->power_source = PP_PowerSource_AC;
150 hwmgr->pp_table_version = PP_TABLE_V1; 126 hwmgr->pp_table_version = PP_TABLE_V1;
@@ -154,6 +130,7 @@ int hwmgr_early_init(struct pp_instance *handle)
154 hwmgr_set_user_specify_caps(hwmgr); 130 hwmgr_set_user_specify_caps(hwmgr);
155 hwmgr->fan_ctrl_is_in_default_mode = true; 131 hwmgr->fan_ctrl_is_in_default_mode = true;
156 hwmgr->reload_fw = 1; 132 hwmgr->reload_fw = 1;
133 hwmgr_init_workload_prority(hwmgr);
157 134
158 switch (hwmgr->chip_family) { 135 switch (hwmgr->chip_family) {
159 case AMDGPU_FAMILY_CI: 136 case AMDGPU_FAMILY_CI:
@@ -162,11 +139,13 @@ int hwmgr_early_init(struct pp_instance *handle)
162 hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | 139 hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
163 PP_ENABLE_GFX_CG_THRU_SMU); 140 PP_ENABLE_GFX_CG_THRU_SMU);
164 hwmgr->pp_table_version = PP_TABLE_V0; 141 hwmgr->pp_table_version = PP_TABLE_V0;
142 hwmgr->od_enabled = false;
165 smu7_init_function_pointers(hwmgr); 143 smu7_init_function_pointers(hwmgr);
166 break; 144 break;
167 case AMDGPU_FAMILY_CZ: 145 case AMDGPU_FAMILY_CZ:
168 hwmgr->smumgr_funcs = &cz_smu_funcs; 146 hwmgr->od_enabled = false;
169 cz_init_function_pointers(hwmgr); 147 hwmgr->smumgr_funcs = &smu8_smu_funcs;
148 smu8_init_function_pointers(hwmgr);
170 break; 149 break;
171 case AMDGPU_FAMILY_VI: 150 case AMDGPU_FAMILY_VI:
172 switch (hwmgr->chip_id) { 151 switch (hwmgr->chip_id) {
@@ -176,6 +155,7 @@ int hwmgr_early_init(struct pp_instance *handle)
176 hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK | 155 hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
177 PP_ENABLE_GFX_CG_THRU_SMU); 156 PP_ENABLE_GFX_CG_THRU_SMU);
178 hwmgr->pp_table_version = PP_TABLE_V0; 157 hwmgr->pp_table_version = PP_TABLE_V0;
158 hwmgr->od_enabled = false;
179 break; 159 break;
180 case CHIP_TONGA: 160 case CHIP_TONGA:
181 hwmgr->smumgr_funcs = &tonga_smu_funcs; 161 hwmgr->smumgr_funcs = &tonga_smu_funcs;
@@ -213,8 +193,9 @@ int hwmgr_early_init(struct pp_instance *handle)
213 case AMDGPU_FAMILY_RV: 193 case AMDGPU_FAMILY_RV:
214 switch (hwmgr->chip_id) { 194 switch (hwmgr->chip_id) {
215 case CHIP_RAVEN: 195 case CHIP_RAVEN:
216 hwmgr->smumgr_funcs = &rv_smu_funcs; 196 hwmgr->od_enabled = false;
217 rv_init_function_pointers(hwmgr); 197 hwmgr->smumgr_funcs = &smu10_smu_funcs;
198 smu10_init_function_pointers(hwmgr);
218 break; 199 break;
219 default: 200 default:
220 return -EINVAL; 201 return -EINVAL;
@@ -227,16 +208,13 @@ int hwmgr_early_init(struct pp_instance *handle)
227 return 0; 208 return 0;
228} 209}
229 210
230int hwmgr_hw_init(struct pp_instance *handle) 211int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
231{ 212{
232 struct pp_hwmgr *hwmgr;
233 int ret = 0; 213 int ret = 0;
234 214
235 if (handle == NULL) 215 if (hwmgr == NULL)
236 return -EINVAL; 216 return -EINVAL;
237 217
238 hwmgr = handle->hwmgr;
239
240 if (hwmgr->pptable_func == NULL || 218 if (hwmgr->pptable_func == NULL ||
241 hwmgr->pptable_func->pptable_init == NULL || 219 hwmgr->pptable_func->pptable_init == NULL ||
242 hwmgr->hwmgr_func->backend_init == NULL) 220 hwmgr->hwmgr_func->backend_init == NULL)
@@ -261,7 +239,7 @@ int hwmgr_hw_init(struct pp_instance *handle)
261 ret = phm_enable_dynamic_state_management(hwmgr); 239 ret = phm_enable_dynamic_state_management(hwmgr);
262 if (ret) 240 if (ret)
263 goto err2; 241 goto err2;
264 ret = phm_start_thermal_controller(hwmgr, NULL); 242 ret = phm_start_thermal_controller(hwmgr);
265 ret |= psm_set_performance_states(hwmgr); 243 ret |= psm_set_performance_states(hwmgr);
266 if (ret) 244 if (ret)
267 goto err2; 245 goto err2;
@@ -282,15 +260,11 @@ err:
282 return ret; 260 return ret;
283} 261}
284 262
285int hwmgr_hw_fini(struct pp_instance *handle) 263int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
286{ 264{
287 struct pp_hwmgr *hwmgr; 265 if (hwmgr == NULL)
288
289 if (handle == NULL || handle->hwmgr == NULL)
290 return -EINVAL; 266 return -EINVAL;
291 267
292 hwmgr = handle->hwmgr;
293
294 phm_stop_thermal_controller(hwmgr); 268 phm_stop_thermal_controller(hwmgr);
295 psm_set_boot_states(hwmgr); 269 psm_set_boot_states(hwmgr);
296 psm_adjust_power_state_dynamic(hwmgr, false, NULL); 270 psm_adjust_power_state_dynamic(hwmgr, false, NULL);
@@ -304,15 +278,13 @@ int hwmgr_hw_fini(struct pp_instance *handle)
304 return psm_fini_power_state_table(hwmgr); 278 return psm_fini_power_state_table(hwmgr);
305} 279}
306 280
307int hwmgr_hw_suspend(struct pp_instance *handle) 281int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
308{ 282{
309 struct pp_hwmgr *hwmgr;
310 int ret = 0; 283 int ret = 0;
311 284
312 if (handle == NULL || handle->hwmgr == NULL) 285 if (hwmgr == NULL)
313 return -EINVAL; 286 return -EINVAL;
314 287
315 hwmgr = handle->hwmgr;
316 phm_disable_smc_firmware_ctf(hwmgr); 288 phm_disable_smc_firmware_ctf(hwmgr);
317 ret = psm_set_boot_states(hwmgr); 289 ret = psm_set_boot_states(hwmgr);
318 if (ret) 290 if (ret)
@@ -325,15 +297,13 @@ int hwmgr_hw_suspend(struct pp_instance *handle)
325 return ret; 297 return ret;
326} 298}
327 299
328int hwmgr_hw_resume(struct pp_instance *handle) 300int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
329{ 301{
330 struct pp_hwmgr *hwmgr;
331 int ret = 0; 302 int ret = 0;
332 303
333 if (handle == NULL || handle->hwmgr == NULL) 304 if (hwmgr == NULL)
334 return -EINVAL; 305 return -EINVAL;
335 306
336 hwmgr = handle->hwmgr;
337 ret = phm_setup_asic(hwmgr); 307 ret = phm_setup_asic(hwmgr);
338 if (ret) 308 if (ret)
339 return ret; 309 return ret;
@@ -341,7 +311,7 @@ int hwmgr_hw_resume(struct pp_instance *handle)
341 ret = phm_enable_dynamic_state_management(hwmgr); 311 ret = phm_enable_dynamic_state_management(hwmgr);
342 if (ret) 312 if (ret)
343 return ret; 313 return ret;
344 ret = phm_start_thermal_controller(hwmgr, NULL); 314 ret = phm_start_thermal_controller(hwmgr);
345 if (ret) 315 if (ret)
346 return ret; 316 return ret;
347 317
@@ -368,17 +338,14 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
368 } 338 }
369} 339}
370 340
371int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id, 341int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
372 void *input, void *output) 342 enum amd_pm_state_type *user_state)
373{ 343{
374 int ret = 0; 344 int ret = 0;
375 struct pp_hwmgr *hwmgr;
376 345
377 if (handle == NULL || handle->hwmgr == NULL) 346 if (hwmgr == NULL)
378 return -EINVAL; 347 return -EINVAL;
379 348
380 hwmgr = handle->hwmgr;
381
382 switch (task_id) { 349 switch (task_id) {
383 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 350 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
384 ret = phm_set_cpu_power_state(hwmgr); 351 ret = phm_set_cpu_power_state(hwmgr);
@@ -391,17 +358,15 @@ int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
391 break; 358 break;
392 case AMD_PP_TASK_ENABLE_USER_STATE: 359 case AMD_PP_TASK_ENABLE_USER_STATE:
393 { 360 {
394 enum amd_pm_state_type ps;
395 enum PP_StateUILabel requested_ui_label; 361 enum PP_StateUILabel requested_ui_label;
396 struct pp_power_state *requested_ps = NULL; 362 struct pp_power_state *requested_ps = NULL;
397 363
398 if (input == NULL) { 364 if (user_state == NULL) {
399 ret = -EINVAL; 365 ret = -EINVAL;
400 break; 366 break;
401 } 367 }
402 ps = *(unsigned long *)input;
403 368
404 requested_ui_label = power_state_convert(ps); 369 requested_ui_label = power_state_convert(*user_state);
405 ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps); 370 ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
406 if (ret) 371 if (ret)
407 return ret; 372 return ret;
@@ -417,468 +382,6 @@ int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
417 } 382 }
418 return ret; 383 return ret;
419} 384}
420/**
421 * Returns once the part of the register indicated by the mask has
422 * reached the given value.
423 */
424int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
425 uint32_t value, uint32_t mask)
426{
427 uint32_t i;
428 uint32_t cur_value;
429
430 if (hwmgr == NULL || hwmgr->device == NULL) {
431 pr_err("Invalid Hardware Manager!");
432 return -EINVAL;
433 }
434
435 for (i = 0; i < hwmgr->usec_timeout; i++) {
436 cur_value = cgs_read_register(hwmgr->device, index);
437 if ((cur_value & mask) == (value & mask))
438 break;
439 udelay(1);
440 }
441
442 /* timeout means wrong logic*/
443 if (i == hwmgr->usec_timeout)
444 return -1;
445 return 0;
446}
447
448
449/**
450 * Returns once the part of the register indicated by the mask has
451 * reached the given value.The indirect space is described by giving
452 * the memory-mapped index of the indirect index register.
453 */
454int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
455 uint32_t indirect_port,
456 uint32_t index,
457 uint32_t value,
458 uint32_t mask)
459{
460 if (hwmgr == NULL || hwmgr->device == NULL) {
461 pr_err("Invalid Hardware Manager!");
462 return -EINVAL;
463 }
464
465 cgs_write_register(hwmgr->device, indirect_port, index);
466 return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
467}
468
469int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
470 uint32_t index,
471 uint32_t value, uint32_t mask)
472{
473 uint32_t i;
474 uint32_t cur_value;
475
476 if (hwmgr == NULL || hwmgr->device == NULL)
477 return -EINVAL;
478
479 for (i = 0; i < hwmgr->usec_timeout; i++) {
480 cur_value = cgs_read_register(hwmgr->device,
481 index);
482 if ((cur_value & mask) != (value & mask))
483 break;
484 udelay(1);
485 }
486
487 /* timeout means wrong logic */
488 if (i == hwmgr->usec_timeout)
489 return -ETIME;
490 return 0;
491}
492
493int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
494 uint32_t indirect_port,
495 uint32_t index,
496 uint32_t value,
497 uint32_t mask)
498{
499 if (hwmgr == NULL || hwmgr->device == NULL)
500 return -EINVAL;
501
502 cgs_write_register(hwmgr->device, indirect_port, index);
503 return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
504 value, mask);
505}
506
507bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
508{
509 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
510}
511
512bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
513{
514 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
515}
516
517
518int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
519{
520 uint32_t i, j;
521 uint16_t vvalue;
522 bool found = false;
523 struct pp_atomctrl_voltage_table *table;
524
525 PP_ASSERT_WITH_CODE((NULL != vol_table),
526 "Voltage Table empty.", return -EINVAL);
527
528 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
529 GFP_KERNEL);
530
531 if (NULL == table)
532 return -EINVAL;
533
534 table->mask_low = vol_table->mask_low;
535 table->phase_delay = vol_table->phase_delay;
536
537 for (i = 0; i < vol_table->count; i++) {
538 vvalue = vol_table->entries[i].value;
539 found = false;
540
541 for (j = 0; j < table->count; j++) {
542 if (vvalue == table->entries[j].value) {
543 found = true;
544 break;
545 }
546 }
547
548 if (!found) {
549 table->entries[table->count].value = vvalue;
550 table->entries[table->count].smio_low =
551 vol_table->entries[i].smio_low;
552 table->count++;
553 }
554 }
555
556 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
557 kfree(table);
558 table = NULL;
559 return 0;
560}
561
562int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
563 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
564{
565 uint32_t i;
566 int result;
567
568 PP_ASSERT_WITH_CODE((0 != dep_table->count),
569 "Voltage Dependency Table empty.", return -EINVAL);
570
571 PP_ASSERT_WITH_CODE((NULL != vol_table),
572 "vol_table empty.", return -EINVAL);
573
574 vol_table->mask_low = 0;
575 vol_table->phase_delay = 0;
576 vol_table->count = dep_table->count;
577
578 for (i = 0; i < dep_table->count; i++) {
579 vol_table->entries[i].value = dep_table->entries[i].mvdd;
580 vol_table->entries[i].smio_low = 0;
581 }
582
583 result = phm_trim_voltage_table(vol_table);
584 PP_ASSERT_WITH_CODE((0 == result),
585 "Failed to trim MVDD table.", return result);
586
587 return 0;
588}
589
590int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
591 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
592{
593 uint32_t i;
594 int result;
595
596 PP_ASSERT_WITH_CODE((0 != dep_table->count),
597 "Voltage Dependency Table empty.", return -EINVAL);
598
599 PP_ASSERT_WITH_CODE((NULL != vol_table),
600 "vol_table empty.", return -EINVAL);
601
602 vol_table->mask_low = 0;
603 vol_table->phase_delay = 0;
604 vol_table->count = dep_table->count;
605
606 for (i = 0; i < dep_table->count; i++) {
607 vol_table->entries[i].value = dep_table->entries[i].vddci;
608 vol_table->entries[i].smio_low = 0;
609 }
610
611 result = phm_trim_voltage_table(vol_table);
612 PP_ASSERT_WITH_CODE((0 == result),
613 "Failed to trim VDDCI table.", return result);
614
615 return 0;
616}
617
618int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
619 phm_ppt_v1_voltage_lookup_table *lookup_table)
620{
621 int i = 0;
622
623 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
624 "Voltage Lookup Table empty.", return -EINVAL);
625
626 PP_ASSERT_WITH_CODE((NULL != vol_table),
627 "vol_table empty.", return -EINVAL);
628
629 vol_table->mask_low = 0;
630 vol_table->phase_delay = 0;
631
632 vol_table->count = lookup_table->count;
633
634 for (i = 0; i < vol_table->count; i++) {
635 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
636 vol_table->entries[i].smio_low = 0;
637 }
638
639 return 0;
640}
641
642void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
643 struct pp_atomctrl_voltage_table *vol_table)
644{
645 unsigned int i, diff;
646
647 if (vol_table->count <= max_vol_steps)
648 return;
649
650 diff = vol_table->count - max_vol_steps;
651
652 for (i = 0; i < max_vol_steps; i++)
653 vol_table->entries[i] = vol_table->entries[i + diff];
654
655 vol_table->count = max_vol_steps;
656
657 return;
658}
659
660int phm_reset_single_dpm_table(void *table,
661 uint32_t count, int max)
662{
663 int i;
664
665 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
666
667 dpm_table->count = count > max ? max : count;
668
669 for (i = 0; i < dpm_table->count; i++)
670 dpm_table->dpm_level[i].enabled = false;
671
672 return 0;
673}
674
675void phm_setup_pcie_table_entry(
676 void *table,
677 uint32_t index, uint32_t pcie_gen,
678 uint32_t pcie_lanes)
679{
680 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
681 dpm_table->dpm_level[index].value = pcie_gen;
682 dpm_table->dpm_level[index].param1 = pcie_lanes;
683 dpm_table->dpm_level[index].enabled = 1;
684}
685
686int32_t phm_get_dpm_level_enable_mask_value(void *table)
687{
688 int32_t i;
689 int32_t mask = 0;
690 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
691
692 for (i = dpm_table->count; i > 0; i--) {
693 mask = mask << 1;
694 if (dpm_table->dpm_level[i - 1].enabled)
695 mask |= 0x1;
696 else
697 mask &= 0xFFFFFFFE;
698 }
699
700 return mask;
701}
702
703uint8_t phm_get_voltage_index(
704 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
705{
706 uint8_t count = (uint8_t) (lookup_table->count);
707 uint8_t i;
708
709 PP_ASSERT_WITH_CODE((NULL != lookup_table),
710 "Lookup Table empty.", return 0);
711 PP_ASSERT_WITH_CODE((0 != count),
712 "Lookup Table empty.", return 0);
713
714 for (i = 0; i < lookup_table->count; i++) {
715 /* find first voltage equal or bigger than requested */
716 if (lookup_table->entries[i].us_vdd >= voltage)
717 return i;
718 }
719 /* voltage is bigger than max voltage in the table */
720 return i - 1;
721}
722
723uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
724 uint32_t voltage)
725{
726 uint8_t count = (uint8_t) (voltage_table->count);
727 uint8_t i = 0;
728
729 PP_ASSERT_WITH_CODE((NULL != voltage_table),
730 "Voltage Table empty.", return 0;);
731 PP_ASSERT_WITH_CODE((0 != count),
732 "Voltage Table empty.", return 0;);
733
734 for (i = 0; i < count; i++) {
735 /* find first voltage bigger than requested */
736 if (voltage_table->entries[i].value >= voltage)
737 return i;
738 }
739
740 /* voltage is bigger than max voltage in the table */
741 return i - 1;
742}
743
744uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
745{
746 uint32_t i;
747
748 for (i = 0; i < vddci_table->count; i++) {
749 if (vddci_table->entries[i].value >= vddci)
750 return vddci_table->entries[i].value;
751 }
752
753 pr_debug("vddci is larger than max value in vddci_table\n");
754 return vddci_table->entries[i-1].value;
755}
756
757int phm_find_boot_level(void *table,
758 uint32_t value, uint32_t *boot_level)
759{
760 int result = -EINVAL;
761 uint32_t i;
762 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
763
764 for (i = 0; i < dpm_table->count; i++) {
765 if (value == dpm_table->dpm_level[i].value) {
766 *boot_level = i;
767 result = 0;
768 }
769 }
770
771 return result;
772}
773
774int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
775 phm_ppt_v1_voltage_lookup_table *lookup_table,
776 uint16_t virtual_voltage_id, int32_t *sclk)
777{
778 uint8_t entry_id;
779 uint8_t voltage_id;
780 struct phm_ppt_v1_information *table_info =
781 (struct phm_ppt_v1_information *)(hwmgr->pptable);
782
783 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
784
785 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
786 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
787 voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
788 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
789 break;
790 }
791
792 if (entry_id >= table_info->vdd_dep_on_sclk->count) {
793 pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
794 return -EINVAL;
795 }
796
797 *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
798
799 return 0;
800}
801
802/**
803 * Initialize Dynamic State Adjustment Rule Settings
804 *
805 * @param hwmgr the address of the powerplay hardware manager.
806 */
807int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
808{
809 uint32_t table_size;
810 struct phm_clock_voltage_dependency_table *table_clk_vlt;
811 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
812
813 /* initialize vddc_dep_on_dal_pwrl table */
814 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
815 table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
816
817 if (NULL == table_clk_vlt) {
818 pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
819 return -ENOMEM;
820 } else {
821 table_clk_vlt->count = 4;
822 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
823 table_clk_vlt->entries[0].v = 0;
824 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
825 table_clk_vlt->entries[1].v = 720;
826 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
827 table_clk_vlt->entries[2].v = 810;
828 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
829 table_clk_vlt->entries[3].v = 900;
830 if (pptable_info != NULL)
831 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
832 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
833 }
834
835 return 0;
836}
837
838uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
839{
840 uint32_t level = 0;
841
842 while (0 == (mask & (1 << level)))
843 level++;
844
845 return level;
846}
847
848void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
849{
850 struct phm_ppt_v1_information *table_info =
851 (struct phm_ppt_v1_information *)hwmgr->pptable;
852 struct phm_clock_voltage_dependency_table *table =
853 table_info->vddc_dep_on_dal_pwrl;
854 struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
855 enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
856 uint32_t req_vddc = 0, req_volt, i;
857
858 if (!table || table->count <= 0
859 || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
860 || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
861 return;
862
863 for (i = 0; i < table->count; i++) {
864 if (dal_power_level == table->entries[i].clk) {
865 req_vddc = table->entries[i].v;
866 break;
867 }
868 }
869
870 vddc_table = table_info->vdd_dep_on_sclk;
871 for (i = 0; i < vddc_table->count; i++) {
872 if (req_vddc <= vddc_table->entries[i].vddc) {
873 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
874 smum_send_msg_to_smc_with_parameter(hwmgr,
875 PPSMC_MSG_VddC_Request, req_volt);
876 return;
877 }
878 }
879 pr_err("DAL requested level can not"
880 " found a available voltage in VDDC DPM Table \n");
881}
882 385
883void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr) 386void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
884{ 387{
@@ -887,9 +390,10 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
887 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM); 390 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
888 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM); 391 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
889 392
890 if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) && 393#if defined(CONFIG_ACPI)
891 acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION)) 394 if (amdgpu_acpi_is_pcie_performance_request_supported(hwmgr->adev))
892 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); 395 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
396#endif
893 397
894 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 398 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
895 PHM_PlatformCaps_DynamicPatchPowerState); 399 PHM_PlatformCaps_DynamicPatchPowerState);
@@ -932,26 +436,10 @@ int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
932 PHM_PlatformCaps_CAC); 436 PHM_PlatformCaps_CAC);
933 } 437 }
934 438
935 return 0; 439 if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
936} 440 hwmgr->od_enabled = true;
937 441
938int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, 442 return 0;
939 uint32_t sclk, uint16_t id, uint16_t *voltage)
940{
941 uint32_t vol;
942 int ret = 0;
943
944 if (hwmgr->chip_id < CHIP_TONGA) {
945 ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
946 } else if (hwmgr->chip_id < CHIP_POLARIS10) {
947 ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
948 if (*voltage >= 2000 || *voltage == 0)
949 *voltage = 1150;
950 } else {
951 ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
952 *voltage = (uint16_t)(vol/100);
953 }
954 return ret;
955} 443}
956 444
957int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) 445int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
deleted file mode 100644
index f6b4dd96c0ec..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ /dev/null
@@ -1,114 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/errno.h>
26#include "hwmgr.h"
27#include "amd_acpi.h"
28#include "pp_acpi.h"
29
30bool acpi_atcs_functions_supported(void *device, uint32_t index)
31{
32 int32_t result;
33 struct atcs_verify_interface output_buf = {0};
34
35 int32_t temp_buffer = 1;
36
37 result = cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
38 ATCS_FUNCTION_VERIFY_INTERFACE,
39 &temp_buffer,
40 &output_buf,
41 1,
42 sizeof(temp_buffer),
43 sizeof(output_buf));
44
45 return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
46}
47
48bool acpi_atcs_notify_pcie_device_ready(void *device)
49{
50 int32_t temp_buffer = 1;
51
52 return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
53 ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
54 &temp_buffer,
55 NULL,
56 0,
57 sizeof(temp_buffer),
58 0);
59}
60
61
62int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
63{
64 struct atcs_pref_req_input atcs_input;
65 struct atcs_pref_req_output atcs_output;
66 u32 retry = 3;
67 int result;
68 struct cgs_system_info info = {0};
69
70 if (acpi_atcs_notify_pcie_device_ready(device))
71 return -EINVAL;
72
73 info.size = sizeof(struct cgs_system_info);
74 info.info_id = CGS_SYSTEM_INFO_ADAPTER_BDF_ID;
75 result = cgs_query_system_info(device, &info);
76 if (result != 0)
77 return -EINVAL;
78 atcs_input.client_id = (uint16_t)info.value;
79 atcs_input.size = sizeof(struct atcs_pref_req_input);
80 atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
81 atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
82 if (advertise)
83 atcs_input.flags |= ATCS_ADVERTISE_CAPS;
84 atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
85 atcs_input.perf_req = perf_req;
86
87 atcs_output.size = sizeof(struct atcs_pref_req_input);
88
89 while (retry--) {
90 result = cgs_call_acpi_method(device,
91 CGS_ACPI_METHOD_ATCS,
92 ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
93 &atcs_input,
94 &atcs_output,
95 1,
96 sizeof(atcs_input),
97 sizeof(atcs_output));
98 if (result != 0)
99 return -EIO;
100
101 switch (atcs_output.ret_val) {
102 case ATCS_REQUEST_REFUSED:
103 default:
104 return -EINVAL;
105 case ATCS_REQUEST_COMPLETE:
106 return 0;
107 case ATCS_REQUEST_IN_PROGRESS:
108 udelay(10);
109 break;
110 }
111 }
112
113 return 0;
114}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 95ab772e0c3e..d0ef8f9c1361 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -220,6 +220,8 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
220 struct pp_power_state *pcurrent; 220 struct pp_power_state *pcurrent;
221 struct pp_power_state *requested; 221 struct pp_power_state *requested;
222 bool equal; 222 bool equal;
223 uint32_t index;
224 long workload;
223 225
224 if (skip) 226 if (skip)
225 return 0; 227 return 0;
@@ -247,7 +249,14 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
247 if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) 249 if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
248 hwmgr->dpm_level = hwmgr->request_dpm_level; 250 hwmgr->dpm_level = hwmgr->request_dpm_level;
249 251
250 phm_reset_power_profile_state(hwmgr); 252 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
253 index = fls(hwmgr->workload_mask);
254 index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
255 workload = hwmgr->workload_setting[index];
256
257 if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
258 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
259 }
251 260
252 return 0; 261 return 0;
253} 262}
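
Editor's note: the pp_psm.c hunk ties into the workload tables set up by hwmgr_init_workload_prority() in the hwmgr.c hunk above — workload_mask is a bitmask indexed by priority, and fls() returns the 1-based position of its highest set bit, so the highest-priority requested profile is applied, falling back to workload_setting[0] (power saving) when no bit is set. The selection logic pulled out as a standalone helper for clarity; only the helper name is illustrative:

static long example_pick_workload(struct pp_hwmgr *hwmgr)
{
	uint32_t index = fls(hwmgr->workload_mask);

	/* fls() is 1-based; clamp empty or out-of-range masks to slot 0 */
	index = (index > 0 && index <= Workload_Policy_Max) ? index - 1 : 0;
	return hwmgr->workload_setting[index];
}
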
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index b49d65c3e984..c9eecce5683f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -836,10 +836,10 @@ static int init_over_drive_limits(
836 hwmgr->platform_descriptor.maxOverdriveVDDC = 0; 836 hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
837 hwmgr->platform_descriptor.overdriveVDDCStep = 0; 837 hwmgr->platform_descriptor.overdriveVDDCStep = 0;
838 838
839 if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 \ 839 if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 \
840 && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) { 840 || hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
841 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 841 hwmgr->od_enabled = false;
842 PHM_PlatformCaps_ACOverdriveSupport); 842 pr_debug("OverDrive feature not support by VBIOS\n");
843 } 843 }
844 844
845 return 0; 845 return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index c3e7e34535e8..36ca7c419c90 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1074,12 +1074,11 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
1074 powerplay_table, 1074 powerplay_table,
1075 (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info); 1075 (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info);
1076 1076
1077 if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 1077 if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0
1078 && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0 1078 && hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
1079 && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1079 hwmgr->od_enabled = false;
1080 PHM_PlatformCaps_OverdriveDisabledByPowerBudget)) 1080 pr_debug("OverDrive feature not support by VBIOS\n");
1081 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1081 }
1082 PHM_PlatformCaps_ACOverdriveSupport);
1083 1082
1084 return result; 1083 return result;
1085} 1084}
@@ -1697,9 +1696,6 @@ static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
1697 kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk); 1696 kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk);
1698 hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL; 1697 hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL;
1699 1698
1700 kfree(hwmgr->dyn_state.vq_budgeting_table);
1701 hwmgr->dyn_state.vq_budgeting_table = NULL;
1702
1703 return 0; 1699 return 0;
1704} 1700}
1705 1701
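Both pptables hunks above flip the overdrive check from "advertise AC overdrive when the VBIOS reports non-zero limits" (the legacy path additionally checked a power-budget cap) to "clear hwmgr->od_enabled when it does not". Note that the two files negate differently: process_pptables_v1_0.c disables overdrive if either the engine or the memory limit is zero, the exact negation of the old condition, while processpptables.c only disables it when both are zero. The small table-driven check below, with made-up clock values, prints how each predicate behaves; it illustrates the boolean logic only and is not driver code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented limits standing in for overdriveLimit.engineClock / .memoryClock. */
struct od_limits {
	unsigned int engine_clock;
	unsigned int memory_clock;
};

int main(void)
{
	const struct od_limits cases[] = {
		{ 0,      0      },
		{ 100000, 0      },
		{ 0,      165000 },
		{ 100000, 165000 },
	};

	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		bool old_cap_set   = cases[i].engine_clock > 0 && cases[i].memory_clock > 0;
		bool v1_0_od_off   = cases[i].engine_clock == 0 || cases[i].memory_clock == 0;
		bool legacy_od_off = cases[i].engine_clock == 0 && cases[i].memory_clock == 0;

		printf("eng=%6u mem=%6u  old-cap=%d  v1.0-disable=%d  legacy-disable=%d\n",
		       cases[i].engine_clock, cases[i].memory_clock,
		       old_cap_set, v1_0_od_off, legacy_od_off);
	}
	return 0;
}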
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 569073e3a5a1..10253b89b3d8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -32,53 +32,52 @@
32#include "hwmgr.h" 32#include "hwmgr.h"
33#include "hardwaremanager.h" 33#include "hardwaremanager.h"
34#include "rv_ppsmc.h" 34#include "rv_ppsmc.h"
35#include "rv_hwmgr.h" 35#include "smu10_hwmgr.h"
36#include "power_state.h" 36#include "power_state.h"
37#include "rv_smumgr.h"
38#include "pp_soc15.h" 37#include "pp_soc15.h"
39 38
40#define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID 5 39#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
41#define RAVEN_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */ 40#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
42#define SCLK_MIN_DIV_INTV_SHIFT 12 41#define SCLK_MIN_DIV_INTV_SHIFT 12
43#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */ 42#define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
44#define SMC_RAM_END 0x40000 43#define SMC_RAM_END 0x40000
45 44
46static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic; 45static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
47 46
48 47
49int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 48static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
50 struct pp_display_clock_request *clock_req); 49 struct pp_display_clock_request *clock_req);
51 50
52 51
53static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps) 52static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
54{ 53{
55 if (PhwRaven_Magic != hw_ps->magic) 54 if (SMU10_Magic != hw_ps->magic)
56 return NULL; 55 return NULL;
57 56
58 return (struct rv_power_state *)hw_ps; 57 return (struct smu10_power_state *)hw_ps;
59} 58}
60 59
61static const struct rv_power_state *cast_const_rv_ps( 60static const struct smu10_power_state *cast_const_smu10_ps(
62 const struct pp_hw_power_state *hw_ps) 61 const struct pp_hw_power_state *hw_ps)
63{ 62{
64 if (PhwRaven_Magic != hw_ps->magic) 63 if (SMU10_Magic != hw_ps->magic)
65 return NULL; 64 return NULL;
66 65
67 return (struct rv_power_state *)hw_ps; 66 return (struct smu10_power_state *)hw_ps;
68} 67}
69 68
70static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) 69static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
71{ 70{
72 struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend); 71 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
73 72
74 rv_hwmgr->dce_slow_sclk_threshold = 30000; 73 smu10_data->dce_slow_sclk_threshold = 30000;
75 rv_hwmgr->thermal_auto_throttling_treshold = 0; 74 smu10_data->thermal_auto_throttling_treshold = 0;
76 rv_hwmgr->is_nb_dpm_enabled = 1; 75 smu10_data->is_nb_dpm_enabled = 1;
77 rv_hwmgr->dpm_flags = 1; 76 smu10_data->dpm_flags = 1;
78 rv_hwmgr->gfx_off_controled_by_driver = false; 77 smu10_data->gfx_off_controled_by_driver = false;
79 rv_hwmgr->need_min_deep_sleep_dcefclk = true; 78 smu10_data->need_min_deep_sleep_dcefclk = true;
80 rv_hwmgr->num_active_display = 0; 79 smu10_data->num_active_display = 0;
81 rv_hwmgr->deep_sleep_dcefclk = 0; 80 smu10_data->deep_sleep_dcefclk = 0;
82 81
83 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 82 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
84 PHM_PlatformCaps_SclkDeepSleep); 83 PHM_PlatformCaps_SclkDeepSleep);
@@ -91,13 +90,13 @@ static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
91 return 0; 90 return 0;
92} 91}
93 92
94static int rv_construct_max_power_limits_table(struct pp_hwmgr *hwmgr, 93static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
95 struct phm_clock_and_voltage_limits *table) 94 struct phm_clock_and_voltage_limits *table)
96{ 95{
97 return 0; 96 return 0;
98} 97}
99 98
100static int rv_init_dynamic_state_adjustment_rule_settings( 99static int smu10_init_dynamic_state_adjustment_rule_settings(
101 struct pp_hwmgr *hwmgr) 100 struct pp_hwmgr *hwmgr)
102{ 101{
103 uint32_t table_size = 102 uint32_t table_size =
@@ -134,30 +133,30 @@ static int rv_init_dynamic_state_adjustment_rule_settings(
134 return 0; 133 return 0;
135} 134}
136 135
137static int rv_get_system_info_data(struct pp_hwmgr *hwmgr) 136static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
138{ 137{
139 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)hwmgr->backend; 138 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;
140 139
141 rv_data->sys_info.htc_hyst_lmt = 5; 140 smu10_data->sys_info.htc_hyst_lmt = 5;
142 rv_data->sys_info.htc_tmp_lmt = 203; 141 smu10_data->sys_info.htc_tmp_lmt = 203;
143 142
144 if (rv_data->thermal_auto_throttling_treshold == 0) 143 if (smu10_data->thermal_auto_throttling_treshold == 0)
145 rv_data->thermal_auto_throttling_treshold = 203; 144 smu10_data->thermal_auto_throttling_treshold = 203;
146 145
147 rv_construct_max_power_limits_table (hwmgr, 146 smu10_construct_max_power_limits_table (hwmgr,
148 &hwmgr->dyn_state.max_clock_voltage_on_ac); 147 &hwmgr->dyn_state.max_clock_voltage_on_ac);
149 148
150 rv_init_dynamic_state_adjustment_rule_settings(hwmgr); 149 smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);
151 150
152 return 0; 151 return 0;
153} 152}
154 153
155static int rv_construct_boot_state(struct pp_hwmgr *hwmgr) 154static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
156{ 155{
157 return 0; 156 return 0;
158} 157}
159 158
160static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) 159static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
161{ 160{
162 struct PP_Clocks clocks = {0}; 161 struct PP_Clocks clocks = {0};
163 struct pp_display_clock_request clock_req; 162 struct pp_display_clock_request clock_req;
@@ -166,111 +165,109 @@ static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
166 clock_req.clock_type = amd_pp_dcf_clock; 165 clock_req.clock_type = amd_pp_dcf_clock;
167 clock_req.clock_freq_in_khz = clocks.dcefClock * 10; 166 clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
168 167
169 PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req), 168 PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
170 "Attempt to set DCF Clock Failed!", return -EINVAL); 169 "Attempt to set DCF Clock Failed!", return -EINVAL);
171 170
172 return 0; 171 return 0;
173} 172}
174 173
175static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) 174static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
176{ 175{
177 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 176 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
178 177
179 if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) { 178 if (smu10_data->need_min_deep_sleep_dcefclk && smu10_data->deep_sleep_dcefclk != clock/100) {
180 rv_data->deep_sleep_dcefclk = clock/100; 179 smu10_data->deep_sleep_dcefclk = clock/100;
181 smum_send_msg_to_smc_with_parameter(hwmgr, 180 smum_send_msg_to_smc_with_parameter(hwmgr,
182 PPSMC_MSG_SetMinDeepSleepDcefclk, 181 PPSMC_MSG_SetMinDeepSleepDcefclk,
183 rv_data->deep_sleep_dcefclk); 182 smu10_data->deep_sleep_dcefclk);
184 } 183 }
185 return 0; 184 return 0;
186} 185}
187 186
188static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count) 187static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
189{ 188{
190 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 189 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
191 190
192 if (rv_data->num_active_display != count) { 191 if (smu10_data->num_active_display != count) {
193 rv_data->num_active_display = count; 192 smu10_data->num_active_display = count;
194 smum_send_msg_to_smc_with_parameter(hwmgr, 193 smum_send_msg_to_smc_with_parameter(hwmgr,
195 PPSMC_MSG_SetDisplayCount, 194 PPSMC_MSG_SetDisplayCount,
196 rv_data->num_active_display); 195 smu10_data->num_active_display);
197 } 196 }
198 197
199 return 0; 198 return 0;
200} 199}
201 200
202static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 201static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
203{ 202{
204 return rv_set_clock_limit(hwmgr, input); 203 return smu10_set_clock_limit(hwmgr, input);
205} 204}
206 205
207static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr) 206static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
208{ 207{
209 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 208 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
210 209
211 rv_data->vcn_power_gated = true; 210 smu10_data->vcn_power_gated = true;
212 rv_data->isp_tileA_power_gated = true; 211 smu10_data->isp_tileA_power_gated = true;
213 rv_data->isp_tileB_power_gated = true; 212 smu10_data->isp_tileB_power_gated = true;
214 213
215 return 0; 214 return 0;
216} 215}
217 216
218 217
219static int rv_setup_asic_task(struct pp_hwmgr *hwmgr) 218static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
220{ 219{
221 return rv_init_power_gate_state(hwmgr); 220 return smu10_init_power_gate_state(hwmgr);
222} 221}
223 222
224static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr) 223static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
225{ 224{
226 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 225 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
227 226
228 rv_data->separation_time = 0; 227 smu10_data->separation_time = 0;
229 rv_data->cc6_disable = false; 228 smu10_data->cc6_disable = false;
230 rv_data->pstate_disable = false; 229 smu10_data->pstate_disable = false;
231 rv_data->cc6_setting_changed = false; 230 smu10_data->cc6_setting_changed = false;
232 231
233 return 0; 232 return 0;
234} 233}
235 234
236static int rv_power_off_asic(struct pp_hwmgr *hwmgr) 235static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
237{ 236{
238 return rv_reset_cc6_data(hwmgr); 237 return smu10_reset_cc6_data(hwmgr);
239} 238}
240 239
241static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr) 240static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
242{ 241{
243 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 242 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
244 243
245 if (rv_data->gfx_off_controled_by_driver) 244 if (smu10_data->gfx_off_controled_by_driver)
246 smum_send_msg_to_smc(hwmgr, 245 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
247 PPSMC_MSG_DisableGfxOff);
248 246
249 return 0; 247 return 0;
250} 248}
251 249
252static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 250static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
253{ 251{
254 return rv_disable_gfx_off(hwmgr); 252 return smu10_disable_gfx_off(hwmgr);
255} 253}
256 254
257static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr) 255static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
258{ 256{
259 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 257 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
260 258
261 if (rv_data->gfx_off_controled_by_driver) 259 if (smu10_data->gfx_off_controled_by_driver)
262 smum_send_msg_to_smc(hwmgr, 260 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
263 PPSMC_MSG_EnableGfxOff);
264 261
265 return 0; 262 return 0;
266} 263}
267 264
268static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 265static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
269{ 266{
270 return rv_enable_gfx_off(hwmgr); 267 return smu10_enable_gfx_off(hwmgr);
271} 268}
272 269
273static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 270static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
274 struct pp_power_state *prequest_ps, 271 struct pp_power_state *prequest_ps,
275 const struct pp_power_state *pcurrent_ps) 272 const struct pp_power_state *pcurrent_ps)
276{ 273{
@@ -314,14 +311,14 @@ static const DpmClock_t VddPhyClk[]= {
314 { 810, 3600}, 311 { 810, 3600},
315}; 312};
316 313
317static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr, 314static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
318 struct rv_voltage_dependency_table **pptable, 315 struct smu10_voltage_dependency_table **pptable,
319 uint32_t num_entry, const DpmClock_t *pclk_dependency_table) 316 uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
320{ 317{
321 uint32_t table_size, i; 318 uint32_t table_size, i;
322 struct rv_voltage_dependency_table *ptable; 319 struct smu10_voltage_dependency_table *ptable;
323 320
324 table_size = sizeof(uint32_t) + sizeof(struct rv_voltage_dependency_table) * num_entry; 321 table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
325 ptable = kzalloc(table_size, GFP_KERNEL); 322 ptable = kzalloc(table_size, GFP_KERNEL);
326 323
327 if (NULL == ptable) 324 if (NULL == ptable)
@@ -341,107 +338,95 @@ static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
341} 338}
342 339
343 340
344static int rv_populate_clock_table(struct pp_hwmgr *hwmgr) 341static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
345{ 342{
346 int result; 343 int result;
347 344
348 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 345 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
349 DpmClocks_t *table = &(rv_data->clock_table); 346 DpmClocks_t *table = &(smu10_data->clock_table);
350 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 347 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
351 348
352 result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE); 349 result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);
353 350
354 PP_ASSERT_WITH_CODE((0 == result), 351 PP_ASSERT_WITH_CODE((0 == result),
355 "Attempt to copy clock table from smc failed", 352 "Attempt to copy clock table from smc failed",
356 return result); 353 return result);
357 354
358 if (0 == result && table->DcefClocks[0].Freq != 0) { 355 if (0 == result && table->DcefClocks[0].Freq != 0) {
359 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk, 356 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
360 NUM_DCEFCLK_DPM_LEVELS, 357 NUM_DCEFCLK_DPM_LEVELS,
361 &rv_data->clock_table.DcefClocks[0]); 358 &smu10_data->clock_table.DcefClocks[0]);
362 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk, 359 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
363 NUM_SOCCLK_DPM_LEVELS, 360 NUM_SOCCLK_DPM_LEVELS,
364 &rv_data->clock_table.SocClocks[0]); 361 &smu10_data->clock_table.SocClocks[0]);
365 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk, 362 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
366 NUM_FCLK_DPM_LEVELS, 363 NUM_FCLK_DPM_LEVELS,
367 &rv_data->clock_table.FClocks[0]); 364 &smu10_data->clock_table.FClocks[0]);
368 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk, 365 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
369 NUM_MEMCLK_DPM_LEVELS, 366 NUM_MEMCLK_DPM_LEVELS,
370 &rv_data->clock_table.MemClocks[0]); 367 &smu10_data->clock_table.MemClocks[0]);
371 } else { 368 } else {
372 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk, 369 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
373 ARRAY_SIZE(VddDcfClk), 370 ARRAY_SIZE(VddDcfClk),
374 &VddDcfClk[0]); 371 &VddDcfClk[0]);
375 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk, 372 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
376 ARRAY_SIZE(VddSocClk), 373 ARRAY_SIZE(VddSocClk),
377 &VddSocClk[0]); 374 &VddSocClk[0]);
378 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk, 375 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
379 ARRAY_SIZE(VddFClk), 376 ARRAY_SIZE(VddFClk),
380 &VddFClk[0]); 377 &VddFClk[0]);
381 } 378 }
382 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk, 379 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
383 ARRAY_SIZE(VddDispClk), 380 ARRAY_SIZE(VddDispClk),
384 &VddDispClk[0]); 381 &VddDispClk[0]);
385 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk, 382 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
386 ARRAY_SIZE(VddDppClk), &VddDppClk[0]); 383 ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
387 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk, 384 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
388 ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]); 385 ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
389 386
390 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 387 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
391 PPSMC_MSG_GetMinGfxclkFrequency), 388 result = smum_get_argument(hwmgr);
392 "Attempt to get min GFXCLK Failed!", 389 smu10_data->gfx_min_freq_limit = result * 100;
393 return -1); 390
394 PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr, 391 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
395 &result), 392 result = smum_get_argument(hwmgr);
396 "Attempt to get min GFXCLK Failed!", 393 smu10_data->gfx_max_freq_limit = result * 100;
397 return -1);
398 rv_data->gfx_min_freq_limit = result * 100;
399
400 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
401 PPSMC_MSG_GetMaxGfxclkFrequency),
402 "Attempt to get max GFXCLK Failed!",
403 return -1);
404 PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
405 &result),
406 "Attempt to get max GFXCLK Failed!",
407 return -1);
408 rv_data->gfx_max_freq_limit = result * 100;
409 394
410 return 0; 395 return 0;
411} 396}
412 397
413static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 398static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
414{ 399{
415 int result = 0; 400 int result = 0;
416 struct rv_hwmgr *data; 401 struct smu10_hwmgr *data;
417 402
418 data = kzalloc(sizeof(struct rv_hwmgr), GFP_KERNEL); 403 data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
419 if (data == NULL) 404 if (data == NULL)
420 return -ENOMEM; 405 return -ENOMEM;
421 406
422 hwmgr->backend = data; 407 hwmgr->backend = data;
423 408
424 result = rv_initialize_dpm_defaults(hwmgr); 409 result = smu10_initialize_dpm_defaults(hwmgr);
425 if (result != 0) { 410 if (result != 0) {
426 pr_err("rv_initialize_dpm_defaults failed\n"); 411 pr_err("smu10_initialize_dpm_defaults failed\n");
427 return result; 412 return result;
428 } 413 }
429 414
430 rv_populate_clock_table(hwmgr); 415 smu10_populate_clock_table(hwmgr);
431 416
432 result = rv_get_system_info_data(hwmgr); 417 result = smu10_get_system_info_data(hwmgr);
433 if (result != 0) { 418 if (result != 0) {
434 pr_err("rv_get_system_info_data failed\n"); 419 pr_err("smu10_get_system_info_data failed\n");
435 return result; 420 return result;
436 } 421 }
437 422
438 rv_construct_boot_state(hwmgr); 423 smu10_construct_boot_state(hwmgr);
439 424
440 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 425 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
441 RAVEN_MAX_HARDWARE_POWERLEVELS; 426 SMU10_MAX_HARDWARE_POWERLEVELS;
442 427
443 hwmgr->platform_descriptor.hardwarePerformanceLevels = 428 hwmgr->platform_descriptor.hardwarePerformanceLevels =
444 RAVEN_MAX_HARDWARE_POWERLEVELS; 429 SMU10_MAX_HARDWARE_POWERLEVELS;
445 430
446 hwmgr->platform_descriptor.vbiosInterruptId = 0; 431 hwmgr->platform_descriptor.vbiosInterruptId = 0;
447 432
@@ -451,13 +436,16 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
451 436
452 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; 437 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
453 438
439 hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
440 hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
441
454 return result; 442 return result;
455} 443}
456 444
457static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 445static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
458{ 446{
459 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 447 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
460 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 448 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
461 449
462 kfree(pinfo->vdd_dep_on_dcefclk); 450 kfree(pinfo->vdd_dep_on_dcefclk);
463 pinfo->vdd_dep_on_dcefclk = NULL; 451 pinfo->vdd_dep_on_dcefclk = NULL;
@@ -481,7 +469,7 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
481 return 0; 469 return 0;
482} 470}
483 471
484static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 472static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
485 enum amd_dpm_forced_level level) 473 enum amd_dpm_forced_level level)
486{ 474{
487 if (hwmgr->smu_version < 0x1E3700) { 475 if (hwmgr->smu_version < 0x1E3700) {
@@ -494,113 +482,113 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
494 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 482 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
495 smum_send_msg_to_smc_with_parameter(hwmgr, 483 smum_send_msg_to_smc_with_parameter(hwmgr,
496 PPSMC_MSG_SetHardMinGfxClk, 484 PPSMC_MSG_SetHardMinGfxClk,
497 RAVEN_UMD_PSTATE_PEAK_GFXCLK); 485 SMU10_UMD_PSTATE_PEAK_GFXCLK);
498 smum_send_msg_to_smc_with_parameter(hwmgr, 486 smum_send_msg_to_smc_with_parameter(hwmgr,
499 PPSMC_MSG_SetHardMinFclkByFreq, 487 PPSMC_MSG_SetHardMinFclkByFreq,
500 RAVEN_UMD_PSTATE_PEAK_FCLK); 488 SMU10_UMD_PSTATE_PEAK_FCLK);
501 smum_send_msg_to_smc_with_parameter(hwmgr, 489 smum_send_msg_to_smc_with_parameter(hwmgr,
502 PPSMC_MSG_SetHardMinSocclkByFreq, 490 PPSMC_MSG_SetHardMinSocclkByFreq,
503 RAVEN_UMD_PSTATE_PEAK_SOCCLK); 491 SMU10_UMD_PSTATE_PEAK_SOCCLK);
504 smum_send_msg_to_smc_with_parameter(hwmgr, 492 smum_send_msg_to_smc_with_parameter(hwmgr,
505 PPSMC_MSG_SetHardMinVcn, 493 PPSMC_MSG_SetHardMinVcn,
506 RAVEN_UMD_PSTATE_VCE); 494 SMU10_UMD_PSTATE_VCE);
507 495
508 smum_send_msg_to_smc_with_parameter(hwmgr, 496 smum_send_msg_to_smc_with_parameter(hwmgr,
509 PPSMC_MSG_SetSoftMaxGfxClk, 497 PPSMC_MSG_SetSoftMaxGfxClk,
510 RAVEN_UMD_PSTATE_PEAK_GFXCLK); 498 SMU10_UMD_PSTATE_PEAK_GFXCLK);
511 smum_send_msg_to_smc_with_parameter(hwmgr, 499 smum_send_msg_to_smc_with_parameter(hwmgr,
512 PPSMC_MSG_SetSoftMaxFclkByFreq, 500 PPSMC_MSG_SetSoftMaxFclkByFreq,
513 RAVEN_UMD_PSTATE_PEAK_FCLK); 501 SMU10_UMD_PSTATE_PEAK_FCLK);
514 smum_send_msg_to_smc_with_parameter(hwmgr, 502 smum_send_msg_to_smc_with_parameter(hwmgr,
515 PPSMC_MSG_SetSoftMaxSocclkByFreq, 503 PPSMC_MSG_SetSoftMaxSocclkByFreq,
516 RAVEN_UMD_PSTATE_PEAK_SOCCLK); 504 SMU10_UMD_PSTATE_PEAK_SOCCLK);
517 smum_send_msg_to_smc_with_parameter(hwmgr, 505 smum_send_msg_to_smc_with_parameter(hwmgr,
518 PPSMC_MSG_SetSoftMaxVcn, 506 PPSMC_MSG_SetSoftMaxVcn,
519 RAVEN_UMD_PSTATE_VCE); 507 SMU10_UMD_PSTATE_VCE);
520 break; 508 break;
521 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 509 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
522 smum_send_msg_to_smc_with_parameter(hwmgr, 510 smum_send_msg_to_smc_with_parameter(hwmgr,
523 PPSMC_MSG_SetHardMinGfxClk, 511 PPSMC_MSG_SetHardMinGfxClk,
524 RAVEN_UMD_PSTATE_MIN_GFXCLK); 512 SMU10_UMD_PSTATE_MIN_GFXCLK);
525 smum_send_msg_to_smc_with_parameter(hwmgr, 513 smum_send_msg_to_smc_with_parameter(hwmgr,
526 PPSMC_MSG_SetSoftMaxGfxClk, 514 PPSMC_MSG_SetSoftMaxGfxClk,
527 RAVEN_UMD_PSTATE_MIN_GFXCLK); 515 SMU10_UMD_PSTATE_MIN_GFXCLK);
528 break; 516 break;
529 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 517 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
530 smum_send_msg_to_smc_with_parameter(hwmgr, 518 smum_send_msg_to_smc_with_parameter(hwmgr,
531 PPSMC_MSG_SetHardMinFclkByFreq, 519 PPSMC_MSG_SetHardMinFclkByFreq,
532 RAVEN_UMD_PSTATE_MIN_FCLK); 520 SMU10_UMD_PSTATE_MIN_FCLK);
533 smum_send_msg_to_smc_with_parameter(hwmgr, 521 smum_send_msg_to_smc_with_parameter(hwmgr,
534 PPSMC_MSG_SetSoftMaxFclkByFreq, 522 PPSMC_MSG_SetSoftMaxFclkByFreq,
535 RAVEN_UMD_PSTATE_MIN_FCLK); 523 SMU10_UMD_PSTATE_MIN_FCLK);
536 break; 524 break;
537 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 525 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
538 smum_send_msg_to_smc_with_parameter(hwmgr, 526 smum_send_msg_to_smc_with_parameter(hwmgr,
539 PPSMC_MSG_SetHardMinGfxClk, 527 PPSMC_MSG_SetHardMinGfxClk,
540 RAVEN_UMD_PSTATE_GFXCLK); 528 SMU10_UMD_PSTATE_GFXCLK);
541 smum_send_msg_to_smc_with_parameter(hwmgr, 529 smum_send_msg_to_smc_with_parameter(hwmgr,
542 PPSMC_MSG_SetHardMinFclkByFreq, 530 PPSMC_MSG_SetHardMinFclkByFreq,
543 RAVEN_UMD_PSTATE_FCLK); 531 SMU10_UMD_PSTATE_FCLK);
544 smum_send_msg_to_smc_with_parameter(hwmgr, 532 smum_send_msg_to_smc_with_parameter(hwmgr,
545 PPSMC_MSG_SetHardMinSocclkByFreq, 533 PPSMC_MSG_SetHardMinSocclkByFreq,
546 RAVEN_UMD_PSTATE_SOCCLK); 534 SMU10_UMD_PSTATE_SOCCLK);
547 smum_send_msg_to_smc_with_parameter(hwmgr, 535 smum_send_msg_to_smc_with_parameter(hwmgr,
548 PPSMC_MSG_SetHardMinVcn, 536 PPSMC_MSG_SetHardMinVcn,
549 RAVEN_UMD_PSTATE_VCE); 537 SMU10_UMD_PSTATE_VCE);
550 538
551 smum_send_msg_to_smc_with_parameter(hwmgr, 539 smum_send_msg_to_smc_with_parameter(hwmgr,
552 PPSMC_MSG_SetSoftMaxGfxClk, 540 PPSMC_MSG_SetSoftMaxGfxClk,
553 RAVEN_UMD_PSTATE_GFXCLK); 541 SMU10_UMD_PSTATE_GFXCLK);
554 smum_send_msg_to_smc_with_parameter(hwmgr, 542 smum_send_msg_to_smc_with_parameter(hwmgr,
555 PPSMC_MSG_SetSoftMaxFclkByFreq, 543 PPSMC_MSG_SetSoftMaxFclkByFreq,
556 RAVEN_UMD_PSTATE_FCLK); 544 SMU10_UMD_PSTATE_FCLK);
557 smum_send_msg_to_smc_with_parameter(hwmgr, 545 smum_send_msg_to_smc_with_parameter(hwmgr,
558 PPSMC_MSG_SetSoftMaxSocclkByFreq, 546 PPSMC_MSG_SetSoftMaxSocclkByFreq,
559 RAVEN_UMD_PSTATE_SOCCLK); 547 SMU10_UMD_PSTATE_SOCCLK);
560 smum_send_msg_to_smc_with_parameter(hwmgr, 548 smum_send_msg_to_smc_with_parameter(hwmgr,
561 PPSMC_MSG_SetSoftMaxVcn, 549 PPSMC_MSG_SetSoftMaxVcn,
562 RAVEN_UMD_PSTATE_VCE); 550 SMU10_UMD_PSTATE_VCE);
563 break; 551 break;
564 case AMD_DPM_FORCED_LEVEL_AUTO: 552 case AMD_DPM_FORCED_LEVEL_AUTO:
565 smum_send_msg_to_smc_with_parameter(hwmgr, 553 smum_send_msg_to_smc_with_parameter(hwmgr,
566 PPSMC_MSG_SetHardMinGfxClk, 554 PPSMC_MSG_SetHardMinGfxClk,
567 RAVEN_UMD_PSTATE_MIN_GFXCLK); 555 SMU10_UMD_PSTATE_MIN_GFXCLK);
568 smum_send_msg_to_smc_with_parameter(hwmgr, 556 smum_send_msg_to_smc_with_parameter(hwmgr,
569 PPSMC_MSG_SetHardMinFclkByFreq, 557 PPSMC_MSG_SetHardMinFclkByFreq,
570 RAVEN_UMD_PSTATE_MIN_FCLK); 558 SMU10_UMD_PSTATE_MIN_FCLK);
571 smum_send_msg_to_smc_with_parameter(hwmgr, 559 smum_send_msg_to_smc_with_parameter(hwmgr,
572 PPSMC_MSG_SetHardMinSocclkByFreq, 560 PPSMC_MSG_SetHardMinSocclkByFreq,
573 RAVEN_UMD_PSTATE_MIN_SOCCLK); 561 SMU10_UMD_PSTATE_MIN_SOCCLK);
574 smum_send_msg_to_smc_with_parameter(hwmgr, 562 smum_send_msg_to_smc_with_parameter(hwmgr,
575 PPSMC_MSG_SetHardMinVcn, 563 PPSMC_MSG_SetHardMinVcn,
576 RAVEN_UMD_PSTATE_MIN_VCE); 564 SMU10_UMD_PSTATE_MIN_VCE);
577 565
578 smum_send_msg_to_smc_with_parameter(hwmgr, 566 smum_send_msg_to_smc_with_parameter(hwmgr,
579 PPSMC_MSG_SetSoftMaxGfxClk, 567 PPSMC_MSG_SetSoftMaxGfxClk,
580 RAVEN_UMD_PSTATE_PEAK_GFXCLK); 568 SMU10_UMD_PSTATE_PEAK_GFXCLK);
581 smum_send_msg_to_smc_with_parameter(hwmgr, 569 smum_send_msg_to_smc_with_parameter(hwmgr,
582 PPSMC_MSG_SetSoftMaxFclkByFreq, 570 PPSMC_MSG_SetSoftMaxFclkByFreq,
583 RAVEN_UMD_PSTATE_PEAK_FCLK); 571 SMU10_UMD_PSTATE_PEAK_FCLK);
584 smum_send_msg_to_smc_with_parameter(hwmgr, 572 smum_send_msg_to_smc_with_parameter(hwmgr,
585 PPSMC_MSG_SetSoftMaxSocclkByFreq, 573 PPSMC_MSG_SetSoftMaxSocclkByFreq,
586 RAVEN_UMD_PSTATE_PEAK_SOCCLK); 574 SMU10_UMD_PSTATE_PEAK_SOCCLK);
587 smum_send_msg_to_smc_with_parameter(hwmgr, 575 smum_send_msg_to_smc_with_parameter(hwmgr,
588 PPSMC_MSG_SetSoftMaxVcn, 576 PPSMC_MSG_SetSoftMaxVcn,
589 RAVEN_UMD_PSTATE_VCE); 577 SMU10_UMD_PSTATE_VCE);
590 break; 578 break;
591 case AMD_DPM_FORCED_LEVEL_LOW: 579 case AMD_DPM_FORCED_LEVEL_LOW:
592 smum_send_msg_to_smc_with_parameter(hwmgr, 580 smum_send_msg_to_smc_with_parameter(hwmgr,
593 PPSMC_MSG_SetHardMinGfxClk, 581 PPSMC_MSG_SetHardMinGfxClk,
594 RAVEN_UMD_PSTATE_MIN_GFXCLK); 582 SMU10_UMD_PSTATE_MIN_GFXCLK);
595 smum_send_msg_to_smc_with_parameter(hwmgr, 583 smum_send_msg_to_smc_with_parameter(hwmgr,
596 PPSMC_MSG_SetSoftMaxGfxClk, 584 PPSMC_MSG_SetSoftMaxGfxClk,
597 RAVEN_UMD_PSTATE_MIN_GFXCLK); 585 SMU10_UMD_PSTATE_MIN_GFXCLK);
598 smum_send_msg_to_smc_with_parameter(hwmgr, 586 smum_send_msg_to_smc_with_parameter(hwmgr,
599 PPSMC_MSG_SetHardMinFclkByFreq, 587 PPSMC_MSG_SetHardMinFclkByFreq,
600 RAVEN_UMD_PSTATE_MIN_FCLK); 588 SMU10_UMD_PSTATE_MIN_FCLK);
601 smum_send_msg_to_smc_with_parameter(hwmgr, 589 smum_send_msg_to_smc_with_parameter(hwmgr,
602 PPSMC_MSG_SetSoftMaxFclkByFreq, 590 PPSMC_MSG_SetSoftMaxFclkByFreq,
603 RAVEN_UMD_PSTATE_MIN_FCLK); 591 SMU10_UMD_PSTATE_MIN_FCLK);
604 break; 592 break;
605 case AMD_DPM_FORCED_LEVEL_MANUAL: 593 case AMD_DPM_FORCED_LEVEL_MANUAL:
606 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 594 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -610,14 +598,14 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
610 return 0; 598 return 0;
611} 599}
612 600
613static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 601static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
614{ 602{
615 struct rv_hwmgr *data; 603 struct smu10_hwmgr *data;
616 604
617 if (hwmgr == NULL) 605 if (hwmgr == NULL)
618 return -EINVAL; 606 return -EINVAL;
619 607
620 data = (struct rv_hwmgr *)(hwmgr->backend); 608 data = (struct smu10_hwmgr *)(hwmgr->backend);
621 609
622 if (low) 610 if (low)
623 return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk; 611 return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
@@ -626,14 +614,14 @@ static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
626 data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk; 614 data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
627} 615}
628 616
629static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 617static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
630{ 618{
631 struct rv_hwmgr *data; 619 struct smu10_hwmgr *data;
632 620
633 if (hwmgr == NULL) 621 if (hwmgr == NULL)
634 return -EINVAL; 622 return -EINVAL;
635 623
636 data = (struct rv_hwmgr *)(hwmgr->backend); 624 data = (struct smu10_hwmgr *)(hwmgr->backend);
637 625
638 if (low) 626 if (low)
639 return data->gfx_min_freq_limit; 627 return data->gfx_min_freq_limit;
@@ -641,34 +629,34 @@ static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
641 return data->gfx_max_freq_limit; 629 return data->gfx_max_freq_limit;
642} 630}
643 631
644static int rv_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, 632static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
645 struct pp_hw_power_state *hw_ps) 633 struct pp_hw_power_state *hw_ps)
646{ 634{
647 return 0; 635 return 0;
648} 636}
649 637
650static int rv_dpm_get_pp_table_entry_callback( 638static int smu10_dpm_get_pp_table_entry_callback(
651 struct pp_hwmgr *hwmgr, 639 struct pp_hwmgr *hwmgr,
652 struct pp_hw_power_state *hw_ps, 640 struct pp_hw_power_state *hw_ps,
653 unsigned int index, 641 unsigned int index,
654 const void *clock_info) 642 const void *clock_info)
655{ 643{
656 struct rv_power_state *rv_ps = cast_rv_ps(hw_ps); 644 struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);
657 645
658 rv_ps->levels[index].engine_clock = 0; 646 smu10_ps->levels[index].engine_clock = 0;
659 647
660 rv_ps->levels[index].vddc_index = 0; 648 smu10_ps->levels[index].vddc_index = 0;
661 rv_ps->level = index + 1; 649 smu10_ps->level = index + 1;
662 650
663 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { 651 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
664 rv_ps->levels[index].ds_divider_index = 5; 652 smu10_ps->levels[index].ds_divider_index = 5;
665 rv_ps->levels[index].ss_divider_index = 5; 653 smu10_ps->levels[index].ss_divider_index = 5;
666 } 654 }
667 655
668 return 0; 656 return 0;
669} 657}
670 658
671static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr) 659static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
672{ 660{
673 int result; 661 int result;
674 unsigned long ret = 0; 662 unsigned long ret = 0;
@@ -678,72 +666,66 @@ static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
678 return result ? 0 : ret; 666 return result ? 0 : ret;
679} 667}
680 668
681static int rv_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, 669static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
682 unsigned long entry, struct pp_power_state *ps) 670 unsigned long entry, struct pp_power_state *ps)
683{ 671{
684 int result; 672 int result;
685 struct rv_power_state *rv_ps; 673 struct smu10_power_state *smu10_ps;
686 674
687 ps->hardware.magic = PhwRaven_Magic; 675 ps->hardware.magic = SMU10_Magic;
688 676
689 rv_ps = cast_rv_ps(&(ps->hardware)); 677 smu10_ps = cast_smu10_ps(&(ps->hardware));
690 678
691 result = pp_tables_get_entry(hwmgr, entry, ps, 679 result = pp_tables_get_entry(hwmgr, entry, ps,
692 rv_dpm_get_pp_table_entry_callback); 680 smu10_dpm_get_pp_table_entry_callback);
693 681
694 rv_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK; 682 smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
695 rv_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK; 683 smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
696 684
697 return result; 685 return result;
698} 686}
699 687
700static int rv_get_power_state_size(struct pp_hwmgr *hwmgr) 688static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
701{ 689{
702 return sizeof(struct rv_power_state); 690 return sizeof(struct smu10_power_state);
703} 691}
704 692
705static int rv_set_cpu_power_state(struct pp_hwmgr *hwmgr) 693static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
706{ 694{
707 return 0; 695 return 0;
708} 696}
709 697
710 698
711static int rv_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, 699static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
712 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) 700 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
713{ 701{
714 return 0; 702 return 0;
715} 703}
716 704
717static int rv_get_dal_power_level(struct pp_hwmgr *hwmgr, 705static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
718 struct amd_pp_simple_clock_info *info) 706 struct amd_pp_simple_clock_info *info)
719{ 707{
720 return -EINVAL; 708 return -EINVAL;
721} 709}
722 710
723static int rv_force_clock_level(struct pp_hwmgr *hwmgr, 711static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
724 enum pp_clock_type type, uint32_t mask) 712 enum pp_clock_type type, uint32_t mask)
725{ 713{
726 return 0; 714 return 0;
727} 715}
728 716
729static int rv_print_clock_levels(struct pp_hwmgr *hwmgr, 717static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
730 enum pp_clock_type type, char *buf) 718 enum pp_clock_type type, char *buf)
731{ 719{
732 struct rv_hwmgr *data = (struct rv_hwmgr *)(hwmgr->backend); 720 struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
733 struct rv_voltage_dependency_table *mclk_table = 721 struct smu10_voltage_dependency_table *mclk_table =
734 data->clock_vol_info.vdd_dep_on_fclk; 722 data->clock_vol_info.vdd_dep_on_fclk;
735 int i, now, size = 0; 723 int i, now, size = 0;
736 724
737 switch (type) { 725 switch (type) {
738 case PP_SCLK: 726 case PP_SCLK:
739 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 727 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
740 PPSMC_MSG_GetGfxclkFrequency), 728 now = smum_get_argument(hwmgr);
741 "Attempt to get current GFXCLK Failed!",
742 return -1);
743 PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
744 &now),
745 "Attempt to get current GFXCLK Failed!",
746 return -1);
747 729
748 size += sprintf(buf + size, "0: %uMhz %s\n", 730 size += sprintf(buf + size, "0: %uMhz %s\n",
749 data->gfx_min_freq_limit / 100, 731 data->gfx_min_freq_limit / 100,
@@ -755,14 +737,8 @@ static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
755 == now) ? "*" : ""); 737 == now) ? "*" : "");
756 break; 738 break;
757 case PP_MCLK: 739 case PP_MCLK:
758 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 740 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
759 PPSMC_MSG_GetFclkFrequency), 741 now = smum_get_argument(hwmgr);
760 "Attempt to get current MEMCLK Failed!",
761 return -1);
762 PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
763 &now),
764 "Attempt to get current MEMCLK Failed!",
765 return -1);
766 742
767 for (i = 0; i < mclk_table->count; i++) 743 for (i = 0; i < mclk_table->count; i++)
768 size += sprintf(buf + size, "%d: %uMhz %s\n", 744 size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -778,16 +754,16 @@ static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
778 return size; 754 return size;
779} 755}
780 756
781static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 757static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
782 PHM_PerformanceLevelDesignation designation, uint32_t index, 758 PHM_PerformanceLevelDesignation designation, uint32_t index,
783 PHM_PerformanceLevel *level) 759 PHM_PerformanceLevel *level)
784{ 760{
785 struct rv_hwmgr *data; 761 struct smu10_hwmgr *data;
786 762
787 if (level == NULL || hwmgr == NULL || state == NULL) 763 if (level == NULL || hwmgr == NULL || state == NULL)
788 return -EINVAL; 764 return -EINVAL;
789 765
790 data = (struct rv_hwmgr *)(hwmgr->backend); 766 data = (struct smu10_hwmgr *)(hwmgr->backend);
791 767
792 if (index == 0) { 768 if (index == 0) {
793 level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk; 769 level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
@@ -804,10 +780,10 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
804 return 0; 780 return 0;
805} 781}
806 782
807static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, 783static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
808 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) 784 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
809{ 785{
810 const struct rv_power_state *ps = cast_const_rv_ps(state); 786 const struct smu10_power_state *ps = cast_const_smu10_ps(state);
811 787
812 clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index)); 788 clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
813 clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index)); 789 clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
@@ -822,7 +798,7 @@ static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
822#define MEM_LATENCY_ERR 0xFFFF 798#define MEM_LATENCY_ERR 0xFFFF
823 799
824 800
825static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr, 801static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
826 uint32_t clock) 802 uint32_t clock)
827{ 803{
828 if (clock >= MEM_FREQ_LOW_LATENCY && 804 if (clock >= MEM_FREQ_LOW_LATENCY &&
@@ -834,14 +810,14 @@ static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr,
834 return MEM_LATENCY_ERR; 810 return MEM_LATENCY_ERR;
835} 811}
836 812
837static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, 813static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
838 enum amd_pp_clock_type type, 814 enum amd_pp_clock_type type,
839 struct pp_clock_levels_with_latency *clocks) 815 struct pp_clock_levels_with_latency *clocks)
840{ 816{
841 uint32_t i; 817 uint32_t i;
842 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 818 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
843 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 819 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
844 struct rv_voltage_dependency_table *pclk_vol_table; 820 struct smu10_voltage_dependency_table *pclk_vol_table;
845 bool latency_required = false; 821 bool latency_required = false;
846 822
847 if (pinfo == NULL) 823 if (pinfo == NULL)
@@ -878,7 +854,7 @@ static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
878 for (i = 0; i < pclk_vol_table->count; i++) { 854 for (i = 0; i < pclk_vol_table->count; i++) {
879 clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk; 855 clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
880 clocks->data[i].latency_in_us = latency_required ? 856 clocks->data[i].latency_in_us = latency_required ?
881 rv_get_mem_latency(hwmgr, 857 smu10_get_mem_latency(hwmgr,
882 pclk_vol_table->entries[i].clk) : 858 pclk_vol_table->entries[i].clk) :
883 0; 859 0;
884 clocks->num_levels++; 860 clocks->num_levels++;
@@ -887,14 +863,14 @@ static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
887 return 0; 863 return 0;
888} 864}
889 865
890static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, 866static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
891 enum amd_pp_clock_type type, 867 enum amd_pp_clock_type type,
892 struct pp_clock_levels_with_voltage *clocks) 868 struct pp_clock_levels_with_voltage *clocks)
893{ 869{
894 uint32_t i; 870 uint32_t i;
895 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 871 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
896 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 872 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
897 struct rv_voltage_dependency_table *pclk_vol_table = NULL; 873 struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
898 874
899 if (pinfo == NULL) 875 if (pinfo == NULL)
900 return -EINVAL; 876 return -EINVAL;
@@ -929,29 +905,28 @@ static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
929 return 0; 905 return 0;
930} 906}
931 907
932int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 908static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
933 struct pp_display_clock_request *clock_req) 909 struct pp_display_clock_request *clock_req)
934{ 910{
935 int result = 0; 911 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
936 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
937 enum amd_pp_clock_type clk_type = clock_req->clock_type; 912 enum amd_pp_clock_type clk_type = clock_req->clock_type;
938 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; 913 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
939 PPSMC_Msg msg; 914 PPSMC_Msg msg;
940 915
941 switch (clk_type) { 916 switch (clk_type) {
942 case amd_pp_dcf_clock: 917 case amd_pp_dcf_clock:
943 if (clk_freq == rv_data->dcf_actual_hard_min_freq) 918 if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
944 return 0; 919 return 0;
945 msg = PPSMC_MSG_SetHardMinDcefclkByFreq; 920 msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
946 rv_data->dcf_actual_hard_min_freq = clk_freq; 921 smu10_data->dcf_actual_hard_min_freq = clk_freq;
947 break; 922 break;
948 case amd_pp_soc_clock: 923 case amd_pp_soc_clock:
949 msg = PPSMC_MSG_SetHardMinSocclkByFreq; 924 msg = PPSMC_MSG_SetHardMinSocclkByFreq;
950 break; 925 break;
951 case amd_pp_f_clock: 926 case amd_pp_f_clock:
952 if (clk_freq == rv_data->f_actual_hard_min_freq) 927 if (clk_freq == smu10_data->f_actual_hard_min_freq)
953 return 0; 928 return 0;
954 rv_data->f_actual_hard_min_freq = clk_freq; 929 smu10_data->f_actual_hard_min_freq = clk_freq;
955 msg = PPSMC_MSG_SetHardMinFclkByFreq; 930 msg = PPSMC_MSG_SetHardMinFclkByFreq;
956 break; 931 break;
957 default: 932 default:
@@ -959,19 +934,18 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
959 return -EINVAL; 934 return -EINVAL;
960 } 935 }
961 936
962 result = smum_send_msg_to_smc_with_parameter(hwmgr, msg, 937 smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
963 clk_freq);
964 938
965 return result; 939 return 0;
966} 940}
967 941
968static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) 942static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
969{ 943{
970 clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */ 944 clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
971 return 0; 945 return 0;
972} 946}
973 947
974static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr) 948static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
975{ 949{
976 uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0, 950 uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
977 mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP); 951 mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
@@ -987,7 +961,7 @@ static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
987 return cur_temp; 961 return cur_temp;
988} 962}
989 963
990static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx, 964static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
991 void *value, int *size) 965 void *value, int *size)
992{ 966{
993 uint32_t sclk, mclk; 967 uint32_t sclk, mclk;
@@ -995,25 +969,21 @@ static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
995 969
996 switch (idx) { 970 switch (idx) {
997 case AMDGPU_PP_SENSOR_GFX_SCLK: 971 case AMDGPU_PP_SENSOR_GFX_SCLK:
998 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency); 972 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
999 if (!ret) { 973 sclk = smum_get_argument(hwmgr);
1000 rv_read_arg_from_smc(hwmgr, &sclk);
1001 /* in units of 10KHZ */ 974 /* in units of 10KHZ */
1002 *((uint32_t *)value) = sclk * 100; 975 *((uint32_t *)value) = sclk * 100;
1003 *size = 4; 976 *size = 4;
1004 }
1005 break; 977 break;
1006 case AMDGPU_PP_SENSOR_GFX_MCLK: 978 case AMDGPU_PP_SENSOR_GFX_MCLK:
1007 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency); 979 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
1008 if (!ret) { 980 mclk = smum_get_argument(hwmgr);
1009 rv_read_arg_from_smc(hwmgr, &mclk);
1010 /* in units of 10KHZ */ 981 /* in units of 10KHZ */
1011 *((uint32_t *)value) = mclk * 100; 982 *((uint32_t *)value) = mclk * 100;
1012 *size = 4; 983 *size = 4;
1013 }
1014 break; 984 break;
1015 case AMDGPU_PP_SENSOR_GPU_TEMP: 985 case AMDGPU_PP_SENSOR_GPU_TEMP:
1016 *((uint32_t *)value) = rv_thermal_get_temperature(hwmgr); 986 *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
1017 break; 987 break;
1018 default: 988 default:
1019 ret = -EINVAL; 989 ret = -EINVAL;
@@ -1023,44 +993,50 @@ static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1023 return ret; 993 return ret;
1024} 994}
1025 995
1026static const struct pp_hwmgr_func rv_hwmgr_funcs = { 996static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
1027 .backend_init = rv_hwmgr_backend_init, 997{
1028 .backend_fini = rv_hwmgr_backend_fini, 998 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
999}
1000
1001static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
1002 .backend_init = smu10_hwmgr_backend_init,
1003 .backend_fini = smu10_hwmgr_backend_fini,
1029 .asic_setup = NULL, 1004 .asic_setup = NULL,
1030 .apply_state_adjust_rules = rv_apply_state_adjust_rules, 1005 .apply_state_adjust_rules = smu10_apply_state_adjust_rules,
1031 .force_dpm_level = rv_dpm_force_dpm_level, 1006 .force_dpm_level = smu10_dpm_force_dpm_level,
1032 .get_power_state_size = rv_get_power_state_size, 1007 .get_power_state_size = smu10_get_power_state_size,
1033 .powerdown_uvd = NULL, 1008 .powerdown_uvd = NULL,
1034 .powergate_uvd = NULL, 1009 .powergate_uvd = NULL,
1035 .powergate_vce = NULL, 1010 .powergate_vce = NULL,
1036 .get_mclk = rv_dpm_get_mclk, 1011 .get_mclk = smu10_dpm_get_mclk,
1037 .get_sclk = rv_dpm_get_sclk, 1012 .get_sclk = smu10_dpm_get_sclk,
1038 .patch_boot_state = rv_dpm_patch_boot_state, 1013 .patch_boot_state = smu10_dpm_patch_boot_state,
1039 .get_pp_table_entry = rv_dpm_get_pp_table_entry, 1014 .get_pp_table_entry = smu10_dpm_get_pp_table_entry,
1040 .get_num_of_pp_table_entries = rv_dpm_get_num_of_pp_table_entries, 1015 .get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
1041 .set_cpu_power_state = rv_set_cpu_power_state, 1016 .set_cpu_power_state = smu10_set_cpu_power_state,
1042 .store_cc6_data = rv_store_cc6_data, 1017 .store_cc6_data = smu10_store_cc6_data,
1043 .force_clock_level = rv_force_clock_level, 1018 .force_clock_level = smu10_force_clock_level,
1044 .print_clock_levels = rv_print_clock_levels, 1019 .print_clock_levels = smu10_print_clock_levels,
1045 .get_dal_power_level = rv_get_dal_power_level, 1020 .get_dal_power_level = smu10_get_dal_power_level,
1046 .get_performance_level = rv_get_performance_level, 1021 .get_performance_level = smu10_get_performance_level,
1047 .get_current_shallow_sleep_clocks = rv_get_current_shallow_sleep_clocks, 1022 .get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
1048 .get_clock_by_type_with_latency = rv_get_clock_by_type_with_latency, 1023 .get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
1049 .get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage, 1024 .get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
1050 .get_max_high_clocks = rv_get_max_high_clocks, 1025 .get_max_high_clocks = smu10_get_max_high_clocks,
1051 .read_sensor = rv_read_sensor, 1026 .read_sensor = smu10_read_sensor,
1052 .set_active_display_count = rv_set_active_display_count, 1027 .set_active_display_count = smu10_set_active_display_count,
1053 .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk, 1028 .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
1054 .dynamic_state_management_enable = rv_enable_dpm_tasks, 1029 .dynamic_state_management_enable = smu10_enable_dpm_tasks,
1055 .power_off_asic = rv_power_off_asic, 1030 .power_off_asic = smu10_power_off_asic,
1056 .asic_setup = rv_setup_asic_task, 1031 .asic_setup = smu10_setup_asic_task,
1057 .power_state_set = rv_set_power_state_tasks, 1032 .power_state_set = smu10_set_power_state_tasks,
1058 .dynamic_state_management_disable = rv_disable_dpm_tasks, 1033 .dynamic_state_management_disable = smu10_disable_dpm_tasks,
1034 .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
1059}; 1035};
1060 1036
1061int rv_init_function_pointers(struct pp_hwmgr *hwmgr) 1037int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
1062{ 1038{
1063 hwmgr->hwmgr_func = &rv_hwmgr_funcs; 1039 hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
1064 hwmgr->pptable_func = &pptable_funcs; 1040 hwmgr->pptable_func = &pptable_funcs;
1065 return 0; 1041 return 0;
1066} 1042}
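Beyond the rv_ to smu10_ rename, the recurring functional change in this file is how the driver talks to the SMU: the PP_ASSERT_WITH_CODE() wrappers around smum_send_msg_to_smc() and rv_read_arg_from_smc() are collapsed into a plain send followed by smum_get_argument(), and the clock table is fetched through smum_smc_table_manager() (see smu10_populate_clock_table, smu10_print_clock_levels and smu10_read_sensor above). The self-contained sketch below mimics that send-then-read-reply mailbox pattern in user space; the mailbox struct, message IDs, reply values and scaling are invented for illustration and are not the real SMU register interface.

#include <stdint.h>
#include <stdio.h>

/* Toy mailbox standing in for the SMU message/argument registers. */
struct smu_mailbox {
	uint32_t last_msg;
	uint32_t argument;
};

/* Illustrative message IDs; the real PPSMC_MSG_* values live in rv_ppsmc.h. */
enum {
	MSG_GET_MIN_GFXCLK = 0x2E,
	MSG_GET_MAX_GFXCLK = 0x2F,
};

/* Stand-in for smum_send_msg_to_smc(): post the message and fake a reply. */
static void send_msg(struct smu_mailbox *mb, uint32_t msg)
{
	mb->last_msg = msg;
	mb->argument = (msg == MSG_GET_MIN_GFXCLK) ? 300 : 1100;	/* pretend MHz values */
}

/* Stand-in for smum_get_argument(): read back the reply register. */
static uint32_t get_argument(const struct smu_mailbox *mb)
{
	return mb->argument;
}

int main(void)
{
	struct smu_mailbox mb = { 0, 0 };
	uint32_t gfx_min, gfx_max;

	/* Same shape as smu10_populate_clock_table(): send, then read the argument. */
	send_msg(&mb, MSG_GET_MIN_GFXCLK);
	gfx_min = get_argument(&mb) * 100;	/* the driver also scales the reply by 100 */

	send_msg(&mb, MSG_GET_MAX_GFXCLK);
	gfx_max = get_argument(&mb) * 100;

	printf("gfx clock limits (driver-internal units): %u .. %u\n", gfx_min, gfx_max);
	return 0;
}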
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index c3bc311dc59f..175c3a592b6c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -21,17 +21,17 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef RAVEN_HWMGR_H 24#ifndef SMU10_HWMGR_H
25#define RAVEN_HWMGR_H 25#define SMU10_HWMGR_H
26 26
27#include "hwmgr.h" 27#include "hwmgr.h"
28#include "rv_inc.h" 28#include "smu10_inc.h"
29#include "smu10_driver_if.h" 29#include "smu10_driver_if.h"
30#include "rv_ppsmc.h" 30#include "rv_ppsmc.h"
31 31
32 32
33#define RAVEN_MAX_HARDWARE_POWERLEVELS 8 33#define SMU10_MAX_HARDWARE_POWERLEVELS 8
34#define PHMRAVEN_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15 34#define SMU10_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
35 35
36#define DPMFlags_SCLK_Enabled 0x00000001 36#define DPMFlags_SCLK_Enabled 0x00000001
37#define DPMFlags_UVD_Enabled 0x00000002 37#define DPMFlags_UVD_Enabled 0x00000002
@@ -47,10 +47,10 @@
47 47
48#define SMU_PHYID_SHIFT 8 48#define SMU_PHYID_SHIFT 8
49 49
50#define RAVEN_PCIE_POWERGATING_TARGET_GFX 0 50#define SMU10_PCIE_POWERGATING_TARGET_GFX 0
51#define RAVEN_PCIE_POWERGATING_TARGET_DDI 1 51#define SMU10_PCIE_POWERGATING_TARGET_DDI 1
52#define RAVEN_PCIE_POWERGATING_TARGET_PLLCASCADE 2 52#define SMU10_PCIE_POWERGATING_TARGET_PLLCASCADE 2
53#define RAVEN_PCIE_POWERGATING_TARGET_PHY 3 53#define SMU10_PCIE_POWERGATING_TARGET_PHY 3
54 54
55enum VQ_TYPE { 55enum VQ_TYPE {
56 CLOCK_TYPE_DCLK = 0L, 56 CLOCK_TYPE_DCLK = 0L,
@@ -65,14 +65,14 @@ enum VQ_TYPE {
65#define SUSTAINABLE_CU_MASK 0xff000000 65#define SUSTAINABLE_CU_MASK 0xff000000
66#define SUSTAINABLE_CU_SHIFT 24 66#define SUSTAINABLE_CU_SHIFT 24
67 67
68struct rv_dpm_entry { 68struct smu10_dpm_entry {
69 uint32_t soft_min_clk; 69 uint32_t soft_min_clk;
70 uint32_t hard_min_clk; 70 uint32_t hard_min_clk;
71 uint32_t soft_max_clk; 71 uint32_t soft_max_clk;
72 uint32_t hard_max_clk; 72 uint32_t hard_max_clk;
73}; 73};
74 74
75struct rv_power_level { 75struct smu10_power_level {
76 uint32_t engine_clock; 76 uint32_t engine_clock;
77 uint8_t vddc_index; 77 uint8_t vddc_index;
78 uint8_t ds_divider_index; 78 uint8_t ds_divider_index;
@@ -86,14 +86,14 @@ struct rv_power_level {
86 uint8_t rsv[3]; 86 uint8_t rsv[3];
87}; 87};
88 88
89/*used for the nbpsFlags field in rv_power state*/ 89/*used for the nbpsFlags field in smu10_power state*/
90#define RAVEN_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0) 90#define SMU10_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0)
91#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1) 91#define SMU10_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1)
92#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2) 92#define SMU10_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2)
93 93
94#define RAVEN_POWERSTATE_FLAGS_BAPM_DISABLE (1<<0) 94#define SMU10_POWERSTATE_FLAGS_BAPM_DISABLE (1<<0)
95 95
96struct rv_uvd_clocks { 96struct smu10_uvd_clocks {
97 uint32_t vclk; 97 uint32_t vclk;
98 uint32_t dclk; 98 uint32_t dclk;
99 uint32_t vclk_low_divider; 99 uint32_t vclk_low_divider;
@@ -118,16 +118,16 @@ struct pp_disable_nbpslo_flags {
118}; 118};
119 119
120 120
121enum rv_pstate_previous_action { 121enum smu10_pstate_previous_action {
122 DO_NOTHING = 1, 122 DO_NOTHING = 1,
123 FORCE_HIGH, 123 FORCE_HIGH,
124 CANCEL_FORCE_HIGH 124 CANCEL_FORCE_HIGH
125}; 125};
126 126
127struct rv_power_state { 127struct smu10_power_state {
128 unsigned int magic; 128 unsigned int magic;
129 uint32_t level; 129 uint32_t level;
130 struct rv_uvd_clocks uvd_clocks; 130 struct smu10_uvd_clocks uvd_clocks;
131 uint32_t evclk; 131 uint32_t evclk;
132 uint32_t ecclk; 132 uint32_t ecclk;
133 uint32_t samclk; 133 uint32_t samclk;
@@ -141,79 +141,79 @@ struct rv_power_state {
141 uint8_t dpm_x_nbps_low; 141 uint8_t dpm_x_nbps_low;
142 uint8_t dpm_x_nbps_high; 142 uint8_t dpm_x_nbps_high;
143 143
144 enum rv_pstate_previous_action action; 144 enum smu10_pstate_previous_action action;
145 145
146 struct rv_power_level levels[RAVEN_MAX_HARDWARE_POWERLEVELS]; 146 struct smu10_power_level levels[SMU10_MAX_HARDWARE_POWERLEVELS];
147 struct pp_disable_nbpslo_flags nbpslo_flags; 147 struct pp_disable_nbpslo_flags nbpslo_flags;
148}; 148};
149 149
150#define RAVEN_NUM_NBPSTATES 4 150#define SMU10_NUM_NBPSTATES 4
151#define RAVEN_NUM_NBPMEMORYCLOCK 2 151#define SMU10_NUM_NBPMEMORYCLOCK 2
152 152
153 153
154struct rv_display_phy_info_entry { 154struct smu10_display_phy_info_entry {
155 uint8_t phy_present; 155 uint8_t phy_present;
156 uint8_t active_lane_mapping; 156 uint8_t active_lane_mapping;
157 uint8_t display_config_type; 157 uint8_t display_config_type;
158 uint8_t active_num_of_lanes; 158 uint8_t active_num_of_lanes;
159}; 159};
160 160
161#define RAVEN_MAX_DISPLAYPHY_IDS 10 161#define SMU10_MAX_DISPLAYPHY_IDS 10
162 162
163struct rv_display_phy_info { 163struct smu10_display_phy_info {
164 bool display_phy_access_initialized; 164 bool display_phy_access_initialized;
165 struct rv_display_phy_info_entry entries[RAVEN_MAX_DISPLAYPHY_IDS]; 165 struct smu10_display_phy_info_entry entries[SMU10_MAX_DISPLAYPHY_IDS];
166}; 166};
167 167
168#define MAX_DISPLAY_CLOCK_LEVEL 8 168#define MAX_DISPLAY_CLOCK_LEVEL 8
169 169
170struct rv_system_info{ 170struct smu10_system_info{
171 uint8_t htc_tmp_lmt; 171 uint8_t htc_tmp_lmt;
172 uint8_t htc_hyst_lmt; 172 uint8_t htc_hyst_lmt;
173}; 173};
174 174
175#define MAX_REGULAR_DPM_NUMBER 8 175#define MAX_REGULAR_DPM_NUMBER 8
176 176
177struct rv_mclk_latency_entries { 177struct smu10_mclk_latency_entries {
178 uint32_t frequency; 178 uint32_t frequency;
179 uint32_t latency; 179 uint32_t latency;
180}; 180};
181 181
182struct rv_mclk_latency_table { 182struct smu10_mclk_latency_table {
183 uint32_t count; 183 uint32_t count;
184 struct rv_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER]; 184 struct smu10_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
185}; 185};
186 186
187struct rv_clock_voltage_dependency_record { 187struct smu10_clock_voltage_dependency_record {
188 uint32_t clk; 188 uint32_t clk;
189 uint32_t vol; 189 uint32_t vol;
190}; 190};
191 191
192 192
193struct rv_voltage_dependency_table { 193struct smu10_voltage_dependency_table {
194 uint32_t count; 194 uint32_t count;
195 struct rv_clock_voltage_dependency_record entries[1]; 195 struct smu10_clock_voltage_dependency_record entries[1];
196}; 196};
197 197
198struct rv_clock_voltage_information { 198struct smu10_clock_voltage_information {
199 struct rv_voltage_dependency_table *vdd_dep_on_dcefclk; 199 struct smu10_voltage_dependency_table *vdd_dep_on_dcefclk;
200 struct rv_voltage_dependency_table *vdd_dep_on_socclk; 200 struct smu10_voltage_dependency_table *vdd_dep_on_socclk;
201 struct rv_voltage_dependency_table *vdd_dep_on_fclk; 201 struct smu10_voltage_dependency_table *vdd_dep_on_fclk;
202 struct rv_voltage_dependency_table *vdd_dep_on_mclk; 202 struct smu10_voltage_dependency_table *vdd_dep_on_mclk;
203 struct rv_voltage_dependency_table *vdd_dep_on_dispclk; 203 struct smu10_voltage_dependency_table *vdd_dep_on_dispclk;
204 struct rv_voltage_dependency_table *vdd_dep_on_dppclk; 204 struct smu10_voltage_dependency_table *vdd_dep_on_dppclk;
205 struct rv_voltage_dependency_table *vdd_dep_on_phyclk; 205 struct smu10_voltage_dependency_table *vdd_dep_on_phyclk;
206}; 206};
207 207
208struct rv_hwmgr { 208struct smu10_hwmgr {
209 uint32_t disable_driver_thermal_policy; 209 uint32_t disable_driver_thermal_policy;
210 uint32_t thermal_auto_throttling_treshold; 210 uint32_t thermal_auto_throttling_treshold;
211 struct rv_system_info sys_info; 211 struct smu10_system_info sys_info;
212 struct rv_mclk_latency_table mclk_latency_table; 212 struct smu10_mclk_latency_table mclk_latency_table;
213 213
214 uint32_t ddi_power_gating_disabled; 214 uint32_t ddi_power_gating_disabled;
215 215
216 struct rv_display_phy_info_entry display_phy_info; 216 struct smu10_display_phy_info_entry display_phy_info;
217 uint32_t dce_slow_sclk_threshold; 217 uint32_t dce_slow_sclk_threshold;
218 218
219 bool disp_clk_bypass; 219 bool disp_clk_bypass;
@@ -255,10 +255,10 @@ struct rv_hwmgr {
255 uint32_t fps_low_threshold; 255 uint32_t fps_low_threshold;
256 256
257 uint32_t dpm_flags; 257 uint32_t dpm_flags;
258 struct rv_dpm_entry sclk_dpm; 258 struct smu10_dpm_entry sclk_dpm;
259 struct rv_dpm_entry uvd_dpm; 259 struct smu10_dpm_entry uvd_dpm;
260 struct rv_dpm_entry vce_dpm; 260 struct smu10_dpm_entry vce_dpm;
261 struct rv_dpm_entry acp_dpm; 261 struct smu10_dpm_entry acp_dpm;
262 bool acp_power_up_no_dsp; 262 bool acp_power_up_no_dsp;
263 263
264 uint32_t max_sclk_level; 264 uint32_t max_sclk_level;
@@ -291,7 +291,7 @@ struct rv_hwmgr {
291 291
292 bool gfx_off_controled_by_driver; 292 bool gfx_off_controled_by_driver;
293 Watermarks_t water_marks_table; 293 Watermarks_t water_marks_table;
294 struct rv_clock_voltage_information clock_vol_info; 294 struct smu10_clock_voltage_information clock_vol_info;
295 DpmClocks_t clock_table; 295 DpmClocks_t clock_table;
296 296
297 uint32_t active_process_mask; 297 uint32_t active_process_mask;
@@ -302,21 +302,21 @@ struct rv_hwmgr {
302 302
303struct pp_hwmgr; 303struct pp_hwmgr;
304 304
305int rv_init_function_pointers(struct pp_hwmgr *hwmgr); 305int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
306 306
307/* UMD PState Raven Msg Parameters in MHz */ 307/* UMD PState SMU10 Msg Parameters in MHz */
308#define RAVEN_UMD_PSTATE_GFXCLK 700 308#define SMU10_UMD_PSTATE_GFXCLK 700
309#define RAVEN_UMD_PSTATE_SOCCLK 626 309#define SMU10_UMD_PSTATE_SOCCLK 626
310#define RAVEN_UMD_PSTATE_FCLK 933 310#define SMU10_UMD_PSTATE_FCLK 933
311#define RAVEN_UMD_PSTATE_VCE 0x03C00320 311#define SMU10_UMD_PSTATE_VCE 0x03C00320
312 312
313#define RAVEN_UMD_PSTATE_PEAK_GFXCLK 1100 313#define SMU10_UMD_PSTATE_PEAK_GFXCLK 1100
314#define RAVEN_UMD_PSTATE_PEAK_SOCCLK 757 314#define SMU10_UMD_PSTATE_PEAK_SOCCLK 757
315#define RAVEN_UMD_PSTATE_PEAK_FCLK 1200 315#define SMU10_UMD_PSTATE_PEAK_FCLK 1200
316 316
317#define RAVEN_UMD_PSTATE_MIN_GFXCLK 200 317#define SMU10_UMD_PSTATE_MIN_GFXCLK 200
318#define RAVEN_UMD_PSTATE_MIN_FCLK 400 318#define SMU10_UMD_PSTATE_MIN_FCLK 400
319#define RAVEN_UMD_PSTATE_MIN_SOCCLK 200 319#define SMU10_UMD_PSTATE_MIN_SOCCLK 200
320#define RAVEN_UMD_PSTATE_MIN_VCE 0x0190012C 320#define SMU10_UMD_PSTATE_MIN_VCE 0x0190012C
321 321
322#endif 322#endif
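The SMU10_UMD_PSTATE_* values above give the default, peak and minimum clocks (in MHz, plus encoded VCE clocks) that the renamed SMU10 hwmgr hands out for UMD stable-pstate requests. A minimal standalone C sketch of how such a table could be consumed; the level-to-value mapping and the names here are illustrative only, the numbers come from the header:

#include <stdio.h>

enum profile_level { PROFILE_STANDARD, PROFILE_PEAK, PROFILE_MIN };

struct umd_pstate { unsigned gfxclk, socclk, fclk; };	/* MHz */

static struct umd_pstate pick_pstate(enum profile_level level)
{
	switch (level) {
	case PROFILE_PEAK:
		return (struct umd_pstate){ 1100, 757, 1200 };	/* *_PEAK_* values */
	case PROFILE_MIN:
		return (struct umd_pstate){ 200, 200, 400 };	/* *_MIN_* values */
	default:
		return (struct umd_pstate){ 700, 626, 933 };	/* default pstate values */
	}
}

int main(void)
{
	struct umd_pstate p = pick_pstate(PROFILE_STANDARD);

	printf("gfxclk %u MHz, socclk %u MHz, fclk %u MHz\n",
	       p.gfxclk, p.socclk, p.fclk);
	return 0;
}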
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
index ae59a3fdea8a..edb68e302f6f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef RAVEN_INC_H 24#ifndef SMU10_INC_H
25#define RAVEN_INC_H 25#define SMU10_INC_H
26 26
27 27
28#include "asic_reg/mp/mp_10_0_default.h" 28#include "asic_reg/mp/mp_10_0_default.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 69a0678ace98..f4cbaee4e2ca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -162,7 +162,7 @@ void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
162 AMD_CG_STATE_UNGATE); 162 AMD_CG_STATE_UNGATE);
163 cgs_set_powergating_state(hwmgr->device, 163 cgs_set_powergating_state(hwmgr->device,
164 AMD_IP_BLOCK_TYPE_UVD, 164 AMD_IP_BLOCK_TYPE_UVD,
165 AMD_CG_STATE_UNGATE); 165 AMD_PG_STATE_UNGATE);
166 smu7_update_uvd_dpm(hwmgr, false); 166 smu7_update_uvd_dpm(hwmgr, false);
167 } 167 }
168 168
@@ -472,23 +472,12 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
472 */ 472 */
473int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) 473int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
474{ 474{
475 struct cgs_system_info sys_info = {0}; 475 struct amdgpu_device *adev = hwmgr->adev;
476 uint32_t active_cus;
477 int result;
478
479 sys_info.size = sizeof(struct cgs_system_info);
480 sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
481
482 result = cgs_query_system_info(hwmgr->device, &sys_info);
483
484 if (result)
485 return -EINVAL;
486
487 active_cus = sys_info.value;
488 476
489 if (enable) 477 if (enable)
490 return smum_send_msg_to_smc_with_parameter(hwmgr, 478 return smum_send_msg_to_smc_with_parameter(hwmgr,
491 PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus); 479 PPSMC_MSG_GFX_CU_PG_ENABLE,
480 adev->gfx.cu_info.number);
492 else 481 else
493 return smum_send_msg_to_smc(hwmgr, 482 return smum_send_msg_to_smc(hwmgr,
494 PPSMC_MSG_GFX_CU_PG_DISABLE); 483 PPSMC_MSG_GFX_CU_PG_DISABLE);
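The hunk above drops the cgs_query_system_info() round trip and passes the active-CU count straight from adev->gfx.cu_info.number to PPSMC_MSG_GFX_CU_PG_ENABLE. A standalone C model of the resulting control flow; send_msg() and the MSG_* constants are stand-ins, not the real smum_* interface:

#include <stdio.h>

enum { MSG_GFX_CU_PG_ENABLE = 1, MSG_GFX_CU_PG_DISABLE = 2 };

/* Stand-in for smum_send_msg_to_smc[_with_parameter](). */
static int send_msg(int msg, unsigned parameter)
{
	printf("SMC msg %d, parameter %u\n", msg, parameter);
	return 0;
}

static int enable_per_cu_power_gating(unsigned active_cu_count, int enable)
{
	if (enable)
		return send_msg(MSG_GFX_CU_PG_ENABLE, active_cu_count);
	return send_msg(MSG_GFX_CU_PG_DISABLE, 0);
}

int main(void)
{
	return enable_per_cu_power_gating(36, 1);	/* e.g. a 36-CU part */
}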
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 7b54d48b2ce2..1ddce023218a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -25,7 +25,6 @@
25#define _SMU7_CLOCK_POWER_GATING_H_ 25#define _SMU7_CLOCK_POWER_GATING_H_
26 26
27#include "smu7_hwmgr.h" 27#include "smu7_hwmgr.h"
28#include "pp_asicblocks.h"
29 28
30void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); 29void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
31void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); 30void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
index f967613191cf..3477d4dfff70 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
@@ -50,6 +50,6 @@
50#define SMU7_CGULVCONTROL_DFLT 0x00007450 50#define SMU7_CGULVCONTROL_DFLT 0x00007450
51#define SMU7_TARGETACTIVITY_DFLT 50 51#define SMU7_TARGETACTIVITY_DFLT 50
52#define SMU7_MCLK_TARGETACTIVITY_DFLT 10 52#define SMU7_MCLK_TARGETACTIVITY_DFLT 10
53 53#define SMU7_SCLK_TARGETACTIVITY_DFLT 30
54#endif 54#endif
55 55
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 41e42beff213..7a87209f7258 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -27,7 +27,6 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <asm/div64.h> 28#include <asm/div64.h>
29#include <drm/amdgpu_drm.h> 29#include <drm/amdgpu_drm.h>
30#include "pp_acpi.h"
31#include "ppatomctrl.h" 30#include "ppatomctrl.h"
32#include "atombios.h" 31#include "atombios.h"
33#include "pptable_v1_0.h" 32#include "pptable_v1_0.h"
@@ -41,13 +40,13 @@
41 40
42#include "hwmgr.h" 41#include "hwmgr.h"
43#include "smu7_hwmgr.h" 42#include "smu7_hwmgr.h"
44#include "smu7_smumgr.h"
45#include "smu_ucode_xfer_vi.h" 43#include "smu_ucode_xfer_vi.h"
46#include "smu7_powertune.h" 44#include "smu7_powertune.h"
47#include "smu7_dyn_defaults.h" 45#include "smu7_dyn_defaults.h"
48#include "smu7_thermal.h" 46#include "smu7_thermal.h"
49#include "smu7_clockpowergating.h" 47#include "smu7_clockpowergating.h"
50#include "processpptables.h" 48#include "processpptables.h"
49#include "pp_thermal.h"
51 50
52#define MC_CG_ARB_FREQ_F0 0x0a 51#define MC_CG_ARB_FREQ_F0 0x0a
53#define MC_CG_ARB_FREQ_F1 0x0b 52#define MC_CG_ARB_FREQ_F1 0x0b
@@ -80,6 +79,13 @@
80#define PCIE_BUS_CLK 10000 79#define PCIE_BUS_CLK 10000
81#define TCLK (PCIE_BUS_CLK / 10) 80#define TCLK (PCIE_BUS_CLK / 10)
82 81
82static const struct profile_mode_setting smu7_profiling[5] =
83 {{1, 0, 100, 30, 1, 0, 100, 10},
84 {1, 10, 0, 30, 0, 0, 0, 0},
85 {0, 0, 0, 0, 1, 10, 16, 31},
86 {1, 0, 11, 50, 1, 0, 100, 10},
87 {1, 0, 5, 30, 0, 0, 0, 0},
88 };
83 89
84/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ 90/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
85enum DPM_EVENT_SRC { 91enum DPM_EVENT_SRC {
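The smu7_profiling[] rows added above appear to encode, per power profile, whether the SCLK/MCLK settings are applied plus their up/down hysteresis and target activity; that reading matches both the defaults set later in smu7_init_dpm_defaults() and the columns printed by smu7_get_power_profile_mode(). A readable standalone mirror of the table (the struct layout here is illustrative, not the kernel definition):

#include <stdio.h>
#include <stdint.h>

struct profile_row {
	uint8_t  bupdate_sclk;
	uint8_t  sclk_up_hyst, sclk_down_hyst;
	uint16_t sclk_activity;
	uint8_t  bupdate_mclk;
	uint8_t  mclk_up_hyst, mclk_down_hyst;
	uint16_t mclk_activity;
};

static const struct profile_row rows[5] = {
	{1,  0, 100, 30, 1,  0, 100, 10},	/* 3D_FULL_SCREEN */
	{1, 10,   0, 30, 0,  0,   0,  0},	/* POWER_SAVING   */
	{0,  0,   0,  0, 1, 10,  16, 31},	/* VIDEO          */
	{1,  0,  11, 50, 1,  0, 100, 10},	/* VR             */
	{1,  0,   5, 30, 0,  0,   0,  0},	/* COMPUTE        */
};

int main(void)
{
	for (unsigned i = 0; i < 5; i++)
		printf("%u: sclk hyst %d/%d act %d%%, mclk hyst %d/%d act %d%%\n",
		       i, rows[i].sclk_up_hyst, rows[i].sclk_down_hyst,
		       rows[i].sclk_activity, rows[i].mclk_up_hyst,
		       rows[i].mclk_down_hyst, rows[i].mclk_activity);
	return 0;
}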
@@ -90,7 +96,6 @@ enum DPM_EVENT_SRC {
90 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 96 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
91}; 97};
92 98
93static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable);
94static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); 99static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
95static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, 100static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
96 enum pp_clock_type type, uint32_t mask); 101 enum pp_clock_type type, uint32_t mask);
@@ -792,6 +797,76 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
792 return 0; 797 return 0;
793} 798}
794 799
800static int smu7_get_voltage_dependency_table(
801 const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
802 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
803{
804 uint8_t i = 0;
805 PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
806 "Voltage Lookup Table empty",
807 return -EINVAL);
808
809 dep_table->count = allowed_dep_table->count;
810 for (i=0; i<dep_table->count; i++) {
811 dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
812 dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
813 dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
814 dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
815 dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
816 dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
817 dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
818 dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
819 dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
820 dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
821 }
822
823 return 0;
824}
825
826static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
827{
828 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
829 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
830 struct phm_ppt_v1_information *table_info =
831 (struct phm_ppt_v1_information *)(hwmgr->pptable);
832 uint32_t i;
833
834 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
835 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
836
837 if (table_info == NULL)
838 return -EINVAL;
839
840 dep_sclk_table = table_info->vdd_dep_on_sclk;
841 dep_mclk_table = table_info->vdd_dep_on_mclk;
842
843 odn_table->odn_core_clock_dpm_levels.num_of_pl =
844 data->golden_dpm_table.sclk_table.count;
845 for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
846 odn_table->odn_core_clock_dpm_levels.entries[i].clock =
847 data->golden_dpm_table.sclk_table.dpm_levels[i].value;
848 odn_table->odn_core_clock_dpm_levels.entries[i].enabled = true;
849 odn_table->odn_core_clock_dpm_levels.entries[i].vddc = dep_sclk_table->entries[i].vddc;
850 }
851
852 smu7_get_voltage_dependency_table(dep_sclk_table,
853 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
854
855 odn_table->odn_memory_clock_dpm_levels.num_of_pl =
856 data->golden_dpm_table.mclk_table.count;
857 for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
858 odn_table->odn_memory_clock_dpm_levels.entries[i].clock =
859 data->golden_dpm_table.mclk_table.dpm_levels[i].value;
860 odn_table->odn_memory_clock_dpm_levels.entries[i].enabled = true;
861 odn_table->odn_memory_clock_dpm_levels.entries[i].vddc = dep_mclk_table->entries[i].vddc;
862 }
863
864 smu7_get_voltage_dependency_table(dep_mclk_table,
865 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
866
867 return 0;
868}
869
795static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) 870static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
796{ 871{
797 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 872 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -808,6 +883,11 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
808 /* save a copy of the default DPM table */ 883 /* save a copy of the default DPM table */
809 memcpy(&(data->golden_dpm_table), &(data->dpm_table), 884 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
810 sizeof(struct smu7_dpm_table)); 885 sizeof(struct smu7_dpm_table));
886
887 /* initialize ODN table */
888 if (hwmgr->od_enabled)
889 smu7_odn_initial_default_setting(hwmgr);
890
811 return 0; 891 return 0;
812} 892}
813 893
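smu7_odn_initial_default_setting() above seeds the user-editable OverDrive (ODN) tables from the saved "golden" DPM levels and the matching voltage-dependency entries. A minimal sketch of that seeding for one clock domain, with simplified stand-in types and clocks kept in the 10 kHz units implied by the "/ 100" MHz prints later in the patch:

#include <stdint.h>
#include <stdio.h>

struct odn_entry { uint32_t clock; uint32_t vddc; int enabled; };

/* Copy default levels + dependency voltages into the editable ODN table. */
static void seed_odn(struct odn_entry *odn, uint32_t *odn_count,
		     const uint32_t *golden_clk, const uint32_t *dep_vddc,
		     uint32_t count)
{
	*odn_count = count;
	for (uint32_t i = 0; i < count; i++) {
		odn[i].clock = golden_clk[i];
		odn[i].vddc = dep_vddc[i];
		odn[i].enabled = 1;
	}
}

int main(void)
{
	uint32_t golden_sclk[3] = { 30000, 60000, 100000 };	/* 10 kHz units */
	uint32_t dep_vddc[3]    = { 800, 950, 1150 };		/* mV */
	struct odn_entry odn[3];
	uint32_t n;

	seed_odn(odn, &n, golden_sclk, dep_vddc, 3);
	for (uint32_t i = 0; i < n; i++)
		printf("level %u: %u MHz @ %u mV\n", i, odn[i].clock / 100, odn[i].vddc);
	return 0;
}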
@@ -1164,11 +1244,6 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1164 int tmp_result = 0; 1244 int tmp_result = 0;
1165 int result = 0; 1245 int result = 0;
1166 1246
1167 tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
1168 PP_ASSERT_WITH_CODE(tmp_result == 0,
1169 "DPM is already running",
1170 );
1171
1172 if (smu7_voltage_control(hwmgr)) { 1247 if (smu7_voltage_control(hwmgr)) {
1173 tmp_result = smu7_enable_voltage_control(hwmgr); 1248 tmp_result = smu7_enable_voltage_control(hwmgr);
1174 PP_ASSERT_WITH_CODE(tmp_result == 0, 1249 PP_ASSERT_WITH_CODE(tmp_result == 0,
@@ -1275,15 +1350,53 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1275 return 0; 1350 return 0;
1276} 1351}
1277 1352
1353static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1354{
1355 if (!hwmgr->avfs_supported)
1356 return 0;
1357
1358 if (enable) {
1359 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1360 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1361 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1362 hwmgr, PPSMC_MSG_EnableAvfs),
1363 "Failed to enable AVFS!",
1364 return -EINVAL);
1365 }
1366 } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1367 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1368 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1369 hwmgr, PPSMC_MSG_DisableAvfs),
1370 "Failed to disable AVFS!",
1371 return -EINVAL);
1372 }
1373
1374 return 0;
1375}
1376
1377static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1378{
1379 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1380
1381 if (!hwmgr->avfs_supported)
1382 return 0;
1383
1384 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1385 smu7_avfs_control(hwmgr, false);
1386 } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1387 smu7_avfs_control(hwmgr, false);
1388 smu7_avfs_control(hwmgr, true);
1389 } else {
1390 smu7_avfs_control(hwmgr, true);
1391 }
1392
1393 return 0;
1394}
1395
1278int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 1396int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1279{ 1397{
1280 int tmp_result, result = 0; 1398 int tmp_result, result = 0;
1281 1399
1282 tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
1283 PP_ASSERT_WITH_CODE(tmp_result == 0,
1284 "DPM is not running right now, no need to disable DPM!",
1285 return 0);
1286
1287 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1400 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1288 PHM_PlatformCaps_ThermalController)) 1401 PHM_PlatformCaps_ThermalController))
1289 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 1402 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
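smu7_update_avfs() above keys off the OverDrive flags: a pending VDDC edit turns AVFS off, a pending SCLK edit toggles it off and back on (presumably so the voltage/frequency settings are re-evaluated), and otherwise AVFS is simply kept enabled. The same decision as a standalone function; the flag values are stand-ins for the DPMTABLE_OD_UPDATE_* bits:

#include <stdio.h>

#define OD_UPDATE_SCLK (1u << 0)
#define OD_UPDATE_VDDC (1u << 2)

static void avfs_control(int enable) { printf("AVFS %s\n", enable ? "on" : "off"); }

static void update_avfs(unsigned od_flags)
{
	if (od_flags & OD_UPDATE_VDDC) {
		avfs_control(0);		/* voltage edited: leave AVFS off */
	} else if (od_flags & OD_UPDATE_SCLK) {
		avfs_control(0);		/* clock edited: cycle AVFS */
		avfs_control(1);
	} else {
		avfs_control(1);		/* no OD change: keep AVFS on */
	}
}

int main(void)
{
	update_avfs(OD_UPDATE_SCLK);
	return 0;
}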
@@ -1352,12 +1465,10 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1352 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1465 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1353 struct phm_ppt_v1_information *table_info = 1466 struct phm_ppt_v1_information *table_info =
1354 (struct phm_ppt_v1_information *)(hwmgr->pptable); 1467 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1355 struct cgs_system_info sys_info = {0}; 1468 struct amdgpu_device *adev = hwmgr->adev;
1356 int result;
1357 1469
1358 data->dll_default_on = false; 1470 data->dll_default_on = false;
1359 data->mclk_dpm0_activity_target = 0xa; 1471 data->mclk_dpm0_activity_target = 0xa;
1360 data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
1361 data->vddc_vddgfx_delta = 300; 1472 data->vddc_vddgfx_delta = 300;
1362 data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; 1473 data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1363 data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; 1474 data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
@@ -1381,6 +1492,17 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1381 data->enable_pkg_pwr_tracking_feature = true; 1492 data->enable_pkg_pwr_tracking_feature = true;
1382 data->force_pcie_gen = PP_PCIEGenInvalid; 1493 data->force_pcie_gen = PP_PCIEGenInvalid;
1383 data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; 1494 data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
1495 data->current_profile_setting.bupdate_sclk = 1;
1496 data->current_profile_setting.sclk_up_hyst = 0;
1497 data->current_profile_setting.sclk_down_hyst = 100;
1498 data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
1499 data->current_profile_setting.bupdate_sclk = 1;
1500 data->current_profile_setting.mclk_up_hyst = 0;
1501 data->current_profile_setting.mclk_down_hyst = 100;
1502 data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
1503 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
1504 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1505 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1384 1506
1385 if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) { 1507 if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
1386 uint8_t tmp1, tmp2; 1508 uint8_t tmp1, tmp2;
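The new defaults above also pick the boot power profile: the workload mask gets the bit corresponding to the FULLSCREEN3D profile's priority index. A tiny illustration with a made-up priority table:

#include <stdio.h>

enum { PROFILE_FULLSCREEN3D, PROFILE_POWERSAVING, PROFILE_VIDEO,
       PROFILE_VR, PROFILE_COMPUTE, PROFILE_COUNT };

int main(void)
{
	unsigned priority[PROFILE_COUNT] = { 0, 1, 2, 3, 4 };	/* hypothetical ordering */
	unsigned workload_mask = 1u << priority[PROFILE_FULLSCREEN3D];

	printf("workload_mask = 0x%x\n", workload_mask);
	return 0;
}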
@@ -1467,17 +1589,13 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1467 data->pcie_lane_power_saving.max = 0; 1589 data->pcie_lane_power_saving.max = 0;
1468 data->pcie_lane_power_saving.min = 16; 1590 data->pcie_lane_power_saving.min = 16;
1469 1591
1470 sys_info.size = sizeof(struct cgs_system_info); 1592
1471 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; 1593 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1472 result = cgs_query_system_info(hwmgr->device, &sys_info); 1594 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1473 if (!result) { 1595 PHM_PlatformCaps_UVDPowerGating);
1474 if (sys_info.value & AMD_PG_SUPPORT_UVD) 1596 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
1475 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1597 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1476 PHM_PlatformCaps_UVDPowerGating); 1598 PHM_PlatformCaps_VCEPowerGating);
1477 if (sys_info.value & AMD_PG_SUPPORT_VCE)
1478 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1479 PHM_PlatformCaps_VCEPowerGating);
1480 }
1481} 1599}
1482 1600
1483/** 1601/**
@@ -1912,7 +2030,7 @@ static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
1912 struct phm_ppt_v1_voltage_lookup_table *lookup_table; 2030 struct phm_ppt_v1_voltage_lookup_table *lookup_table;
1913 uint32_t i; 2031 uint32_t i;
1914 uint32_t hw_revision, sub_vendor_id, sub_sys_id; 2032 uint32_t hw_revision, sub_vendor_id, sub_sys_id;
1915 struct cgs_system_info sys_info = {0}; 2033 struct amdgpu_device *adev = hwmgr->adev;
1916 2034
1917 if (table_info != NULL) { 2035 if (table_info != NULL) {
1918 dep_mclk_table = table_info->vdd_dep_on_mclk; 2036 dep_mclk_table = table_info->vdd_dep_on_mclk;
@@ -1920,19 +2038,9 @@ static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
1920 } else 2038 } else
1921 return 0; 2039 return 0;
1922 2040
1923 sys_info.size = sizeof(struct cgs_system_info); 2041 hw_revision = adev->pdev->revision;
1924 2042 sub_sys_id = adev->pdev->subsystem_device;
1925 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; 2043 sub_vendor_id = adev->pdev->subsystem_vendor;
1926 cgs_query_system_info(hwmgr->device, &sys_info);
1927 hw_revision = (uint32_t)sys_info.value;
1928
1929 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
1930 cgs_query_system_info(hwmgr->device, &sys_info);
1931 sub_sys_id = (uint32_t)sys_info.value;
1932
1933 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
1934 cgs_query_system_info(hwmgr->device, &sys_info);
1935 sub_vendor_id = (uint32_t)sys_info.value;
1936 2044
1937 if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 && 2045 if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
1938 ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) || 2046 ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
@@ -2266,14 +2374,18 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2266 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; 2374 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2267 2375
2268 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL, 2376 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2269 "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL); 2377 "VDDC dependency on SCLK table is missing. This table is mandatory",
2378 return -EINVAL);
2270 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1, 2379 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2271 "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL); 2380 "VDDC dependency on SCLK table has to have is missing. This table is mandatory",
2381 return -EINVAL);
2272 2382
2273 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL, 2383 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2274 "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL); 2384 "VDDC dependency on MCLK table is missing. This table is mandatory",
2385 return -EINVAL);
2275 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1, 2386 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2276 "VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL); 2387 "VDD dependency on MCLK table has to have is missing. This table is mandatory",
2388 return -EINVAL);
2277 2389
2278 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v; 2390 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2279 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 2391 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
@@ -2371,7 +2483,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2371 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); 2483 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2372 2484
2373 if (0 == result) { 2485 if (0 == result) {
2374 struct cgs_system_info sys_info = {0}; 2486 struct amdgpu_device *adev = hwmgr->adev;
2375 2487
2376 data->is_tlu_enabled = false; 2488 data->is_tlu_enabled = false;
2377 2489
@@ -2380,22 +2492,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2380 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; 2492 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2381 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; 2493 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2382 2494
2383 sys_info.size = sizeof(struct cgs_system_info); 2495 data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2384 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
2385 result = cgs_query_system_info(hwmgr->device, &sys_info);
2386 if (result)
2387 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2388 else
2389 data->pcie_gen_cap = (uint32_t)sys_info.value;
2390 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 2496 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2391 data->pcie_spc_cap = 20; 2497 data->pcie_spc_cap = 20;
2392 sys_info.size = sizeof(struct cgs_system_info); 2498 data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2393 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
2394 result = cgs_query_system_info(hwmgr->device, &sys_info);
2395 if (result)
2396 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2397 else
2398 data->pcie_lane_cap = (uint32_t)sys_info.value;
2399 2499
2400 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ 2500 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2401/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ 2501/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
@@ -2574,8 +2674,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
2574 break; 2674 break;
2575 } 2675 }
2576 } 2676 }
2577 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) 2677 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2578 *sclk_mask = 0; 2678 *sclk_mask = 0;
2679 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
2680 }
2579 2681
2580 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 2682 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2581 *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; 2683 *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
@@ -2590,8 +2692,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
2590 break; 2692 break;
2591 } 2693 }
2592 } 2694 }
2593 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) 2695 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2594 *sclk_mask = 0; 2696 *sclk_mask = 0;
2697 tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2698 }
2595 2699
2596 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 2700 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2597 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; 2701 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
@@ -2603,6 +2707,9 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
2603 *mclk_mask = golden_dpm_table->mclk_table.count - 1; 2707 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
2604 2708
2605 *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; 2709 *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
2710 hwmgr->pstate_sclk = tmp_sclk;
2711 hwmgr->pstate_mclk = tmp_mclk;
2712
2606 return 0; 2713 return 0;
2607} 2714}
2608 2715
@@ -2614,6 +2721,9 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2614 uint32_t mclk_mask = 0; 2721 uint32_t mclk_mask = 0;
2615 uint32_t pcie_mask = 0; 2722 uint32_t pcie_mask = 0;
2616 2723
2724 if (hwmgr->pstate_sclk == 0)
2725 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2726
2617 switch (level) { 2727 switch (level) {
2618 case AMD_DPM_FORCED_LEVEL_HIGH: 2728 case AMD_DPM_FORCED_LEVEL_HIGH:
2619 ret = smu7_force_dpm_highest(hwmgr); 2729 ret = smu7_force_dpm_highest(hwmgr);
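smu7_force_dpm_level() now computes the stable-pstate clocks lazily: the first call finds pstate_sclk == 0 and runs smu7_get_profiling_clk() to fill the cache. The same pattern in standalone form, with a fake "expensive" lookup:

#include <stdio.h>
#include <stdint.h>

static uint32_t pstate_sclk;	/* 0 means "not computed yet" */

static uint32_t compute_profiling_sclk(void)
{
	printf("scanning dependency tables...\n");
	return 60000;		/* e.g. 600 MHz in 10 kHz units */
}

static void force_dpm_level(void)
{
	if (pstate_sclk == 0)
		pstate_sclk = compute_profiling_sclk();
	printf("pstate sclk = %u\n", pstate_sclk);
}

int main(void)
{
	force_dpm_level();	/* computes and caches */
	force_dpm_level();	/* uses the cached value */
	return 0;
}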
@@ -2756,10 +2866,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2756 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); 2866 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2757 2867
2758 2868
2759 disable_mclk_switching = ((1 < info.display_count) || 2869 if (info.display_count == 0)
2760 disable_mclk_switching_for_frame_lock || 2870 disable_mclk_switching = false;
2761 smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || 2871 else
2762 (mode_info.refresh_rate > 120)); 2872 disable_mclk_switching = ((1 < info.display_count) ||
2873 disable_mclk_switching_for_frame_lock ||
2874 smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us));
2763 2875
2764 sclk = smu7_ps->performance_levels[0].engine_clock; 2876 sclk = smu7_ps->performance_levels[0].engine_clock;
2765 mclk = smu7_ps->performance_levels[0].memory_clock; 2877 mclk = smu7_ps->performance_levels[0].memory_clock;
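After this hunk the MCLK-switching decision is: with no active display, switching stays allowed; otherwise it is disabled for multi-display, frame lock, or a too-short vblank, and the old "refresh rate > 120" test is gone. The rule as a standalone predicate:

#include <stdbool.h>
#include <stdio.h>

static bool disable_mclk_switching(unsigned display_count,
				   bool frame_lock,
				   bool vblank_too_short)
{
	if (display_count == 0)
		return false;
	return display_count > 1 || frame_lock || vblank_too_short;
}

int main(void)
{
	printf("%d\n", disable_mclk_switching(2, false, false));	/* 1: disabled */
	printf("%d\n", disable_mclk_switching(0, true, true));		/* 0: allowed */
	return 0;
}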
@@ -3312,7 +3424,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3312 void *value, int *size) 3424 void *value, int *size)
3313{ 3425{
3314 uint32_t sclk, mclk, activity_percent; 3426 uint32_t sclk, mclk, activity_percent;
3315 uint32_t offset; 3427 uint32_t offset, val_vid;
3316 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3428 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3317 3429
3318 /* size must be at least 4 bytes for all sensors */ 3430 /* size must be at least 4 bytes for all sensors */
@@ -3360,6 +3472,16 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3360 return -EINVAL; 3472 return -EINVAL;
3361 *size = sizeof(struct pp_gpu_power); 3473 *size = sizeof(struct pp_gpu_power);
3362 return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value); 3474 return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
3475 case AMDGPU_PP_SENSOR_VDDGFX:
3476 if ((data->vr_config & 0xff) == 0x2)
3477 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3478 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
3479 else
3480 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3481 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);
3482
3483 *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
3484 return 0;
3363 default: 3485 default:
3364 return -EINVAL; 3486 return -EINVAL;
3365 } 3487 }
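The new AMDGPU_PP_SENSOR_VDDGFX case reads the PLANE2 VID when (vr_config & 0xff) == 0x2 and PLANE1 otherwise, then converts the VID code to a voltage. A standalone model; the conversion below assumes the usual SVI2 encoding (1550 mV - 6.25 mV per VID step), whereas the kernel uses its own convert_to_vddc():

#include <stdint.h>
#include <stdio.h>

/* Assumed SVI2 decode: 1550 mV minus 6.25 mV per VID step. */
static uint32_t vid_to_mv(uint32_t vid)
{
	return (6200 - vid * 25) / 4;
}

static uint32_t read_vddgfx_mv(uint32_t vr_config,
			       uint32_t plane1_vid, uint32_t plane2_vid)
{
	uint32_t vid = ((vr_config & 0xff) == 0x2) ? plane2_vid : plane1_vid;

	return vid_to_mv(vid);
}

int main(void)
{
	printf("%u mV\n", read_vddgfx_mv(0x02, 0x40, 0x48));	/* picks plane 2 */
	return 0;
}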
@@ -3382,8 +3504,6 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
3382 uint32_t i; 3504 uint32_t i;
3383 struct cgs_display_info info = {0}; 3505 struct cgs_display_info info = {0};
3384 3506
3385 data->need_update_smu7_dpm_table = 0;
3386
3387 for (i = 0; i < sclk_table->count; i++) { 3507 for (i = 0; i < sclk_table->count; i++) {
3388 if (sclk == sclk_table->dpm_levels[i].value) 3508 if (sclk == sclk_table->dpm_levels[i].value)
3389 break; 3509 break;
@@ -3466,15 +3586,17 @@ static int smu7_request_link_speed_change_before_state_change(
3466 3586
3467 if (target_link_speed > current_link_speed) { 3587 if (target_link_speed > current_link_speed) {
3468 switch (target_link_speed) { 3588 switch (target_link_speed) {
3589#ifdef CONFIG_ACPI
3469 case PP_PCIEGen3: 3590 case PP_PCIEGen3:
3470 if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) 3591 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
3471 break; 3592 break;
3472 data->force_pcie_gen = PP_PCIEGen2; 3593 data->force_pcie_gen = PP_PCIEGen2;
3473 if (current_link_speed == PP_PCIEGen2) 3594 if (current_link_speed == PP_PCIEGen2)
3474 break; 3595 break;
3475 case PP_PCIEGen2: 3596 case PP_PCIEGen2:
3476 if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) 3597 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
3477 break; 3598 break;
3599#endif
3478 default: 3600 default:
3479 data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); 3601 data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
3480 break; 3602 break;
@@ -3525,108 +3647,27 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3525 struct pp_hwmgr *hwmgr, const void *input) 3647 struct pp_hwmgr *hwmgr, const void *input)
3526{ 3648{
3527 int result = 0; 3649 int result = 0;
3528 const struct phm_set_power_state_input *states =
3529 (const struct phm_set_power_state_input *)input;
3530 const struct smu7_power_state *smu7_ps =
3531 cast_const_phw_smu7_power_state(states->pnew_state);
3532 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3650 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3533 uint32_t sclk = smu7_ps->performance_levels
3534 [smu7_ps->performance_level_count - 1].engine_clock;
3535 uint32_t mclk = smu7_ps->performance_levels
3536 [smu7_ps->performance_level_count - 1].memory_clock;
3537 struct smu7_dpm_table *dpm_table = &data->dpm_table; 3651 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3538 3652 uint32_t count;
3539 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; 3653 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
3540 uint32_t dpm_count, clock_percent; 3654 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
3541 uint32_t i; 3655 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
3542 3656
3543 if (0 == data->need_update_smu7_dpm_table) 3657 if (0 == data->need_update_smu7_dpm_table)
3544 return 0; 3658 return 0;
3545 3659
3546 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { 3660 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3547 dpm_table->sclk_table.dpm_levels 3661 for (count = 0; count < dpm_table->sclk_table.count; count++) {
3548 [dpm_table->sclk_table.count - 1].value = sclk; 3662 dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
3549 3663 dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
3550 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
3551 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
3552 /* Need to do calculation based on the golden DPM table
3553 * as the Heatmap GPU Clock axis is also based on the default values
3554 */
3555 PP_ASSERT_WITH_CODE(
3556 (golden_dpm_table->sclk_table.dpm_levels
3557 [golden_dpm_table->sclk_table.count - 1].value != 0),
3558 "Divide by 0!",
3559 return -EINVAL);
3560 dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
3561
3562 for (i = dpm_count; i > 1; i--) {
3563 if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
3564 clock_percent =
3565 ((sclk
3566 - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
3567 ) * 100)
3568 / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
3569
3570 dpm_table->sclk_table.dpm_levels[i].value =
3571 golden_dpm_table->sclk_table.dpm_levels[i].value +
3572 (golden_dpm_table->sclk_table.dpm_levels[i].value *
3573 clock_percent)/100;
3574
3575 } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
3576 clock_percent =
3577 ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
3578 - sclk) * 100)
3579 / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
3580
3581 dpm_table->sclk_table.dpm_levels[i].value =
3582 golden_dpm_table->sclk_table.dpm_levels[i].value -
3583 (golden_dpm_table->sclk_table.dpm_levels[i].value *
3584 clock_percent) / 100;
3585 } else
3586 dpm_table->sclk_table.dpm_levels[i].value =
3587 golden_dpm_table->sclk_table.dpm_levels[i].value;
3588 }
3589 } 3664 }
3590 } 3665 }
3591 3666
3592 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { 3667 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3593 dpm_table->mclk_table.dpm_levels 3668 for (count = 0; count < dpm_table->mclk_table.count; count++) {
3594 [dpm_table->mclk_table.count - 1].value = mclk; 3669 dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
3595 3670 dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
3596 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
3597 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
3598
3599 PP_ASSERT_WITH_CODE(
3600 (golden_dpm_table->mclk_table.dpm_levels
3601 [golden_dpm_table->mclk_table.count-1].value != 0),
3602 "Divide by 0!",
3603 return -EINVAL);
3604 dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
3605 for (i = dpm_count; i > 1; i--) {
3606 if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
3607 clock_percent = ((mclk -
3608 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
3609 / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
3610
3611 dpm_table->mclk_table.dpm_levels[i].value =
3612 golden_dpm_table->mclk_table.dpm_levels[i].value +
3613 (golden_dpm_table->mclk_table.dpm_levels[i].value *
3614 clock_percent) / 100;
3615
3616 } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
3617 clock_percent = (
3618 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
3619 * 100)
3620 / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
3621
3622 dpm_table->mclk_table.dpm_levels[i].value =
3623 golden_dpm_table->mclk_table.dpm_levels[i].value -
3624 (golden_dpm_table->mclk_table.dpm_levels[i].value *
3625 clock_percent) / 100;
3626 } else
3627 dpm_table->mclk_table.dpm_levels[i].value =
3628 golden_dpm_table->mclk_table.dpm_levels[i].value;
3629 }
3630 } 3671 }
3631 } 3672 }
3632 3673
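With the rewrite above, uploading OverDrive levels becomes a plain copy from the ODN table into the live DPM table, replacing the old rescale-against-the-golden-table math. A simplified standalone version for one domain:

#include <stdint.h>
#include <stdio.h>

struct level { uint32_t value; int enabled; };
struct odn_level { uint32_t clock; int enabled; };

static void apply_odn(struct level *dpm, const struct odn_level *odn,
		      uint32_t count)
{
	for (uint32_t i = 0; i < count; i++) {
		dpm[i].value = odn[i].clock;
		dpm[i].enabled = odn[i].enabled;
	}
}

int main(void)
{
	struct level dpm[2] = { {30000, 1}, {100000, 1} };
	struct odn_level odn[2] = { {30000, 1}, {110000, 1} };	/* user raised the top level */

	apply_odn(dpm, odn, 2);
	printf("top level now %u\n", dpm[1].value);
	return 0;
}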
@@ -3748,7 +3789,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3748 return -EINVAL); 3789 return -EINVAL);
3749 } 3790 }
3750 3791
3751 data->need_update_smu7_dpm_table = 0; 3792 data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
3752 3793
3753 return 0; 3794 return 0;
3754} 3795}
@@ -3776,12 +3817,14 @@ static int smu7_notify_link_speed_change_after_state_change(
3776 smu7_get_current_pcie_speed(hwmgr) > 0) 3817 smu7_get_current_pcie_speed(hwmgr) > 0)
3777 return 0; 3818 return 0;
3778 3819
3779 if (acpi_pcie_perf_request(hwmgr->device, request, false)) { 3820#ifdef CONFIG_ACPI
3821 if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
3780 if (PP_PCIEGen2 == target_link_speed) 3822 if (PP_PCIEGen2 == target_link_speed)
3781 pr_info("PSPP request to switch to Gen2 from Gen3 Failed!"); 3823 pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
3782 else 3824 else
3783 pr_info("PSPP request to switch to Gen1 from Gen2 Failed!"); 3825 pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
3784 } 3826 }
3827#endif
3785 } 3828 }
3786 3829
3787 return 0; 3830 return 0;
@@ -3825,6 +3868,11 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
3825 "Failed to populate and upload SCLK MCLK DPM levels!", 3868 "Failed to populate and upload SCLK MCLK DPM levels!",
3826 result = tmp_result); 3869 result = tmp_result);
3827 3870
3871 tmp_result = smu7_update_avfs(hwmgr);
3872 PP_ASSERT_WITH_CODE((0 == tmp_result),
3873 "Failed to update avfs voltages!",
3874 result = tmp_result);
3875
3828 tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input); 3876 tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
3829 PP_ASSERT_WITH_CODE((0 == tmp_result), 3877 PP_ASSERT_WITH_CODE((0 == tmp_result),
3830 "Failed to generate DPM level enabled mask!", 3878 "Failed to generate DPM level enabled mask!",
@@ -4016,6 +4064,7 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4016 const struct smu7_power_state *psa; 4064 const struct smu7_power_state *psa;
4017 const struct smu7_power_state *psb; 4065 const struct smu7_power_state *psb;
4018 int i; 4066 int i;
4067 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4019 4068
4020 if (pstate1 == NULL || pstate2 == NULL || equal == NULL) 4069 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4021 return -EINVAL; 4070 return -EINVAL;
@@ -4040,6 +4089,10 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4040 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); 4089 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4041 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); 4090 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4042 *equal &= (psa->sclk_threshold == psb->sclk_threshold); 4091 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4092 /* For OD call, set value based on flag */
4093 *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4094 DPMTABLE_OD_UPDATE_MCLK |
4095 DPMTABLE_OD_UPDATE_VDDC));
4043 4096
4044 return 0; 4097 return 0;
4045} 4098}
@@ -4211,9 +4264,7 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4211{ 4264{
4212 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4265 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4213 4266
4214 if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | 4267 if (mask == 0)
4215 AMD_DPM_FORCED_LEVEL_LOW |
4216 AMD_DPM_FORCED_LEVEL_HIGH))
4217 return -EINVAL; 4268 return -EINVAL;
4218 4269
4219 switch (type) { 4270 switch (type) {
@@ -4232,15 +4283,15 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4232 case PP_PCIE: 4283 case PP_PCIE:
4233 { 4284 {
4234 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; 4285 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4235 uint32_t level = 0;
4236 4286
4237 while (tmp >>= 1) 4287 if (!data->pcie_dpm_key_disabled) {
4238 level++; 4288 if (fls(tmp) != ffs(tmp))
4239 4289 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
4240 if (!data->pcie_dpm_key_disabled) 4290 else
4241 smum_send_msg_to_smc_with_parameter(hwmgr, 4291 smum_send_msg_to_smc_with_parameter(hwmgr,
4242 PPSMC_MSG_PCIeDPM_ForceLevel, 4292 PPSMC_MSG_PCIeDPM_ForceLevel,
4243 level); 4293 fls(tmp) - 1);
4294 }
4244 break; 4295 break;
4245 } 4296 }
4246 default: 4297 default:
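The reworked PP_PCIE branch above forces a single PCIe level only when exactly one bit survives the mask (fls(tmp) == ffs(tmp)); with several bits set it un-forces the level instead. A standalone equivalent, emulating fls()/ffs() with compiler builtins and adding a guard for an empty mask:

#include <stdio.h>

static int fls_u32(unsigned x) { return x ? 32 - __builtin_clz(x) : 0; }
static int ffs_u32(unsigned x) { return __builtin_ffs((int)x); }

static void force_pcie_level(unsigned mask, unsigned enabled_mask)
{
	unsigned tmp = mask & enabled_mask;

	if (!tmp)
		return;					/* nothing requested */
	if (fls_u32(tmp) != ffs_u32(tmp))
		printf("unforce PCIe DPM\n");		/* several levels allowed */
	else
		printf("force PCIe level %d\n", fls_u32(tmp) - 1);
}

int main(void)
{
	force_pcie_level(0x4, 0x7);	/* forces level 2 */
	force_pcie_level(0x6, 0x7);	/* unforces */
	return 0;
}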
@@ -4257,6 +4308,9 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4257 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 4308 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4258 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 4309 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4259 struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); 4310 struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
4311 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4312 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4313 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4260 int i, now, size = 0; 4314 int i, now, size = 0;
4261 uint32_t clock, pcie_speed; 4315 uint32_t clock, pcie_speed;
4262 4316
@@ -4309,6 +4363,24 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4309 (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "", 4363 (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
4310 (i == now) ? "*" : ""); 4364 (i == now) ? "*" : "");
4311 break; 4365 break;
4366 case OD_SCLK:
4367 if (hwmgr->od_enabled) {
4368 size = sprintf(buf, "%s: \n", "OD_SCLK");
4369 for (i = 0; i < odn_sclk_table->num_of_pl; i++)
4370 size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
4371 i, odn_sclk_table->entries[i].clock / 100,
4372 odn_sclk_table->entries[i].vddc);
4373 }
4374 break;
4375 case OD_MCLK:
4376 if (hwmgr->od_enabled) {
4377 size = sprintf(buf, "%s: \n", "OD_MCLK");
4378 for (i = 0; i < odn_mclk_table->num_of_pl; i++)
4379 size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
4380 i, odn_mclk_table->entries[i].clock / 100,
4381 odn_mclk_table->entries[i].vddc);
4382 }
4383 break;
4312 default: 4384 default:
4313 break; 4385 break;
4314 } 4386 }
@@ -4506,110 +4578,6 @@ static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
4506 return 0; 4578 return 0;
4507} 4579}
4508 4580
4509static void smu7_find_min_clock_masks(struct pp_hwmgr *hwmgr,
4510 uint32_t *sclk_mask, uint32_t *mclk_mask,
4511 uint32_t min_sclk, uint32_t min_mclk)
4512{
4513 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4514 struct smu7_dpm_table *dpm_table = &(data->dpm_table);
4515 uint32_t i;
4516
4517 for (i = 0; i < dpm_table->sclk_table.count; i++) {
4518 if (dpm_table->sclk_table.dpm_levels[i].enabled &&
4519 dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
4520 *sclk_mask |= 1 << i;
4521 }
4522
4523 for (i = 0; i < dpm_table->mclk_table.count; i++) {
4524 if (dpm_table->mclk_table.dpm_levels[i].enabled &&
4525 dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
4526 *mclk_mask |= 1 << i;
4527 }
4528}
4529
4530static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
4531 struct amd_pp_profile *request)
4532{
4533 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4534 int tmp_result, result = 0;
4535 uint32_t sclk_mask = 0, mclk_mask = 0;
4536
4537 if (hwmgr->chip_id == CHIP_FIJI) {
4538 if (request->type == AMD_PP_GFX_PROFILE)
4539 smu7_enable_power_containment(hwmgr);
4540 else if (request->type == AMD_PP_COMPUTE_PROFILE)
4541 smu7_disable_power_containment(hwmgr);
4542 }
4543
4544 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
4545 return -EINVAL;
4546
4547 tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
4548 PP_ASSERT_WITH_CODE(!tmp_result,
4549 "Failed to freeze SCLK MCLK DPM!",
4550 result = tmp_result);
4551
4552 tmp_result = smum_populate_requested_graphic_levels(hwmgr, request);
4553 PP_ASSERT_WITH_CODE(!tmp_result,
4554 "Failed to populate requested graphic levels!",
4555 result = tmp_result);
4556
4557 tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
4558 PP_ASSERT_WITH_CODE(!tmp_result,
4559 "Failed to unfreeze SCLK MCLK DPM!",
4560 result = tmp_result);
4561
4562 smu7_find_min_clock_masks(hwmgr, &sclk_mask, &mclk_mask,
4563 request->min_sclk, request->min_mclk);
4564
4565 if (sclk_mask) {
4566 if (!data->sclk_dpm_key_disabled)
4567 smum_send_msg_to_smc_with_parameter(hwmgr,
4568 PPSMC_MSG_SCLKDPM_SetEnabledMask,
4569 data->dpm_level_enable_mask.
4570 sclk_dpm_enable_mask &
4571 sclk_mask);
4572 }
4573
4574 if (mclk_mask) {
4575 if (!data->mclk_dpm_key_disabled)
4576 smum_send_msg_to_smc_with_parameter(hwmgr,
4577 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4578 data->dpm_level_enable_mask.
4579 mclk_dpm_enable_mask &
4580 mclk_mask);
4581 }
4582
4583 return result;
4584}
4585
4586static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
4587{
4588 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
4589
4590 if (smu_data == NULL)
4591 return -EINVAL;
4592
4593 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
4594 return 0;
4595
4596 if (enable) {
4597 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
4598 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
4599 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
4600 hwmgr, PPSMC_MSG_EnableAvfs),
4601 "Failed to enable AVFS!",
4602 return -EINVAL);
4603 } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
4604 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
4605 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
4606 hwmgr, PPSMC_MSG_DisableAvfs),
4607 "Failed to disable AVFS!",
4608 return -EINVAL);
4609
4610 return 0;
4611}
4612
4613static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, 4581static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4614 uint32_t virtual_addr_low, 4582 uint32_t virtual_addr_low,
4615 uint32_t virtual_addr_hi, 4583 uint32_t virtual_addr_hi,
@@ -4670,6 +4638,344 @@ static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
4670 return 0; 4638 return 0;
4671} 4639}
4672 4640
4641static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4642 struct PP_TemperatureRange *thermal_data)
4643{
4644 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4645 struct phm_ppt_v1_information *table_info =
4646 (struct phm_ppt_v1_information *)hwmgr->pptable;
4647
4648 memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
4649
4650 if (hwmgr->pp_table_version == PP_TABLE_V1)
4651 thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
4652 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4653 else if (hwmgr->pp_table_version == PP_TABLE_V0)
4654 thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
4655 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4656
4657 return 0;
4658}
4659
4660static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4661 enum PP_OD_DPM_TABLE_COMMAND type,
4662 uint32_t clk,
4663 uint32_t voltage)
4664{
4665 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4666
4667 struct phm_ppt_v1_information *table_info =
4668 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4669 uint32_t min_vddc;
4670 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
4671
4672 if (table_info == NULL)
4673 return false;
4674
4675 dep_sclk_table = table_info->vdd_dep_on_sclk;
4676 min_vddc = dep_sclk_table->entries[0].vddc;
4677
4678 if (voltage < min_vddc || voltage > 2000) {
4679 pr_info("OD voltage is out of range [%d - 2000] mV\n", min_vddc);
4680 return false;
4681 }
4682
4683 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4684 if (data->vbios_boot_state.sclk_bootup_value > clk ||
4685 hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4686 pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4687 data->vbios_boot_state.sclk_bootup_value,
4688 hwmgr->platform_descriptor.overdriveLimit.engineClock / 100);
4689 return false;
4690 }
4691 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4692 if (data->vbios_boot_state.mclk_bootup_value > clk ||
4693 hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4694 pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4695 data->vbios_boot_state.mclk_bootup_value/100,
4696 hwmgr->platform_descriptor.overdriveLimit.memoryClock / 100);
4697 return false;
4698 }
4699 } else {
4700 return false;
4701 }
4702
4703 return true;
4704}
4705
4706static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
4707{
4708 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4709 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4710 struct phm_ppt_v1_information *table_info =
4711 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4712 uint32_t i;
4713
4714 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4715 struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
4716
4717 if (table_info == NULL)
4718 return;
4719
4720 for (i=0; i<data->dpm_table.sclk_table.count; i++) {
4721 if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
4722 data->dpm_table.sclk_table.dpm_levels[i].value) {
4723 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4724 break;
4725 }
4726 }
4727
4728 for (i=0; i<data->dpm_table.sclk_table.count; i++) {
4729 if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
4730 data->dpm_table.mclk_table.dpm_levels[i].value) {
4731 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4732 break;
4733 }
4734 }
4735
4736 dep_table = table_info->vdd_dep_on_mclk;
4737 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
4738
4739 for (i=0; i < dep_table->count; i++) {
4740 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
4741 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4742 break;
4743 }
4744 }
4745 if (i == dep_table->count)
4746 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
4747
4748 dep_table = table_info->vdd_dep_on_sclk;
4749 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
4750 for (i=0; i < dep_table->count; i++) {
4751 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
4752 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4753 break;
4754 }
4755 }
4756 if (i == dep_table->count)
4757 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
4758}
4759
4760static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
4761 enum PP_OD_DPM_TABLE_COMMAND type,
4762 long *input, uint32_t size)
4763{
4764 uint32_t i;
4765 struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
4766 struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
4767 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4768
4769 uint32_t input_clk;
4770 uint32_t input_vol;
4771 uint32_t input_level;
4772
4773 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
4774 return -EINVAL);
4775
4776 if (!hwmgr->od_enabled) {
4777 pr_info("OverDrive feature not enabled\n");
4778 return -EINVAL;
4779 }
4780
4781 if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
4782 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
4783 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
4784 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
4785 "Failed to get ODN SCLK and Voltage tables",
4786 return -EINVAL);
4787 } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
4788 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
4789 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
4790
4791 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
4792 "Failed to get ODN MCLK and Voltage tables",
4793 return -EINVAL);
4794 } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
4795 smu7_odn_initial_default_setting(hwmgr);
4796 return 0;
4797 } else if (PP_OD_COMMIT_DPM_TABLE == type) {
4798 smu7_check_dpm_table_updated(hwmgr);
4799 return 0;
4800 } else {
4801 return -EINVAL;
4802 }
4803
4804 for (i = 0; i < size; i += 3) {
4805 if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
4806 pr_info("invalid clock voltage input \n");
4807 return 0;
4808 }
4809 input_level = input[i];
4810 input_clk = input[i+1] * 100;
4811 input_vol = input[i+2];
4812
4813 if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
4814 podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
4815 podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
4816 podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
4817 podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
4818 } else {
4819 return -EINVAL;
4820 }
4821 }
4822
4823 return 0;
4824}
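
For the SCLK/MCLK edit commands, the input array is consumed as (level, clock, voltage) triplets, with the clock scaled by 100 before it is stored. An illustrative input buffer in that layout, with hypothetical values (this is not the driver's actual call path, just the shape of the data):

/* Illustrative only: (level, clock, voltage) triplets as parsed above. */
long od_input[] = {
	0, 300, 750,	/* level 0: 300 (stored as 30000 after the *100 scaling), voltage 750 */
	1, 600, 800,
	2, 900, 900,
};
uint32_t od_size = 9;	/* a trailing partial triplet is rejected by the i + 3 > size check */
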
4825
4826static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4827{
4828 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4829 uint32_t i, size = 0;
4830 uint32_t len;
4831
4832 static const char *profile_name[6] = {"3D_FULL_SCREEN",
4833 "POWER_SAVING",
4834 "VIDEO",
4835 "VR",
4836 "COMPUTE",
4837 "CUSTOM"};
4838
4839 static const char *title[8] = {"NUM",
4840 "MODE_NAME",
4841 "SCLK_UP_HYST",
4842 "SCLK_DOWN_HYST",
4843 "SCLK_ACTIVE_LEVEL",
4844 "MCLK_UP_HYST",
4845 "MCLK_DOWN_HYST",
4846 "MCLK_ACTIVE_LEVEL"};
4847
4848 if (!buf)
4849 return -EINVAL;
4850
4851 size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
4852 title[0], title[1], title[2], title[3],
4853 title[4], title[5], title[6], title[7]);
4854
4855 len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);
4856
4857 for (i = 0; i < len; i++) {
4858 if (smu7_profiling[i].bupdate_sclk)
4859 size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
4860 i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
4861 smu7_profiling[i].sclk_down_hyst,
4862 smu7_profiling[i].sclk_activity);
4863 else
4864 size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
4865 i, profile_name[i], "-", "-", "-");
4866
4867 if (smu7_profiling[i].bupdate_mclk)
4868 size += sprintf(buf + size, "%16d %16d %16d\n",
4869 smu7_profiling[i].mclk_up_hyst,
4870 smu7_profiling[i].mclk_down_hyst,
4871 smu7_profiling[i].mclk_activity);
4872 else
4873 size += sprintf(buf + size, "%16s %16s %16s\n",
4874 "-", "-", "-");
4875 }
4876
4877 size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n",
4878 i, profile_name[i],
4879 data->custom_profile_setting.sclk_up_hyst,
4880 data->custom_profile_setting.sclk_down_hyst,
4881 data->custom_profile_setting.sclk_activity,
4882 data->custom_profile_setting.mclk_up_hyst,
4883 data->custom_profile_setting.mclk_down_hyst,
4884 data->custom_profile_setting.mclk_activity);
4885
4886 size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n",
4887 "*", "CURRENT",
4888 data->current_profile_setting.sclk_up_hyst,
4889 data->current_profile_setting.sclk_down_hyst,
4890 data->current_profile_setting.sclk_activity,
4891 data->current_profile_setting.mclk_up_hyst,
4892 data->current_profile_setting.mclk_down_hyst,
4893 data->current_profile_setting.mclk_activity);
4894
4895 return size;
4896}
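
Each sprintf above appends at the running offset size with no explicit bound; the buffer is whatever the powerplay caller hands in (a page-sized sysfs buffer in the amdgpu case, as far as I can tell). A bounds-aware variant of the same accumulation idiom, sketched in kernel context (the cap parameter is an assumption, not something the current hook receives):

/* Sketch only: append-at-offset with an explicit cap; scnprintf() never writes
 * past cap and returns the number of characters actually emitted. */
static int append_row(char *buf, int size, int cap,
		      const char *name, int up, int down, int act)
{
	if (size < 0 || size >= cap)
		return size;

	return size + scnprintf(buf + size, cap - size,
				"%16s: %8d %16d %16d\n", name, up, down, act);
}
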
4897
4898static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
4899				enum PP_SMC_POWER_PROFILE request)
4900{
4901 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4902 uint32_t tmp, level;
4903
4904	if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
4905 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4906 level = 0;
4907 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
4908 while (tmp >>= 1)
4909 level++;
4910 if (level > 0)
4911 smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
4912 }
4913 } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
4914 smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
4915 }
4916}
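
The while loop above is just a highest-set-bit scan over sclk_dpm_enable_mask, and 3 << (level - 1) then selects the top two enabled SCLK levels when the COMPUTE profile is requested. A standalone sketch of that arithmetic, with a made-up mask:

/* Example: mask 0x3f (levels 0..5 enabled) -> level ends at 5, forced mask is 0x30. */
static uint32_t compute_forced_sclk_mask(uint32_t enable_mask)
{
	uint32_t level = 0, tmp = enable_mask;

	while (tmp >>= 1)
		level++;	/* index of the highest set bit */

	return level > 0 ? 3u << (level - 1) : enable_mask;
}
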
4917
4918static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4919{
4920 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4921 struct profile_mode_setting tmp;
4922 enum PP_SMC_POWER_PROFILE mode;
4923
4924 if (input == NULL)
4925 return -EINVAL;
4926
4927 mode = input[size];
4928 switch (mode) {
4929 case PP_SMC_POWER_PROFILE_CUSTOM:
4930 if (size < 8)
4931 return -EINVAL;
4932
4933 data->custom_profile_setting.bupdate_sclk = input[0];
4934 data->custom_profile_setting.sclk_up_hyst = input[1];
4935 data->custom_profile_setting.sclk_down_hyst = input[2];
4936 data->custom_profile_setting.sclk_activity = input[3];
4937 data->custom_profile_setting.bupdate_mclk = input[4];
4938 data->custom_profile_setting.mclk_up_hyst = input[5];
4939 data->custom_profile_setting.mclk_down_hyst = input[6];
4940 data->custom_profile_setting.mclk_activity = input[7];
4941 if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) {
4942 memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting));
4943 hwmgr->power_profile_mode = mode;
4944 }
4945 break;
4946 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
4947 case PP_SMC_POWER_PROFILE_POWERSAVING:
4948 case PP_SMC_POWER_PROFILE_VIDEO:
4949 case PP_SMC_POWER_PROFILE_VR:
4950 case PP_SMC_POWER_PROFILE_COMPUTE:
4951 if (mode == hwmgr->power_profile_mode)
4952 return 0;
4953
4954 memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
4955 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
4956 if (tmp.bupdate_sclk) {
4957 data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
4958 data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
4959 data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
4960 data->current_profile_setting.sclk_activity = tmp.sclk_activity;
4961 }
4962 if (tmp.bupdate_mclk) {
4963 data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
4964 data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
4965 data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
4966 data->current_profile_setting.mclk_activity = tmp.mclk_activity;
4967 }
4968 smu7_patch_compute_profile_mode(hwmgr, mode);
4969 hwmgr->power_profile_mode = mode;
4970 }
4971 break;
4972 default:
4973 return -EINVAL;
4974 }
4975
4976 return 0;
4977}
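
For the CUSTOM case the caller supplies eight values in the order bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity, bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity, and the selected mode is read from input[size], i.e. one slot past the parameter block. A sketch of such a buffer (all numbers hypothetical):

/* Illustrative CUSTOM request: 8 parameters followed by the mode id at input[size]. */
long input[9] = {
	1,	/* bupdate_sclk   */
	5,	/* sclk_up_hyst   */
	5,	/* sclk_down_hyst */
	30,	/* sclk_activity  */
	1,	/* bupdate_mclk   */
	5,	/* mclk_up_hyst   */
	5,	/* mclk_down_hyst */
	30,	/* mclk_activity  */
	0,	/* numeric value of PP_SMC_POWER_PROFILE_CUSTOM goes here */
};
uint32_t size = 8;
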
4978
4673static const struct pp_hwmgr_func smu7_hwmgr_funcs = { 4979static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
4674 .backend_init = &smu7_hwmgr_backend_init, 4980 .backend_init = &smu7_hwmgr_backend_init,
4675 .backend_fini = &smu7_hwmgr_backend_fini, 4981 .backend_fini = &smu7_hwmgr_backend_fini,
@@ -4693,7 +4999,6 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
4693 .display_config_changed = smu7_display_configuration_changed_task, 4999 .display_config_changed = smu7_display_configuration_changed_task,
4694 .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output, 5000 .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
4695 .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output, 5001 .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
4696 .get_temperature = smu7_thermal_get_temperature,
4697 .stop_thermal_controller = smu7_thermal_stop_thermal_controller, 5002 .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
4698 .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info, 5003 .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
4699 .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent, 5004 .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
@@ -4717,12 +5022,16 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
4717 .get_clock_by_type = smu7_get_clock_by_type, 5022 .get_clock_by_type = smu7_get_clock_by_type,
4718 .read_sensor = smu7_read_sensor, 5023 .read_sensor = smu7_read_sensor,
4719 .dynamic_state_management_disable = smu7_disable_dpm_tasks, 5024 .dynamic_state_management_disable = smu7_disable_dpm_tasks,
4720 .set_power_profile_state = smu7_set_power_profile_state,
4721 .avfs_control = smu7_avfs_control, 5025 .avfs_control = smu7_avfs_control,
4722 .disable_smc_firmware_ctf = smu7_thermal_disable_alert, 5026 .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
4723 .start_thermal_controller = smu7_start_thermal_controller, 5027 .start_thermal_controller = smu7_start_thermal_controller,
4724 .notify_cac_buffer_info = smu7_notify_cac_buffer_info, 5028 .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
4725 .get_max_high_clocks = smu7_get_max_high_clocks, 5029 .get_max_high_clocks = smu7_get_max_high_clocks,
5030 .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5031 .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5032 .set_power_limit = smu7_set_power_limit,
5033 .get_power_profile_mode = smu7_get_power_profile_mode,
5034 .set_power_profile_mode = smu7_set_power_profile_mode,
4726}; 5035};
4727 5036
4728uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, 5037uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
@@ -4754,4 +5063,3 @@ int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
4754 5063
4755 return ret; 5064 return ret;
4756} 5065}
4757
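
The hunk above wires the new handlers into smu7_hwmgr_funcs; callers never invoke them directly but go through the pp_hwmgr_func table attached to the hwmgr. A hedged sketch of that dispatch, using only the hook names visible in this diff (the wrapper itself is illustrative, not the powerplay layer's real entry point):

/* Sketch: how a powerplay-level caller would reach the new smu7 hook. */
static int pp_odn_edit(struct pp_hwmgr *hwmgr, enum PP_OD_DPM_TABLE_COMMAND type,
		       long *input, uint32_t size)
{
	if (!hwmgr || !hwmgr->hwmgr_func || !hwmgr->hwmgr_func->odn_edit_dpm_table)
		return -EINVAL;

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
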
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index e021154aedbd..3bcfc61cd5a2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -34,11 +34,6 @@
34#define SMU7_VOLTAGE_CONTROL_BY_SVID2 0x2 34#define SMU7_VOLTAGE_CONTROL_BY_SVID2 0x2
35#define SMU7_VOLTAGE_CONTROL_MERGED 0x3 35#define SMU7_VOLTAGE_CONTROL_MERGED 0x3
36 36
37#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
38#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
39#define DPMTABLE_UPDATE_SCLK 0x00000004
40#define DPMTABLE_UPDATE_MCLK 0x00000008
41
42enum gpu_pt_config_reg_type { 37enum gpu_pt_config_reg_type {
43 GPU_CONFIGREG_MMR = 0, 38 GPU_CONFIGREG_MMR = 0,
44 GPU_CONFIGREG_SMC_IND, 39 GPU_CONFIGREG_SMC_IND,
@@ -178,9 +173,34 @@ struct smu7_pcie_perf_range {
178 uint16_t min; 173 uint16_t min;
179}; 174};
180 175
176struct smu7_odn_clock_voltage_dependency_table {
177 uint32_t count;
178 phm_ppt_v1_clock_voltage_dependency_record entries[MAX_REGULAR_DPM_NUMBER];
179};
180
181struct smu7_odn_dpm_table {
182 struct phm_odn_clock_levels odn_core_clock_dpm_levels;
183 struct phm_odn_clock_levels odn_memory_clock_dpm_levels;
184 struct smu7_odn_clock_voltage_dependency_table vdd_dependency_on_sclk;
185 struct smu7_odn_clock_voltage_dependency_table vdd_dependency_on_mclk;
186 uint32_t odn_mclk_min_limit;
187};
188
189struct profile_mode_setting {
190 uint8_t bupdate_sclk;
191 uint8_t sclk_up_hyst;
192 uint8_t sclk_down_hyst;
193 uint16_t sclk_activity;
194 uint8_t bupdate_mclk;
195 uint8_t mclk_up_hyst;
196 uint8_t mclk_down_hyst;
197 uint16_t mclk_activity;
198};
199
181struct smu7_hwmgr { 200struct smu7_hwmgr {
182 struct smu7_dpm_table dpm_table; 201 struct smu7_dpm_table dpm_table;
183 struct smu7_dpm_table golden_dpm_table; 202 struct smu7_dpm_table golden_dpm_table;
203 struct smu7_odn_dpm_table odn_dpm_table;
184 204
185 uint32_t voting_rights_clients[8]; 205 uint32_t voting_rights_clients[8];
186 uint32_t static_screen_threshold_unit; 206 uint32_t static_screen_threshold_unit;
@@ -280,7 +300,6 @@ struct smu7_hwmgr {
280 struct smu7_pcie_perf_range pcie_lane_power_saving; 300 struct smu7_pcie_perf_range pcie_lane_power_saving;
281 bool use_pcie_performance_levels; 301 bool use_pcie_performance_levels;
282 bool use_pcie_power_saving_levels; 302 bool use_pcie_power_saving_levels;
283 uint32_t mclk_activity_target;
284 uint32_t mclk_dpm0_activity_target; 303 uint32_t mclk_dpm0_activity_target;
285 uint32_t low_sclk_interrupt_threshold; 304 uint32_t low_sclk_interrupt_threshold;
286 uint32_t last_mclk_dpm_enable_mask; 305 uint32_t last_mclk_dpm_enable_mask;
@@ -305,6 +324,9 @@ struct smu7_hwmgr {
305 uint32_t frame_time_x2; 324 uint32_t frame_time_x2;
306 uint16_t mem_latency_high; 325 uint16_t mem_latency_high;
307 uint16_t mem_latency_low; 326 uint16_t mem_latency_low;
327 uint32_t vr_config;
328 struct profile_mode_setting custom_profile_setting;
329 struct profile_mode_setting current_profile_setting;
308}; 330};
309 331
310/* To convert to Q8.8 format for firmware */ 332/* To convert to Q8.8 format for firmware */
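
The new profile_mode_setting struct mirrors the per-mode columns printed by smu7_get_power_profile_mode: two hysteresis pairs plus activity targets, the latter converted to Q8.8 before being handed to firmware (see the conversion comment retained in the context line above). A sketch of filling one entry, with made-up numbers:

/* Hypothetical entry; fields follow the SCLK/MCLK _HYST and _ACTIVE_LEVEL columns above. */
struct profile_mode_setting example = {
	.bupdate_sclk   = 1,
	.sclk_up_hyst   = 0,
	.sclk_down_hyst = 100,
	.sclk_activity  = 30,	/* activity target, converted to Q8.8 for firmware */
	.bupdate_mclk   = 1,
	.mclk_up_hyst   = 0,
	.mclk_down_hyst = 100,
	.mclk_activity  = 10,
};
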
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 85ca16abb626..03bc7453f3b1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -731,14 +731,9 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
731 int result; 731 int result;
732 uint32_t num_se = 0; 732 uint32_t num_se = 0;
733 uint32_t count, value, value2; 733 uint32_t count, value, value2;
734 struct cgs_system_info sys_info = {0}; 734 struct amdgpu_device *adev = hwmgr->adev;
735 735
736 sys_info.size = sizeof(struct cgs_system_info); 736 num_se = adev->gfx.config.max_shader_engines;
737 sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
738 result = cgs_query_system_info(hwmgr->device, &sys_info);
739
740 if (result == 0)
741 num_se = sys_info.value;
742 737
743 if (PP_CAP(PHM_PlatformCaps_SQRamping) || 738 if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
744 PP_CAP(PHM_PlatformCaps_DBRamping) || 739 PP_CAP(PHM_PlatformCaps_DBRamping) ||
@@ -857,6 +852,8 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
857{ 852{
858 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 853 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
859 854
855 n = (n & 0xff) << 8;
856
860 if (data->power_containment_features & 857 if (data->power_containment_features &
861 POWERCONTAINMENT_FEATURE_PkgPwrLimit) 858 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
862 return smum_send_msg_to_smc_with_parameter(hwmgr, 859 return smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -903,12 +900,12 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
903 PP_ASSERT_WITH_CODE((0 == smc_result), 900 PP_ASSERT_WITH_CODE((0 == smc_result),
904 "Failed to enable PkgPwrTracking in SMC.", result = -1;); 901 "Failed to enable PkgPwrTracking in SMC.", result = -1;);
905 if (0 == smc_result) { 902 if (0 == smc_result) {
906 uint32_t default_limit = 903 hwmgr->default_power_limit = hwmgr->power_limit =
907 (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); 904 cac_table->usMaximumPowerDeliveryLimit;
908 data->power_containment_features |= 905 data->power_containment_features |=
909 POWERCONTAINMENT_FEATURE_PkgPwrLimit; 906 POWERCONTAINMENT_FEATURE_PkgPwrLimit;
910 907
911 if (smu7_set_power_limit(hwmgr, default_limit)) 908 if (smu7_set_power_limit(hwmgr, hwmgr->power_limit))
912 pr_err("Failed to set Default Power Limit in SMC!"); 909 pr_err("Failed to set Default Power Limit in SMC!");
913 } 910 }
914 } 911 }
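
Two related changes land in this file: smu7_set_power_limit() now takes the limit in plain watts and folds it into the SMC's expected encoding via n = (n & 0xff) << 8, and smu7_enable_power_containment() seeds hwmgr->power_limit and default_power_limit straight from usMaximumPowerDeliveryLimit instead of pre-multiplying by 256. A small worked example of the encoding (the wattage is hypothetical):

/* 180 W -> (180 & 0xff) << 8 == 0xb400, the same 256x scaling the old code applied up front. */
static uint32_t encode_pkg_power_limit(uint32_t watts)
{
	return (watts & 0xff) << 8;
}
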
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index d7aa643cdb51..f6573ed0357d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -310,9 +310,9 @@ int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr)
310static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, 310static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
311 uint32_t low_temp, uint32_t high_temp) 311 uint32_t low_temp, uint32_t high_temp)
312{ 312{
313 uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP * 313 int low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
314 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 314 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
315 uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP * 315 int high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
316 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 316 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
317 317
318 if (low < low_temp) 318 if (low < low_temp)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index b314d09d41af..75a465f771f0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -35,9 +35,9 @@
35#include "hwmgr.h" 35#include "hwmgr.h"
36#include "hardwaremanager.h" 36#include "hardwaremanager.h"
37#include "cz_ppsmc.h" 37#include "cz_ppsmc.h"
38#include "cz_hwmgr.h" 38#include "smu8_hwmgr.h"
39#include "power_state.h" 39#include "power_state.h"
40#include "cz_clockpowergating.h" 40#include "pp_thermal.h"
41 41
42#define ixSMUSVI_NB_CURRENTVID 0xD8230044 42#define ixSMUSVI_NB_CURRENTVID 0xD8230044
43#define CURRENT_NB_VID_MASK 0xff000000 43#define CURRENT_NB_VID_MASK 0xff000000
@@ -46,26 +46,26 @@
46#define CURRENT_GFX_VID_MASK 0xff000000 46#define CURRENT_GFX_VID_MASK 0xff000000
47#define CURRENT_GFX_VID__SHIFT 24 47#define CURRENT_GFX_VID__SHIFT 24
48 48
49static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic; 49static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;
50 50
51static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_ps) 51static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
52{ 52{
53 if (PhwCz_Magic != hw_ps->magic) 53 if (smu8_magic != hw_ps->magic)
54 return NULL; 54 return NULL;
55 55
56 return (struct cz_power_state *)hw_ps; 56 return (struct smu8_power_state *)hw_ps;
57} 57}
58 58
59static const struct cz_power_state *cast_const_PhwCzPowerState( 59static const struct smu8_power_state *cast_const_smu8_power_state(
60 const struct pp_hw_power_state *hw_ps) 60 const struct pp_hw_power_state *hw_ps)
61{ 61{
62 if (PhwCz_Magic != hw_ps->magic) 62 if (smu8_magic != hw_ps->magic)
63 return NULL; 63 return NULL;
64 64
65 return (struct cz_power_state *)hw_ps; 65 return (struct smu8_power_state *)hw_ps;
66} 66}
67 67
68static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr, 68static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
69 uint32_t clock, uint32_t msg) 69 uint32_t clock, uint32_t msg)
70{ 70{
71 int i = 0; 71 int i = 0;
@@ -96,7 +96,7 @@ static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
96 return i; 96 return i;
97} 97}
98 98
99static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr, 99static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
100 uint32_t clock, uint32_t msg) 100 uint32_t clock, uint32_t msg)
101{ 101{
102 int i = 0; 102 int i = 0;
@@ -126,7 +126,7 @@ static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
126 return i; 126 return i;
127} 127}
128 128
129static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr, 129static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
130 uint32_t clock, uint32_t msg) 130 uint32_t clock, uint32_t msg)
131{ 131{
132 int i = 0; 132 int i = 0;
@@ -157,47 +157,42 @@ static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
157 return i; 157 return i;
158} 158}
159 159
160static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr) 160static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
161{ 161{
162 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 162 struct smu8_hwmgr *data = hwmgr->backend;
163 163
164 if (cz_hwmgr->max_sclk_level == 0) { 164 if (data->max_sclk_level == 0) {
165 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel); 165 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
166 cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1; 166 data->max_sclk_level = smum_get_argument(hwmgr) + 1;
167 } 167 }
168 168
169 return cz_hwmgr->max_sclk_level; 169 return data->max_sclk_level;
170} 170}
171 171
172static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) 172static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
173{ 173{
174 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 174 struct smu8_hwmgr *data = hwmgr->backend;
175 uint32_t i; 175 struct amdgpu_device *adev = hwmgr->adev;
176 struct cgs_system_info sys_info = {0};
177 int result;
178 176
179 cz_hwmgr->gfx_ramp_step = 256*25/100; 177 data->gfx_ramp_step = 256*25/100;
180 cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */ 178 data->gfx_ramp_delay = 1; /* by default, we delay 1us */
181 179
182 for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) 180 data->mgcg_cgtt_local0 = 0x00000000;
183 cz_hwmgr->activity_target[i] = CZ_AT_DFLT; 181 data->mgcg_cgtt_local1 = 0x00000000;
184 182 data->clock_slow_down_freq = 25000;
185 cz_hwmgr->mgcg_cgtt_local0 = 0x00000000; 183 data->skip_clock_slow_down = 1;
186 cz_hwmgr->mgcg_cgtt_local1 = 0x00000000; 184 data->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
187 cz_hwmgr->clock_slow_down_freq = 25000; 185 data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
188 cz_hwmgr->skip_clock_slow_down = 1; 186 data->voting_rights_clients = 0x00C00033;
189 cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */ 187 data->static_screen_threshold = 8;
190 cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */ 188 data->ddi_power_gating_disabled = 0;
191 cz_hwmgr->voting_rights_clients = 0x00C00033; 189 data->bapm_enabled = 1;
192 cz_hwmgr->static_screen_threshold = 8; 190 data->voltage_drop_threshold = 0;
193 cz_hwmgr->ddi_power_gating_disabled = 0; 191 data->gfx_power_gating_threshold = 500;
194 cz_hwmgr->bapm_enabled = 1; 192 data->vce_slow_sclk_threshold = 20000;
195 cz_hwmgr->voltage_drop_threshold = 0; 193 data->dce_slow_sclk_threshold = 30000;
196 cz_hwmgr->gfx_power_gating_threshold = 500; 194 data->disable_driver_thermal_policy = 1;
197 cz_hwmgr->vce_slow_sclk_threshold = 20000; 195 data->disable_nb_ps3_in_battery = 0;
198 cz_hwmgr->dce_slow_sclk_threshold = 30000;
199 cz_hwmgr->disable_driver_thermal_policy = 1;
200 cz_hwmgr->disable_nb_ps3_in_battery = 0;
201 196
202 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 197 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
203 PHM_PlatformCaps_ABM); 198 PHM_PlatformCaps_ABM);
@@ -208,14 +203,14 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
208 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 203 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
209 PHM_PlatformCaps_DynamicM3Arbiter); 204 PHM_PlatformCaps_DynamicM3Arbiter);
210 205
211 cz_hwmgr->override_dynamic_mgpg = 1; 206 data->override_dynamic_mgpg = 1;
212 207
213 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 208 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
214 PHM_PlatformCaps_DynamicPatchPowerState); 209 PHM_PlatformCaps_DynamicPatchPowerState);
215 210
216 cz_hwmgr->thermal_auto_throttling_treshold = 0; 211 data->thermal_auto_throttling_treshold = 0;
217 cz_hwmgr->tdr_clock = 0; 212 data->tdr_clock = 0;
218 cz_hwmgr->disable_gfx_power_gating_in_uvd = 0; 213 data->disable_gfx_power_gating_in_uvd = 0;
219 214
220 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 215 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
221 PHM_PlatformCaps_DynamicUVDState); 216 PHM_PlatformCaps_DynamicUVDState);
@@ -225,10 +220,10 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
225 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 220 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
226 PHM_PlatformCaps_VCEDPM); 221 PHM_PlatformCaps_VCEDPM);
227 222
228 cz_hwmgr->cc6_settings.cpu_cc6_disable = false; 223 data->cc6_settings.cpu_cc6_disable = false;
229 cz_hwmgr->cc6_settings.cpu_pstate_disable = false; 224 data->cc6_settings.cpu_pstate_disable = false;
230 cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false; 225 data->cc6_settings.nb_pstate_switch_disable = false;
231 cz_hwmgr->cc6_settings.cpu_pstate_separation_time = 0; 226 data->cc6_settings.cpu_pstate_separation_time = 0;
232 227
233 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 228 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
234 PHM_PlatformCaps_DisableVoltageIsland); 229 PHM_PlatformCaps_DisableVoltageIsland);
@@ -237,45 +232,42 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
237 PHM_PlatformCaps_UVDPowerGating); 232 PHM_PlatformCaps_UVDPowerGating);
238 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 233 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
239 PHM_PlatformCaps_VCEPowerGating); 234 PHM_PlatformCaps_VCEPowerGating);
240 sys_info.size = sizeof(struct cgs_system_info); 235
241 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; 236 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
242 result = cgs_query_system_info(hwmgr->device, &sys_info); 237 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
243 if (!result) { 238 PHM_PlatformCaps_UVDPowerGating);
244 if (sys_info.value & AMD_PG_SUPPORT_UVD) 239 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
245 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 240 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
246 PHM_PlatformCaps_UVDPowerGating); 241 PHM_PlatformCaps_VCEPowerGating);
247 if (sys_info.value & AMD_PG_SUPPORT_VCE) 242
248 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
249 PHM_PlatformCaps_VCEPowerGating);
250 }
251 243
252 return 0; 244 return 0;
253} 245}
254 246
255static uint32_t cz_convert_8Bit_index_to_voltage( 247static uint32_t smu8_convert_8Bit_index_to_voltage(
256 struct pp_hwmgr *hwmgr, uint16_t voltage) 248 struct pp_hwmgr *hwmgr, uint16_t voltage)
257{ 249{
258 return 6200 - (voltage * 25); 250 return 6200 - (voltage * 25);
259} 251}
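
The 8-bit VID decode above is a straight linear mapping, 6200 - 25 * index, in the same (unspecified here) voltage units used elsewhere in the dependency table. A worked example:

/* index 0 -> 6200, index 8 -> 6000, index 248 -> 0 (values in the table's own units) */
static uint32_t decode_8bit_vid(uint16_t index)
{
	return 6200 - (index * 25);
}
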
260 252
261static int cz_construct_max_power_limits_table(struct pp_hwmgr *hwmgr, 253static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
262 struct phm_clock_and_voltage_limits *table) 254 struct phm_clock_and_voltage_limits *table)
263{ 255{
264 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend; 256 struct smu8_hwmgr *data = hwmgr->backend;
265 struct cz_sys_info *sys_info = &cz_hwmgr->sys_info; 257 struct smu8_sys_info *sys_info = &data->sys_info;
266 struct phm_clock_voltage_dependency_table *dep_table = 258 struct phm_clock_voltage_dependency_table *dep_table =
267 hwmgr->dyn_state.vddc_dependency_on_sclk; 259 hwmgr->dyn_state.vddc_dependency_on_sclk;
268 260
269 if (dep_table->count > 0) { 261 if (dep_table->count > 0) {
270 table->sclk = dep_table->entries[dep_table->count-1].clk; 262 table->sclk = dep_table->entries[dep_table->count-1].clk;
271 table->vddc = cz_convert_8Bit_index_to_voltage(hwmgr, 263 table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
272 (uint16_t)dep_table->entries[dep_table->count-1].v); 264 (uint16_t)dep_table->entries[dep_table->count-1].v);
273 } 265 }
274 table->mclk = sys_info->nbp_memory_clock[0]; 266 table->mclk = sys_info->nbp_memory_clock[0];
275 return 0; 267 return 0;
276} 268}
277 269
278static int cz_init_dynamic_state_adjustment_rule_settings( 270static int smu8_init_dynamic_state_adjustment_rule_settings(
279 struct pp_hwmgr *hwmgr, 271 struct pp_hwmgr *hwmgr,
280 ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table) 272 ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
281{ 273{
@@ -313,9 +305,9 @@ static int cz_init_dynamic_state_adjustment_rule_settings(
313 return 0; 305 return 0;
314} 306}
315 307
316static int cz_get_system_info_data(struct pp_hwmgr *hwmgr) 308static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
317{ 309{
318 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend; 310 struct smu8_hwmgr *data = hwmgr->backend;
319 ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL; 311 ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
320 uint32_t i; 312 uint32_t i;
321 int result = 0; 313 int result = 0;
@@ -337,67 +329,67 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
337 return -EINVAL; 329 return -EINVAL;
338 } 330 }
339 331
340 cz_hwmgr->sys_info.bootup_uma_clock = 332 data->sys_info.bootup_uma_clock =
341 le32_to_cpu(info->ulBootUpUMAClock); 333 le32_to_cpu(info->ulBootUpUMAClock);
342 334
343 cz_hwmgr->sys_info.bootup_engine_clock = 335 data->sys_info.bootup_engine_clock =
344 le32_to_cpu(info->ulBootUpEngineClock); 336 le32_to_cpu(info->ulBootUpEngineClock);
345 337
346 cz_hwmgr->sys_info.dentist_vco_freq = 338 data->sys_info.dentist_vco_freq =
347 le32_to_cpu(info->ulDentistVCOFreq); 339 le32_to_cpu(info->ulDentistVCOFreq);
348 340
349 cz_hwmgr->sys_info.system_config = 341 data->sys_info.system_config =
350 le32_to_cpu(info->ulSystemConfig); 342 le32_to_cpu(info->ulSystemConfig);
351 343
352 cz_hwmgr->sys_info.bootup_nb_voltage_index = 344 data->sys_info.bootup_nb_voltage_index =
353 le16_to_cpu(info->usBootUpNBVoltage); 345 le16_to_cpu(info->usBootUpNBVoltage);
354 346
355 cz_hwmgr->sys_info.htc_hyst_lmt = 347 data->sys_info.htc_hyst_lmt =
356 (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt; 348 (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
357 349
358 cz_hwmgr->sys_info.htc_tmp_lmt = 350 data->sys_info.htc_tmp_lmt =
359 (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt; 351 (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
360 352
361 if (cz_hwmgr->sys_info.htc_tmp_lmt <= 353 if (data->sys_info.htc_tmp_lmt <=
362 cz_hwmgr->sys_info.htc_hyst_lmt) { 354 data->sys_info.htc_hyst_lmt) {
363 pr_err("The htcTmpLmt should be larger than htcHystLmt.\n"); 355 pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
364 return -EINVAL; 356 return -EINVAL;
365 } 357 }
366 358
367 cz_hwmgr->sys_info.nb_dpm_enable = 359 data->sys_info.nb_dpm_enable =
368 cz_hwmgr->enable_nb_ps_policy && 360 data->enable_nb_ps_policy &&
369 (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1); 361 (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
370 362
371 for (i = 0; i < CZ_NUM_NBPSTATES; i++) { 363 for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
372 if (i < CZ_NUM_NBPMEMORYCLOCK) { 364 if (i < SMU8_NUM_NBPMEMORYCLOCK) {
373 cz_hwmgr->sys_info.nbp_memory_clock[i] = 365 data->sys_info.nbp_memory_clock[i] =
374 le32_to_cpu(info->ulNbpStateMemclkFreq[i]); 366 le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
375 } 367 }
376 cz_hwmgr->sys_info.nbp_n_clock[i] = 368 data->sys_info.nbp_n_clock[i] =
377 le32_to_cpu(info->ulNbpStateNClkFreq[i]); 369 le32_to_cpu(info->ulNbpStateNClkFreq[i]);
378 } 370 }
379 371
380 for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) { 372 for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
381 cz_hwmgr->sys_info.display_clock[i] = 373 data->sys_info.display_clock[i] =
382 le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK); 374 le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
383 } 375 }
384 376
385 /* Here use 4 levels, make sure not exceed */ 377 /* Here use 4 levels, make sure not exceed */
386 for (i = 0; i < CZ_NUM_NBPSTATES; i++) { 378 for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
387 cz_hwmgr->sys_info.nbp_voltage_index[i] = 379 data->sys_info.nbp_voltage_index[i] =
388 le16_to_cpu(info->usNBPStateVoltage[i]); 380 le16_to_cpu(info->usNBPStateVoltage[i]);
389 } 381 }
390 382
391 if (!cz_hwmgr->sys_info.nb_dpm_enable) { 383 if (!data->sys_info.nb_dpm_enable) {
392 for (i = 1; i < CZ_NUM_NBPSTATES; i++) { 384 for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
393 if (i < CZ_NUM_NBPMEMORYCLOCK) { 385 if (i < SMU8_NUM_NBPMEMORYCLOCK) {
394 cz_hwmgr->sys_info.nbp_memory_clock[i] = 386 data->sys_info.nbp_memory_clock[i] =
395 cz_hwmgr->sys_info.nbp_memory_clock[0]; 387 data->sys_info.nbp_memory_clock[0];
396 } 388 }
397 cz_hwmgr->sys_info.nbp_n_clock[i] = 389 data->sys_info.nbp_n_clock[i] =
398 cz_hwmgr->sys_info.nbp_n_clock[0]; 390 data->sys_info.nbp_n_clock[0];
399 cz_hwmgr->sys_info.nbp_voltage_index[i] = 391 data->sys_info.nbp_voltage_index[i] =
400 cz_hwmgr->sys_info.nbp_voltage_index[0]; 392 data->sys_info.nbp_voltage_index[0];
401 } 393 }
402 } 394 }
403 395
@@ -407,40 +399,40 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
407 PHM_PlatformCaps_EnableDFSBypass); 399 PHM_PlatformCaps_EnableDFSBypass);
408 } 400 }
409 401
410 cz_hwmgr->sys_info.uma_channel_number = info->ucUMAChannelNumber; 402 data->sys_info.uma_channel_number = info->ucUMAChannelNumber;
411 403
412 cz_construct_max_power_limits_table (hwmgr, 404 smu8_construct_max_power_limits_table (hwmgr,
413 &hwmgr->dyn_state.max_clock_voltage_on_ac); 405 &hwmgr->dyn_state.max_clock_voltage_on_ac);
414 406
415 cz_init_dynamic_state_adjustment_rule_settings(hwmgr, 407 smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
416 &info->sDISPCLK_Voltage[0]); 408 &info->sDISPCLK_Voltage[0]);
417 409
418 return result; 410 return result;
419} 411}
420 412
421static int cz_construct_boot_state(struct pp_hwmgr *hwmgr) 413static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
422{ 414{
423 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 415 struct smu8_hwmgr *data = hwmgr->backend;
424 416
425 cz_hwmgr->boot_power_level.engineClock = 417 data->boot_power_level.engineClock =
426 cz_hwmgr->sys_info.bootup_engine_clock; 418 data->sys_info.bootup_engine_clock;
427 419
428 cz_hwmgr->boot_power_level.vddcIndex = 420 data->boot_power_level.vddcIndex =
429 (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index; 421 (uint8_t)data->sys_info.bootup_nb_voltage_index;
430 422
431 cz_hwmgr->boot_power_level.dsDividerIndex = 0; 423 data->boot_power_level.dsDividerIndex = 0;
432 cz_hwmgr->boot_power_level.ssDividerIndex = 0; 424 data->boot_power_level.ssDividerIndex = 0;
433 cz_hwmgr->boot_power_level.allowGnbSlow = 1; 425 data->boot_power_level.allowGnbSlow = 1;
434 cz_hwmgr->boot_power_level.forceNBPstate = 0; 426 data->boot_power_level.forceNBPstate = 0;
435 cz_hwmgr->boot_power_level.hysteresis_up = 0; 427 data->boot_power_level.hysteresis_up = 0;
436 cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0; 428 data->boot_power_level.numSIMDToPowerDown = 0;
437 cz_hwmgr->boot_power_level.display_wm = 0; 429 data->boot_power_level.display_wm = 0;
438 cz_hwmgr->boot_power_level.vce_wm = 0; 430 data->boot_power_level.vce_wm = 0;
439 431
440 return 0; 432 return 0;
441} 433}
442 434
443static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) 435static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
444{ 436{
445 struct SMU8_Fusion_ClkTable *clock_table; 437 struct SMU8_Fusion_ClkTable *clock_table;
446 int ret; 438 int ret;
@@ -470,18 +462,18 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
470 clock_table = (struct SMU8_Fusion_ClkTable *)table; 462 clock_table = (struct SMU8_Fusion_ClkTable *)table;
471 463
472 /* patch clock table */ 464 /* patch clock table */
473 PP_ASSERT_WITH_CODE((vddc_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), 465 PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
474 "Dependency table entry exceeds max limit!", return -EINVAL;); 466 "Dependency table entry exceeds max limit!", return -EINVAL;);
475 PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), 467 PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
476 "Dependency table entry exceeds max limit!", return -EINVAL;); 468 "Dependency table entry exceeds max limit!", return -EINVAL;);
477 PP_ASSERT_WITH_CODE((acp_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), 469 PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
478 "Dependency table entry exceeds max limit!", return -EINVAL;); 470 "Dependency table entry exceeds max limit!", return -EINVAL;);
479 PP_ASSERT_WITH_CODE((uvd_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), 471 PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
480 "Dependency table entry exceeds max limit!", return -EINVAL;); 472 "Dependency table entry exceeds max limit!", return -EINVAL;);
481 PP_ASSERT_WITH_CODE((vce_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), 473 PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
482 "Dependency table entry exceeds max limit!", return -EINVAL;); 474 "Dependency table entry exceeds max limit!", return -EINVAL;);
483 475
484 for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) { 476 for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {
485 477
486 /* vddc_sclk */ 478 /* vddc_sclk */
487 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid = 479 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
@@ -559,9 +551,9 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
559 return ret; 551 return ret;
560} 552}
561 553
562static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr) 554static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
563{ 555{
564 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 556 struct smu8_hwmgr *data = hwmgr->backend;
565 struct phm_clock_voltage_dependency_table *table = 557 struct phm_clock_voltage_dependency_table *table =
566 hwmgr->dyn_state.vddc_dependency_on_sclk; 558 hwmgr->dyn_state.vddc_dependency_on_sclk;
567 unsigned long clock = 0, level; 559 unsigned long clock = 0, level;
@@ -569,25 +561,25 @@ static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
569 if (NULL == table || table->count <= 0) 561 if (NULL == table || table->count <= 0)
570 return -EINVAL; 562 return -EINVAL;
571 563
572 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; 564 data->sclk_dpm.soft_min_clk = table->entries[0].clk;
573 cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk; 565 data->sclk_dpm.hard_min_clk = table->entries[0].clk;
574 566
575 level = cz_get_max_sclk_level(hwmgr) - 1; 567 level = smu8_get_max_sclk_level(hwmgr) - 1;
576 568
577 if (level < table->count) 569 if (level < table->count)
578 clock = table->entries[level].clk; 570 clock = table->entries[level].clk;
579 else 571 else
580 clock = table->entries[table->count - 1].clk; 572 clock = table->entries[table->count - 1].clk;
581 573
582 cz_hwmgr->sclk_dpm.soft_max_clk = clock; 574 data->sclk_dpm.soft_max_clk = clock;
583 cz_hwmgr->sclk_dpm.hard_max_clk = clock; 575 data->sclk_dpm.hard_max_clk = clock;
584 576
585 return 0; 577 return 0;
586} 578}
587 579
588static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr) 580static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
589{ 581{
590 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 582 struct smu8_hwmgr *data = hwmgr->backend;
591 struct phm_uvd_clock_voltage_dependency_table *table = 583 struct phm_uvd_clock_voltage_dependency_table *table =
592 hwmgr->dyn_state.uvd_clock_voltage_dependency_table; 584 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
593 unsigned long clock = 0, level; 585 unsigned long clock = 0, level;
@@ -595,8 +587,8 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
595 if (NULL == table || table->count <= 0) 587 if (NULL == table || table->count <= 0)
596 return -EINVAL; 588 return -EINVAL;
597 589
598 cz_hwmgr->uvd_dpm.soft_min_clk = 0; 590 data->uvd_dpm.soft_min_clk = 0;
599 cz_hwmgr->uvd_dpm.hard_min_clk = 0; 591 data->uvd_dpm.hard_min_clk = 0;
600 592
601 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel); 593 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
602 level = smum_get_argument(hwmgr); 594 level = smum_get_argument(hwmgr);
@@ -606,15 +598,15 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
606 else 598 else
607 clock = table->entries[table->count - 1].vclk; 599 clock = table->entries[table->count - 1].vclk;
608 600
609 cz_hwmgr->uvd_dpm.soft_max_clk = clock; 601 data->uvd_dpm.soft_max_clk = clock;
610 cz_hwmgr->uvd_dpm.hard_max_clk = clock; 602 data->uvd_dpm.hard_max_clk = clock;
611 603
612 return 0; 604 return 0;
613} 605}
614 606
615static int cz_init_vce_limit(struct pp_hwmgr *hwmgr) 607static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
616{ 608{
617 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 609 struct smu8_hwmgr *data = hwmgr->backend;
618 struct phm_vce_clock_voltage_dependency_table *table = 610 struct phm_vce_clock_voltage_dependency_table *table =
619 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 611 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
620 unsigned long clock = 0, level; 612 unsigned long clock = 0, level;
@@ -622,8 +614,8 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
622 if (NULL == table || table->count <= 0) 614 if (NULL == table || table->count <= 0)
623 return -EINVAL; 615 return -EINVAL;
624 616
625 cz_hwmgr->vce_dpm.soft_min_clk = 0; 617 data->vce_dpm.soft_min_clk = 0;
626 cz_hwmgr->vce_dpm.hard_min_clk = 0; 618 data->vce_dpm.hard_min_clk = 0;
627 619
628 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel); 620 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
629 level = smum_get_argument(hwmgr); 621 level = smum_get_argument(hwmgr);
@@ -633,15 +625,15 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
633 else 625 else
634 clock = table->entries[table->count - 1].ecclk; 626 clock = table->entries[table->count - 1].ecclk;
635 627
636 cz_hwmgr->vce_dpm.soft_max_clk = clock; 628 data->vce_dpm.soft_max_clk = clock;
637 cz_hwmgr->vce_dpm.hard_max_clk = clock; 629 data->vce_dpm.hard_max_clk = clock;
638 630
639 return 0; 631 return 0;
640} 632}
641 633
642static int cz_init_acp_limit(struct pp_hwmgr *hwmgr) 634static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
643{ 635{
644 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 636 struct smu8_hwmgr *data = hwmgr->backend;
645 struct phm_acp_clock_voltage_dependency_table *table = 637 struct phm_acp_clock_voltage_dependency_table *table =
646 hwmgr->dyn_state.acp_clock_voltage_dependency_table; 638 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
647 unsigned long clock = 0, level; 639 unsigned long clock = 0, level;
@@ -649,8 +641,8 @@ static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
649 if (NULL == table || table->count <= 0) 641 if (NULL == table || table->count <= 0)
650 return -EINVAL; 642 return -EINVAL;
651 643
652 cz_hwmgr->acp_dpm.soft_min_clk = 0; 644 data->acp_dpm.soft_min_clk = 0;
653 cz_hwmgr->acp_dpm.hard_min_clk = 0; 645 data->acp_dpm.hard_min_clk = 0;
654 646
655 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel); 647 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
656 level = smum_get_argument(hwmgr); 648 level = smum_get_argument(hwmgr);
@@ -660,32 +652,32 @@ static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
660 else 652 else
661 clock = table->entries[table->count - 1].acpclk; 653 clock = table->entries[table->count - 1].acpclk;
662 654
663 cz_hwmgr->acp_dpm.soft_max_clk = clock; 655 data->acp_dpm.soft_max_clk = clock;
664 cz_hwmgr->acp_dpm.hard_max_clk = clock; 656 data->acp_dpm.hard_max_clk = clock;
665 return 0; 657 return 0;
666} 658}
667 659
668static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr) 660static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
669{ 661{
670 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 662 struct smu8_hwmgr *data = hwmgr->backend;
671 663
672 cz_hwmgr->uvd_power_gated = false; 664 data->uvd_power_gated = false;
673 cz_hwmgr->vce_power_gated = false; 665 data->vce_power_gated = false;
674 cz_hwmgr->samu_power_gated = false; 666 data->samu_power_gated = false;
675 cz_hwmgr->acp_power_gated = false; 667 data->acp_power_gated = false;
676 cz_hwmgr->pgacpinit = true; 668 data->pgacpinit = true;
677} 669}
678 670
679static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr) 671static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
680{ 672{
681 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 673 struct smu8_hwmgr *data = hwmgr->backend;
682 674
683 cz_hwmgr->low_sclk_interrupt_threshold = 0; 675 data->low_sclk_interrupt_threshold = 0;
684} 676}
685 677
686static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr) 678static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
687{ 679{
688 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 680 struct smu8_hwmgr *data = hwmgr->backend;
689 struct phm_clock_voltage_dependency_table *table = 681 struct phm_clock_voltage_dependency_table *table =
690 hwmgr->dyn_state.vddc_dependency_on_sclk; 682 hwmgr->dyn_state.vddc_dependency_on_sclk;
691 683
@@ -694,29 +686,29 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
694 unsigned long stable_pstate_sclk; 686 unsigned long stable_pstate_sclk;
695 unsigned long percentage; 687 unsigned long percentage;
696 688
697 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; 689 data->sclk_dpm.soft_min_clk = table->entries[0].clk;
698 level = cz_get_max_sclk_level(hwmgr) - 1; 690 level = smu8_get_max_sclk_level(hwmgr) - 1;
699 691
700 if (level < table->count) 692 if (level < table->count)
701 cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[level].clk; 693 data->sclk_dpm.soft_max_clk = table->entries[level].clk;
702 else 694 else
703 cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; 695 data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
704 696
705 clock = hwmgr->display_config.min_core_set_clock; 697 clock = hwmgr->display_config.min_core_set_clock;
706 if (clock == 0) 698 if (clock == 0)
707 pr_debug("min_core_set_clock not set\n"); 699 pr_debug("min_core_set_clock not set\n");
708 700
709 if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) { 701 if (data->sclk_dpm.hard_min_clk != clock) {
710 cz_hwmgr->sclk_dpm.hard_min_clk = clock; 702 data->sclk_dpm.hard_min_clk = clock;
711 703
712 smum_send_msg_to_smc_with_parameter(hwmgr, 704 smum_send_msg_to_smc_with_parameter(hwmgr,
713 PPSMC_MSG_SetSclkHardMin, 705 PPSMC_MSG_SetSclkHardMin,
714 cz_get_sclk_level(hwmgr, 706 smu8_get_sclk_level(hwmgr,
715 cz_hwmgr->sclk_dpm.hard_min_clk, 707 data->sclk_dpm.hard_min_clk,
716 PPSMC_MSG_SetSclkHardMin)); 708 PPSMC_MSG_SetSclkHardMin));
717 } 709 }
718 710
719 clock = cz_hwmgr->sclk_dpm.soft_min_clk; 711 clock = data->sclk_dpm.soft_min_clk;
720 712
721 /* update minimum clocks for Stable P-State feature */ 713 /* update minimum clocks for Stable P-State feature */
722 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 714 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -730,36 +722,36 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
730 clock = stable_pstate_sclk; 722 clock = stable_pstate_sclk;
731 } 723 }
732 724
733 if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) { 725 if (data->sclk_dpm.soft_min_clk != clock) {
734 cz_hwmgr->sclk_dpm.soft_min_clk = clock; 726 data->sclk_dpm.soft_min_clk = clock;
735 smum_send_msg_to_smc_with_parameter(hwmgr, 727 smum_send_msg_to_smc_with_parameter(hwmgr,
736 PPSMC_MSG_SetSclkSoftMin, 728 PPSMC_MSG_SetSclkSoftMin,
737 cz_get_sclk_level(hwmgr, 729 smu8_get_sclk_level(hwmgr,
738 cz_hwmgr->sclk_dpm.soft_min_clk, 730 data->sclk_dpm.soft_min_clk,
739 PPSMC_MSG_SetSclkSoftMin)); 731 PPSMC_MSG_SetSclkSoftMin));
740 } 732 }
741 733
742 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 734 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
743 PHM_PlatformCaps_StablePState) && 735 PHM_PlatformCaps_StablePState) &&
744 cz_hwmgr->sclk_dpm.soft_max_clk != clock) { 736 data->sclk_dpm.soft_max_clk != clock) {
745 cz_hwmgr->sclk_dpm.soft_max_clk = clock; 737 data->sclk_dpm.soft_max_clk = clock;
746 smum_send_msg_to_smc_with_parameter(hwmgr, 738 smum_send_msg_to_smc_with_parameter(hwmgr,
747 PPSMC_MSG_SetSclkSoftMax, 739 PPSMC_MSG_SetSclkSoftMax,
748 cz_get_sclk_level(hwmgr, 740 smu8_get_sclk_level(hwmgr,
749 cz_hwmgr->sclk_dpm.soft_max_clk, 741 data->sclk_dpm.soft_max_clk,
750 PPSMC_MSG_SetSclkSoftMax)); 742 PPSMC_MSG_SetSclkSoftMax));
751 } 743 }
752 744
753 return 0; 745 return 0;
754} 746}
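
The same pattern repeats for every limit in this function: compare the cached clock against the requested one and, only on change, send the matching PPSMC message with the level index resolved by smu8_get_sclk_level(). A condensed sketch of that send-on-change idiom (the helper name is hypothetical):

/* Sketch of the compare-and-notify idiom used for the hard/soft sclk limits above. */
static void smu8_sync_clk_limit(struct pp_hwmgr *hwmgr, uint32_t *cached,
				uint32_t wanted, uint32_t msg)
{
	if (*cached == wanted)
		return;		/* nothing to tell the SMU */

	*cached = wanted;
	smum_send_msg_to_smc_with_parameter(hwmgr, msg,
			smu8_get_sclk_level(hwmgr, wanted, msg));
}
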
755 747
756static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) 748static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
757{ 749{
758 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 750 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
759 PHM_PlatformCaps_SclkDeepSleep)) { 751 PHM_PlatformCaps_SclkDeepSleep)) {
760 uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr; 752 uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
761 if (clks == 0) 753 if (clks == 0)
762 clks = CZ_MIN_DEEP_SLEEP_SCLK; 754 clks = SMU8_MIN_DEEP_SLEEP_SCLK;
763 755
764 PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks); 756 PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
765 757
@@ -771,21 +763,21 @@ static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
771 return 0; 763 return 0;
772} 764}
773 765
774static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr) 766static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
775{ 767{
776 struct cz_hwmgr *cz_hwmgr = 768 struct smu8_hwmgr *data =
777 (struct cz_hwmgr *)(hwmgr->backend); 769 hwmgr->backend;
778 770
779 smum_send_msg_to_smc_with_parameter(hwmgr, 771 smum_send_msg_to_smc_with_parameter(hwmgr,
780 PPSMC_MSG_SetWatermarkFrequency, 772 PPSMC_MSG_SetWatermarkFrequency,
781 cz_hwmgr->sclk_dpm.soft_max_clk); 773 data->sclk_dpm.soft_max_clk);
782 774
783 return 0; 775 return 0;
784} 776}
785 777
786static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock) 778static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
787{ 779{
788 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 780 struct smu8_hwmgr *hw_data = hwmgr->backend;
789 781
790 if (hw_data->is_nb_dpm_enabled) { 782 if (hw_data->is_nb_dpm_enabled) {
791 if (enable) { 783 if (enable) {
@@ -806,35 +798,35 @@ static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, b
806 return 0; 798 return 0;
807} 799}
808 800
809static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr) 801static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
810{ 802{
811 int ret = 0; 803 int ret = 0;
812 804
813 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 805 struct smu8_hwmgr *data = hwmgr->backend;
814 unsigned long dpm_features = 0; 806 unsigned long dpm_features = 0;
815 807
816 if (cz_hwmgr->is_nb_dpm_enabled) { 808 if (data->is_nb_dpm_enabled) {
817 cz_nbdpm_pstate_enable_disable(hwmgr, true, true); 809 smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
818 dpm_features |= NB_DPM_MASK; 810 dpm_features |= NB_DPM_MASK;
819 ret = smum_send_msg_to_smc_with_parameter( 811 ret = smum_send_msg_to_smc_with_parameter(
820 hwmgr, 812 hwmgr,
821 PPSMC_MSG_DisableAllSmuFeatures, 813 PPSMC_MSG_DisableAllSmuFeatures,
822 dpm_features); 814 dpm_features);
823 if (ret == 0) 815 if (ret == 0)
824 cz_hwmgr->is_nb_dpm_enabled = false; 816 data->is_nb_dpm_enabled = false;
825 } 817 }
826 818
827 return ret; 819 return ret;
828} 820}
829 821
830static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr) 822static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
831{ 823{
832 int ret = 0; 824 int ret = 0;
833 825
834 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 826 struct smu8_hwmgr *data = hwmgr->backend;
835 unsigned long dpm_features = 0; 827 unsigned long dpm_features = 0;
836 828
837 if (!cz_hwmgr->is_nb_dpm_enabled) { 829 if (!data->is_nb_dpm_enabled) {
838 PP_DBG_LOG("enabling ALL SMU features.\n"); 830 PP_DBG_LOG("enabling ALL SMU features.\n");
839 dpm_features |= NB_DPM_MASK; 831 dpm_features |= NB_DPM_MASK;
840 ret = smum_send_msg_to_smc_with_parameter( 832 ret = smum_send_msg_to_smc_with_parameter(
@@ -842,94 +834,94 @@ static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
842 PPSMC_MSG_EnableAllSmuFeatures, 834 PPSMC_MSG_EnableAllSmuFeatures,
843 dpm_features); 835 dpm_features);
844 if (ret == 0) 836 if (ret == 0)
845 cz_hwmgr->is_nb_dpm_enabled = true; 837 data->is_nb_dpm_enabled = true;
846 } 838 }
847 839
848 return ret; 840 return ret;
849} 841}
850 842
851static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input) 843static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
852{ 844{
853 bool disable_switch; 845 bool disable_switch;
854 bool enable_low_mem_state; 846 bool enable_low_mem_state;
855 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 847 struct smu8_hwmgr *hw_data = hwmgr->backend;
856 const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input; 848 const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
857 const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state); 849 const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);
858 850
859 if (hw_data->sys_info.nb_dpm_enable) { 851 if (hw_data->sys_info.nb_dpm_enable) {
860 disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false; 852 disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
861 enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true; 853 enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
862 854
863 if (pnew_state->action == FORCE_HIGH) 855 if (pnew_state->action == FORCE_HIGH)
864 cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); 856 smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
865 else if (pnew_state->action == CANCEL_FORCE_HIGH) 857 else if (pnew_state->action == CANCEL_FORCE_HIGH)
866 cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch); 858 smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
867 else 859 else
868 cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch); 860 smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
869 } 861 }
870 return 0; 862 return 0;
871} 863}
872 864
873static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 865static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
874{ 866{
875 int ret = 0; 867 int ret = 0;
876 868
877 cz_update_sclk_limit(hwmgr); 869 smu8_update_sclk_limit(hwmgr);
878 cz_set_deep_sleep_sclk_threshold(hwmgr); 870 smu8_set_deep_sleep_sclk_threshold(hwmgr);
879 cz_set_watermark_threshold(hwmgr); 871 smu8_set_watermark_threshold(hwmgr);
880 ret = cz_enable_nb_dpm(hwmgr); 872 ret = smu8_enable_nb_dpm(hwmgr);
881 if (ret) 873 if (ret)
882 return ret; 874 return ret;
883 cz_update_low_mem_pstate(hwmgr, input); 875 smu8_update_low_mem_pstate(hwmgr, input);
884 876
885 return 0; 877 return 0;
886}; 878};
887 879
888 880
889static int cz_setup_asic_task(struct pp_hwmgr *hwmgr) 881static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
890{ 882{
891 int ret; 883 int ret;
892 884
893 ret = cz_upload_pptable_to_smu(hwmgr); 885 ret = smu8_upload_pptable_to_smu(hwmgr);
894 if (ret) 886 if (ret)
895 return ret; 887 return ret;
896 ret = cz_init_sclk_limit(hwmgr); 888 ret = smu8_init_sclk_limit(hwmgr);
897 if (ret) 889 if (ret)
898 return ret; 890 return ret;
899 ret = cz_init_uvd_limit(hwmgr); 891 ret = smu8_init_uvd_limit(hwmgr);
900 if (ret) 892 if (ret)
901 return ret; 893 return ret;
902 ret = cz_init_vce_limit(hwmgr); 894 ret = smu8_init_vce_limit(hwmgr);
903 if (ret) 895 if (ret)
904 return ret; 896 return ret;
905 ret = cz_init_acp_limit(hwmgr); 897 ret = smu8_init_acp_limit(hwmgr);
906 if (ret) 898 if (ret)
907 return ret; 899 return ret;
908 900
909 cz_init_power_gate_state(hwmgr); 901 smu8_init_power_gate_state(hwmgr);
910 cz_init_sclk_threshold(hwmgr); 902 smu8_init_sclk_threshold(hwmgr);
911 903
912 return 0; 904 return 0;
913} 905}
914 906
915static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr) 907static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
916{ 908{
917 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 909 struct smu8_hwmgr *hw_data = hwmgr->backend;
918 910
919 hw_data->disp_clk_bypass_pending = false; 911 hw_data->disp_clk_bypass_pending = false;
920 hw_data->disp_clk_bypass = false; 912 hw_data->disp_clk_bypass = false;
921} 913}
922 914
923static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr) 915static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
924{ 916{
925 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 917 struct smu8_hwmgr *hw_data = hwmgr->backend;
926 918
927 hw_data->is_nb_dpm_enabled = false; 919 hw_data->is_nb_dpm_enabled = false;
928} 920}
929 921
930static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr) 922static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
931{ 923{
932 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 924 struct smu8_hwmgr *hw_data = hwmgr->backend;
933 925
934 hw_data->cc6_settings.cc6_setting_changed = false; 926 hw_data->cc6_settings.cc6_setting_changed = false;
935 hw_data->cc6_settings.cpu_pstate_separation_time = 0; 927 hw_data->cc6_settings.cpu_pstate_separation_time = 0;
@@ -937,45 +929,47 @@ static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
937 hw_data->cc6_settings.cpu_pstate_disable = false; 929 hw_data->cc6_settings.cpu_pstate_disable = false;
938} 930}
939 931
940static int cz_power_off_asic(struct pp_hwmgr *hwmgr) 932static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
941{ 933{
942 cz_power_up_display_clock_sys_pll(hwmgr); 934 smu8_power_up_display_clock_sys_pll(hwmgr);
943 cz_clear_nb_dpm_flag(hwmgr); 935 smu8_clear_nb_dpm_flag(hwmgr);
944 cz_reset_cc6_data(hwmgr); 936 smu8_reset_cc6_data(hwmgr);
945 return 0; 937 return 0;
946}; 938};
947 939
948static void cz_program_voting_clients(struct pp_hwmgr *hwmgr) 940static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
949{ 941{
950 PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 942 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
951 PPCZ_VOTINGRIGHTSCLIENTS_DFLT0); 943 ixCG_FREQ_TRAN_VOTING_0,
944 SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
952} 945}
953 946
954static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr) 947static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
955{ 948{
956 PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0); 949 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
950 ixCG_FREQ_TRAN_VOTING_0, 0);
957} 951}
958 952
959static int cz_start_dpm(struct pp_hwmgr *hwmgr) 953static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
960{ 954{
961 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 955 struct smu8_hwmgr *data = hwmgr->backend;
962 956
963 cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled; 957 data->dpm_flags |= DPMFlags_SCLK_Enabled;
964 958
965 return smum_send_msg_to_smc_with_parameter(hwmgr, 959 return smum_send_msg_to_smc_with_parameter(hwmgr,
966 PPSMC_MSG_EnableAllSmuFeatures, 960 PPSMC_MSG_EnableAllSmuFeatures,
967 SCLK_DPM_MASK); 961 SCLK_DPM_MASK);
968} 962}
969 963
970static int cz_stop_dpm(struct pp_hwmgr *hwmgr) 964static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
971{ 965{
972 int ret = 0; 966 int ret = 0;
973 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 967 struct smu8_hwmgr *data = hwmgr->backend;
974 unsigned long dpm_features = 0; 968 unsigned long dpm_features = 0;
975 969
976 if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) { 970 if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
977 dpm_features |= SCLK_DPM_MASK; 971 dpm_features |= SCLK_DPM_MASK;
978 cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled; 972 data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
979 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 973 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
980 PPSMC_MSG_DisableAllSmuFeatures, 974 PPSMC_MSG_DisableAllSmuFeatures,
981 dpm_features); 975 dpm_features);
@@ -983,112 +977,80 @@ static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
983 return ret; 977 return ret;
984} 978}
985 979
986static int cz_program_bootup_state(struct pp_hwmgr *hwmgr) 980static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
987{ 981{
988 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 982 struct smu8_hwmgr *data = hwmgr->backend;
989 983
990 cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock; 984 data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
991 cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock; 985 data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;
992 986
993 smum_send_msg_to_smc_with_parameter(hwmgr, 987 smum_send_msg_to_smc_with_parameter(hwmgr,
994 PPSMC_MSG_SetSclkSoftMin, 988 PPSMC_MSG_SetSclkSoftMin,
995 cz_get_sclk_level(hwmgr, 989 smu8_get_sclk_level(hwmgr,
996 cz_hwmgr->sclk_dpm.soft_min_clk, 990 data->sclk_dpm.soft_min_clk,
997 PPSMC_MSG_SetSclkSoftMin)); 991 PPSMC_MSG_SetSclkSoftMin));
998 992
999 smum_send_msg_to_smc_with_parameter(hwmgr, 993 smum_send_msg_to_smc_with_parameter(hwmgr,
1000 PPSMC_MSG_SetSclkSoftMax, 994 PPSMC_MSG_SetSclkSoftMax,
1001 cz_get_sclk_level(hwmgr, 995 smu8_get_sclk_level(hwmgr,
1002 cz_hwmgr->sclk_dpm.soft_max_clk, 996 data->sclk_dpm.soft_max_clk,
1003 PPSMC_MSG_SetSclkSoftMax)); 997 PPSMC_MSG_SetSclkSoftMax));
1004 998
1005 return 0; 999 return 0;
1006} 1000}
1007 1001
1008static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr) 1002static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
1009{ 1003{
1010 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1004 struct smu8_hwmgr *data = hwmgr->backend;
1011 1005
1012 cz_hwmgr->acp_boot_level = 0xff; 1006 data->acp_boot_level = 0xff;
1013} 1007}
1014 1008
1015static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr, 1009static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1016 unsigned long check_feature)
1017{ 1010{
1018 int result; 1011 smu8_disable_nb_dpm(hwmgr);
1019 unsigned long features;
1020
1021 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
1022 if (result == 0) {
1023 features = smum_get_argument(hwmgr);
1024 if (features & check_feature)
1025 return true;
1026 }
1027 1012
1028 return false; 1013 smu8_clear_voting_clients(hwmgr);
1029} 1014 if (smu8_stop_dpm(hwmgr))
1030
1031static bool cz_check_for_dpm_enabled(struct pp_hwmgr *hwmgr)
1032{
1033 if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
1034 return true;
1035 return false;
1036}
1037
1038static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1039{
1040 if (!cz_check_for_dpm_enabled(hwmgr)) {
1041 pr_info("dpm has been disabled\n");
1042 return 0;
1043 }
1044 cz_disable_nb_dpm(hwmgr);
1045
1046 cz_clear_voting_clients(hwmgr);
1047 if (cz_stop_dpm(hwmgr))
1048 return -EINVAL; 1015 return -EINVAL;
1049 1016
1050 return 0; 1017 return 0;
1051}; 1018};
1052 1019
1053static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1020static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1054{ 1021{
1055 if (cz_check_for_dpm_enabled(hwmgr)) { 1022 smu8_program_voting_clients(hwmgr);
1056 pr_info("dpm has been enabled\n"); 1023 if (smu8_start_dpm(hwmgr))
1057 return 0;
1058 }
1059
1060 cz_program_voting_clients(hwmgr);
1061 if (cz_start_dpm(hwmgr))
1062 return -EINVAL; 1024 return -EINVAL;
1063 cz_program_bootup_state(hwmgr); 1025 smu8_program_bootup_state(hwmgr);
1064 cz_reset_acp_boot_level(hwmgr); 1026 smu8_reset_acp_boot_level(hwmgr);
1065 1027
1066 return 0; 1028 return 0;
1067}; 1029};
1068 1030
1069static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 1031static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1070 struct pp_power_state *prequest_ps, 1032 struct pp_power_state *prequest_ps,
1071 const struct pp_power_state *pcurrent_ps) 1033 const struct pp_power_state *pcurrent_ps)
1072{ 1034{
1073 struct cz_power_state *cz_ps = 1035 struct smu8_power_state *smu8_ps =
1074 cast_PhwCzPowerState(&prequest_ps->hardware); 1036 cast_smu8_power_state(&prequest_ps->hardware);
1075 1037
1076 const struct cz_power_state *cz_current_ps = 1038 const struct smu8_power_state *smu8_current_ps =
1077 cast_const_PhwCzPowerState(&pcurrent_ps->hardware); 1039 cast_const_smu8_power_state(&pcurrent_ps->hardware);
1078 1040
1079 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1041 struct smu8_hwmgr *data = hwmgr->backend;
1080 struct PP_Clocks clocks = {0, 0, 0, 0}; 1042 struct PP_Clocks clocks = {0, 0, 0, 0};
1081 bool force_high; 1043 bool force_high;
1082 uint32_t num_of_active_displays = 0; 1044 uint32_t num_of_active_displays = 0;
1083 struct cgs_display_info info = {0}; 1045 struct cgs_display_info info = {0};
1084 1046
1085 cz_ps->need_dfs_bypass = true; 1047 smu8_ps->need_dfs_bypass = true;
1086 1048
1087 cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); 1049 data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
1088 1050
1089 clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ? 1051 clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
1090 hwmgr->display_config.min_mem_set_clock : 1052 hwmgr->display_config.min_mem_set_clock :
1091 cz_hwmgr->sys_info.nbp_memory_clock[1]; 1053 data->sys_info.nbp_memory_clock[1];
1092 1054
1093 cgs_get_active_displays_info(hwmgr->device, &info); 1055 cgs_get_active_displays_info(hwmgr->device, &info);
1094 num_of_active_displays = info.display_count; 1056 num_of_active_displays = info.display_count;
@@ -1096,56 +1058,56 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1096 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) 1058 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
1097 clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk; 1059 clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
1098 1060
1099 force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1]) 1061 force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
1100 || (num_of_active_displays >= 3); 1062 || (num_of_active_displays >= 3);
1101 1063
1102 cz_ps->action = cz_current_ps->action; 1064 smu8_ps->action = smu8_current_ps->action;
1103 1065
1104 if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 1066 if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1105 cz_nbdpm_pstate_enable_disable(hwmgr, false, false); 1067 smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
1106 else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) 1068 else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
1107 cz_nbdpm_pstate_enable_disable(hwmgr, false, true); 1069 smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
1108 else if (!force_high && (cz_ps->action == FORCE_HIGH)) 1070 else if (!force_high && (smu8_ps->action == FORCE_HIGH))
1109 cz_ps->action = CANCEL_FORCE_HIGH; 1071 smu8_ps->action = CANCEL_FORCE_HIGH;
1110 else if (force_high && (cz_ps->action != FORCE_HIGH)) 1072 else if (force_high && (smu8_ps->action != FORCE_HIGH))
1111 cz_ps->action = FORCE_HIGH; 1073 smu8_ps->action = FORCE_HIGH;
1112 else 1074 else
1113 cz_ps->action = DO_NOTHING; 1075 smu8_ps->action = DO_NOTHING;
1114 1076
1115 return 0; 1077 return 0;
1116} 1078}
1117 1079
1118static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 1080static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
1119{ 1081{
1120 int result = 0; 1082 int result = 0;
1121 struct cz_hwmgr *data; 1083 struct smu8_hwmgr *data;
1122 1084
1123 data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL); 1085 data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
1124 if (data == NULL) 1086 if (data == NULL)
1125 return -ENOMEM; 1087 return -ENOMEM;
1126 1088
1127 hwmgr->backend = data; 1089 hwmgr->backend = data;
1128 1090
1129 result = cz_initialize_dpm_defaults(hwmgr); 1091 result = smu8_initialize_dpm_defaults(hwmgr);
1130 if (result != 0) { 1092 if (result != 0) {
1131 pr_err("cz_initialize_dpm_defaults failed\n"); 1093 pr_err("smu8_initialize_dpm_defaults failed\n");
1132 return result; 1094 return result;
1133 } 1095 }
1134 1096
1135 result = cz_get_system_info_data(hwmgr); 1097 result = smu8_get_system_info_data(hwmgr);
1136 if (result != 0) { 1098 if (result != 0) {
1137 pr_err("cz_get_system_info_data failed\n"); 1099 pr_err("smu8_get_system_info_data failed\n");
1138 return result; 1100 return result;
1139 } 1101 }
1140 1102
1141 cz_construct_boot_state(hwmgr); 1103 smu8_construct_boot_state(hwmgr);
1142 1104
1143 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS; 1105 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU8_MAX_HARDWARE_POWERLEVELS;
1144 1106
1145 return result; 1107 return result;
1146} 1108}
1147 1109
1148static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 1110static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1149{ 1111{
1150 if (hwmgr != NULL) { 1112 if (hwmgr != NULL) {
1151 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 1113 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
@@ -1157,28 +1119,28 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1157 return 0; 1119 return 0;
1158} 1120}
1159 1121
1160static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) 1122static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
1161{ 1123{
1162 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1124 struct smu8_hwmgr *data = hwmgr->backend;
1163 1125
1164 smum_send_msg_to_smc_with_parameter(hwmgr, 1126 smum_send_msg_to_smc_with_parameter(hwmgr,
1165 PPSMC_MSG_SetSclkSoftMin, 1127 PPSMC_MSG_SetSclkSoftMin,
1166 cz_get_sclk_level(hwmgr, 1128 smu8_get_sclk_level(hwmgr,
1167 cz_hwmgr->sclk_dpm.soft_max_clk, 1129 data->sclk_dpm.soft_max_clk,
1168 PPSMC_MSG_SetSclkSoftMin)); 1130 PPSMC_MSG_SetSclkSoftMin));
1169 1131
1170 smum_send_msg_to_smc_with_parameter(hwmgr, 1132 smum_send_msg_to_smc_with_parameter(hwmgr,
1171 PPSMC_MSG_SetSclkSoftMax, 1133 PPSMC_MSG_SetSclkSoftMax,
1172 cz_get_sclk_level(hwmgr, 1134 smu8_get_sclk_level(hwmgr,
1173 cz_hwmgr->sclk_dpm.soft_max_clk, 1135 data->sclk_dpm.soft_max_clk,
1174 PPSMC_MSG_SetSclkSoftMax)); 1136 PPSMC_MSG_SetSclkSoftMax));
1175 1137
1176 return 0; 1138 return 0;
1177} 1139}
1178 1140
1179static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 1141static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1180{ 1142{
1181 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1143 struct smu8_hwmgr *data = hwmgr->backend;
1182 struct phm_clock_voltage_dependency_table *table = 1144 struct phm_clock_voltage_dependency_table *table =
1183 hwmgr->dyn_state.vddc_dependency_on_sclk; 1145 hwmgr->dyn_state.vddc_dependency_on_sclk;
1184 unsigned long clock = 0, level; 1146 unsigned long clock = 0, level;
@@ -1186,54 +1148,56 @@ static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1186 if (NULL == table || table->count <= 0) 1148 if (NULL == table || table->count <= 0)
1187 return -EINVAL; 1149 return -EINVAL;
1188 1150
1189 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; 1151 data->sclk_dpm.soft_min_clk = table->entries[0].clk;
1190 cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk; 1152 data->sclk_dpm.hard_min_clk = table->entries[0].clk;
1153 hwmgr->pstate_sclk = table->entries[0].clk;
1154 hwmgr->pstate_mclk = 0;
1191 1155
1192 level = cz_get_max_sclk_level(hwmgr) - 1; 1156 level = smu8_get_max_sclk_level(hwmgr) - 1;
1193 1157
1194 if (level < table->count) 1158 if (level < table->count)
1195 clock = table->entries[level].clk; 1159 clock = table->entries[level].clk;
1196 else 1160 else
1197 clock = table->entries[table->count - 1].clk; 1161 clock = table->entries[table->count - 1].clk;
1198 1162
1199 cz_hwmgr->sclk_dpm.soft_max_clk = clock; 1163 data->sclk_dpm.soft_max_clk = clock;
1200 cz_hwmgr->sclk_dpm.hard_max_clk = clock; 1164 data->sclk_dpm.hard_max_clk = clock;
1201 1165
1202 smum_send_msg_to_smc_with_parameter(hwmgr, 1166 smum_send_msg_to_smc_with_parameter(hwmgr,
1203 PPSMC_MSG_SetSclkSoftMin, 1167 PPSMC_MSG_SetSclkSoftMin,
1204 cz_get_sclk_level(hwmgr, 1168 smu8_get_sclk_level(hwmgr,
1205 cz_hwmgr->sclk_dpm.soft_min_clk, 1169 data->sclk_dpm.soft_min_clk,
1206 PPSMC_MSG_SetSclkSoftMin)); 1170 PPSMC_MSG_SetSclkSoftMin));
1207 1171
1208 smum_send_msg_to_smc_with_parameter(hwmgr, 1172 smum_send_msg_to_smc_with_parameter(hwmgr,
1209 PPSMC_MSG_SetSclkSoftMax, 1173 PPSMC_MSG_SetSclkSoftMax,
1210 cz_get_sclk_level(hwmgr, 1174 smu8_get_sclk_level(hwmgr,
1211 cz_hwmgr->sclk_dpm.soft_max_clk, 1175 data->sclk_dpm.soft_max_clk,
1212 PPSMC_MSG_SetSclkSoftMax)); 1176 PPSMC_MSG_SetSclkSoftMax));
1213 1177
1214 return 0; 1178 return 0;
1215} 1179}
1216 1180
1217static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) 1181static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1218{ 1182{
1219 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1183 struct smu8_hwmgr *data = hwmgr->backend;
1220 1184
1221 smum_send_msg_to_smc_with_parameter(hwmgr, 1185 smum_send_msg_to_smc_with_parameter(hwmgr,
1222 PPSMC_MSG_SetSclkSoftMax, 1186 PPSMC_MSG_SetSclkSoftMax,
1223 cz_get_sclk_level(hwmgr, 1187 smu8_get_sclk_level(hwmgr,
1224 cz_hwmgr->sclk_dpm.soft_min_clk, 1188 data->sclk_dpm.soft_min_clk,
1225 PPSMC_MSG_SetSclkSoftMax)); 1189 PPSMC_MSG_SetSclkSoftMax));
1226 1190
1227 smum_send_msg_to_smc_with_parameter(hwmgr, 1191 smum_send_msg_to_smc_with_parameter(hwmgr,
1228 PPSMC_MSG_SetSclkSoftMin, 1192 PPSMC_MSG_SetSclkSoftMin,
1229 cz_get_sclk_level(hwmgr, 1193 smu8_get_sclk_level(hwmgr,
1230 cz_hwmgr->sclk_dpm.soft_min_clk, 1194 data->sclk_dpm.soft_min_clk,
1231 PPSMC_MSG_SetSclkSoftMin)); 1195 PPSMC_MSG_SetSclkSoftMin));
1232 1196
1233 return 0; 1197 return 0;
1234} 1198}
1235 1199
1236static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 1200static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1237 enum amd_dpm_forced_level level) 1201 enum amd_dpm_forced_level level)
1238{ 1202{
1239 int ret = 0; 1203 int ret = 0;
@@ -1241,15 +1205,15 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1241 switch (level) { 1205 switch (level) {
1242 case AMD_DPM_FORCED_LEVEL_HIGH: 1206 case AMD_DPM_FORCED_LEVEL_HIGH:
1243 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1207 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1244 ret = cz_phm_force_dpm_highest(hwmgr); 1208 ret = smu8_phm_force_dpm_highest(hwmgr);
1245 break; 1209 break;
1246 case AMD_DPM_FORCED_LEVEL_LOW: 1210 case AMD_DPM_FORCED_LEVEL_LOW:
1247 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1211 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1248 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1212 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1249 ret = cz_phm_force_dpm_lowest(hwmgr); 1213 ret = smu8_phm_force_dpm_lowest(hwmgr);
1250 break; 1214 break;
1251 case AMD_DPM_FORCED_LEVEL_AUTO: 1215 case AMD_DPM_FORCED_LEVEL_AUTO:
1252 ret = cz_phm_unforce_dpm_levels(hwmgr); 1216 ret = smu8_phm_unforce_dpm_levels(hwmgr);
1253 break; 1217 break;
1254 case AMD_DPM_FORCED_LEVEL_MANUAL: 1218 case AMD_DPM_FORCED_LEVEL_MANUAL:
1255 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1219 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -1260,14 +1224,14 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1260 return ret; 1224 return ret;
1261} 1225}
1262 1226
1263int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) 1227static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1264{ 1228{
1265 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) 1229 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
1266 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); 1230 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
1267 return 0; 1231 return 0;
1268} 1232}
1269 1233
1270int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) 1234static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1271{ 1235{
1272 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { 1236 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1273 return smum_send_msg_to_smc_with_parameter( 1237 return smum_send_msg_to_smc_with_parameter(
@@ -1279,52 +1243,22 @@ int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1279 return 0; 1243 return 0;
1280} 1244}
1281 1245
1282int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) 1246static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1283{
1284 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1285 struct phm_uvd_clock_voltage_dependency_table *ptable =
1286 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1287
1288 if (!bgate) {
1289 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
1290 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1291 hwmgr->en_umd_pstate) {
1292 cz_hwmgr->uvd_dpm.hard_min_clk =
1293 ptable->entries[ptable->count - 1].vclk;
1294
1295 smum_send_msg_to_smc_with_parameter(hwmgr,
1296 PPSMC_MSG_SetUvdHardMin,
1297 cz_get_uvd_level(hwmgr,
1298 cz_hwmgr->uvd_dpm.hard_min_clk,
1299 PPSMC_MSG_SetUvdHardMin));
1300
1301 cz_enable_disable_uvd_dpm(hwmgr, true);
1302 } else {
1303 cz_enable_disable_uvd_dpm(hwmgr, true);
1304 }
1305 } else {
1306 cz_enable_disable_uvd_dpm(hwmgr, false);
1307 }
1308
1309 return 0;
1310}
1311
1312int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1313{ 1247{
1314 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1248 struct smu8_hwmgr *data = hwmgr->backend;
1315 struct phm_vce_clock_voltage_dependency_table *ptable = 1249 struct phm_vce_clock_voltage_dependency_table *ptable =
1316 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 1250 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1317 1251
1318 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ 1252 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
1319 if (PP_CAP(PHM_PlatformCaps_StablePState) || 1253 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1320 hwmgr->en_umd_pstate) { 1254 hwmgr->en_umd_pstate) {
1321 cz_hwmgr->vce_dpm.hard_min_clk = 1255 data->vce_dpm.hard_min_clk =
1322 ptable->entries[ptable->count - 1].ecclk; 1256 ptable->entries[ptable->count - 1].ecclk;
1323 1257
1324 smum_send_msg_to_smc_with_parameter(hwmgr, 1258 smum_send_msg_to_smc_with_parameter(hwmgr,
1325 PPSMC_MSG_SetEclkHardMin, 1259 PPSMC_MSG_SetEclkHardMin,
1326 cz_get_eclk_level(hwmgr, 1260 smu8_get_eclk_level(hwmgr,
1327 cz_hwmgr->vce_dpm.hard_min_clk, 1261 data->vce_dpm.hard_min_clk,
1328 PPSMC_MSG_SetEclkHardMin)); 1262 PPSMC_MSG_SetEclkHardMin));
1329 } else { 1263 } else {
1330 1264
@@ -1338,7 +1272,7 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1338 return 0; 1272 return 0;
1339} 1273}
1340 1274
1341int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) 1275static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1342{ 1276{
1343 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) 1277 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1344 return smum_send_msg_to_smc(hwmgr, 1278 return smum_send_msg_to_smc(hwmgr,
@@ -1346,7 +1280,7 @@ int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1346 return 0; 1280 return 0;
1347} 1281}
1348 1282
1349int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr) 1283static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1350{ 1284{
1351 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) 1285 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1352 return smum_send_msg_to_smc(hwmgr, 1286 return smum_send_msg_to_smc(hwmgr,
@@ -1354,17 +1288,17 @@ int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1354 return 0; 1288 return 0;
1355} 1289}
1356 1290
1357static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 1291static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1358{ 1292{
1359 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1293 struct smu8_hwmgr *data = hwmgr->backend;
1360 1294
1361 return cz_hwmgr->sys_info.bootup_uma_clock; 1295 return data->sys_info.bootup_uma_clock;
1362} 1296}
1363 1297
1364static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 1298static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1365{ 1299{
1366 struct pp_power_state *ps; 1300 struct pp_power_state *ps;
1367 struct cz_power_state *cz_ps; 1301 struct smu8_power_state *smu8_ps;
1368 1302
1369 if (hwmgr == NULL) 1303 if (hwmgr == NULL)
1370 return -EINVAL; 1304 return -EINVAL;
@@ -1374,59 +1308,59 @@ static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1374 if (ps == NULL) 1308 if (ps == NULL)
1375 return -EINVAL; 1309 return -EINVAL;
1376 1310
1377 cz_ps = cast_PhwCzPowerState(&ps->hardware); 1311 smu8_ps = cast_smu8_power_state(&ps->hardware);
1378 1312
1379 if (low) 1313 if (low)
1380 return cz_ps->levels[0].engineClock; 1314 return smu8_ps->levels[0].engineClock;
1381 else 1315 else
1382 return cz_ps->levels[cz_ps->level-1].engineClock; 1316 return smu8_ps->levels[smu8_ps->level-1].engineClock;
1383} 1317}
1384 1318
1385static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, 1319static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
1386 struct pp_hw_power_state *hw_ps) 1320 struct pp_hw_power_state *hw_ps)
1387{ 1321{
1388 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1322 struct smu8_hwmgr *data = hwmgr->backend;
1389 struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps); 1323 struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1390 1324
1391 cz_ps->level = 1; 1325 smu8_ps->level = 1;
1392 cz_ps->nbps_flags = 0; 1326 smu8_ps->nbps_flags = 0;
1393 cz_ps->bapm_flags = 0; 1327 smu8_ps->bapm_flags = 0;
1394 cz_ps->levels[0] = cz_hwmgr->boot_power_level; 1328 smu8_ps->levels[0] = data->boot_power_level;
1395 1329
1396 return 0; 1330 return 0;
1397} 1331}
1398 1332
1399static int cz_dpm_get_pp_table_entry_callback( 1333static int smu8_dpm_get_pp_table_entry_callback(
1400 struct pp_hwmgr *hwmgr, 1334 struct pp_hwmgr *hwmgr,
1401 struct pp_hw_power_state *hw_ps, 1335 struct pp_hw_power_state *hw_ps,
1402 unsigned int index, 1336 unsigned int index,
1403 const void *clock_info) 1337 const void *clock_info)
1404{ 1338{
1405 struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps); 1339 struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1406 1340
1407 const ATOM_PPLIB_CZ_CLOCK_INFO *cz_clock_info = clock_info; 1341 const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;
1408 1342
1409 struct phm_clock_voltage_dependency_table *table = 1343 struct phm_clock_voltage_dependency_table *table =
1410 hwmgr->dyn_state.vddc_dependency_on_sclk; 1344 hwmgr->dyn_state.vddc_dependency_on_sclk;
1411 uint8_t clock_info_index = cz_clock_info->index; 1345 uint8_t clock_info_index = smu8_clock_info->index;
1412 1346
1413 if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1)) 1347 if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
1414 clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1); 1348 clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
1415 1349
1416 cz_ps->levels[index].engineClock = table->entries[clock_info_index].clk; 1350 smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
1417 cz_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v; 1351 smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
1418 1352
1419 cz_ps->level = index + 1; 1353 smu8_ps->level = index + 1;
1420 1354
1421 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { 1355 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
1422 cz_ps->levels[index].dsDividerIndex = 5; 1356 smu8_ps->levels[index].dsDividerIndex = 5;
1423 cz_ps->levels[index].ssDividerIndex = 5; 1357 smu8_ps->levels[index].ssDividerIndex = 5;
1424 } 1358 }
1425 1359
1426 return 0; 1360 return 0;
1427} 1361}
1428 1362
1429static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr) 1363static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
1430{ 1364{
1431 int result; 1365 int result;
1432 unsigned long ret = 0; 1366 unsigned long ret = 0;
@@ -1436,31 +1370,31 @@ static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
1436 return result ? 0 : ret; 1370 return result ? 0 : ret;
1437} 1371}
1438 1372
1439static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, 1373static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
1440 unsigned long entry, struct pp_power_state *ps) 1374 unsigned long entry, struct pp_power_state *ps)
1441{ 1375{
1442 int result; 1376 int result;
1443 struct cz_power_state *cz_ps; 1377 struct smu8_power_state *smu8_ps;
1444 1378
1445 ps->hardware.magic = PhwCz_Magic; 1379 ps->hardware.magic = smu8_magic;
1446 1380
1447 cz_ps = cast_PhwCzPowerState(&(ps->hardware)); 1381 smu8_ps = cast_smu8_power_state(&(ps->hardware));
1448 1382
1449 result = pp_tables_get_entry(hwmgr, entry, ps, 1383 result = pp_tables_get_entry(hwmgr, entry, ps,
1450 cz_dpm_get_pp_table_entry_callback); 1384 smu8_dpm_get_pp_table_entry_callback);
1451 1385
1452 cz_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK; 1386 smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
1453 cz_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK; 1387 smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
1454 1388
1455 return result; 1389 return result;
1456} 1390}
1457 1391
1458static int cz_get_power_state_size(struct pp_hwmgr *hwmgr) 1392static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
1459{ 1393{
1460 return sizeof(struct cz_power_state); 1394 return sizeof(struct smu8_power_state);
1461} 1395}
1462 1396
1463static void cz_hw_print_display_cfg( 1397static void smu8_hw_print_display_cfg(
1464 const struct cc6_settings *cc6_settings) 1398 const struct cc6_settings *cc6_settings)
1465{ 1399{
1466 PP_DBG_LOG("New Display Configuration:\n"); 1400 PP_DBG_LOG("New Display Configuration:\n");
@@ -1475,16 +1409,16 @@ static void cz_hw_print_display_cfg(
1475 cc6_settings->cpu_pstate_separation_time); 1409 cc6_settings->cpu_pstate_separation_time);
1476} 1410}
1477 1411
1478 static int cz_set_cpu_power_state(struct pp_hwmgr *hwmgr) 1412 static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
1479{ 1413{
1480 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 1414 struct smu8_hwmgr *hw_data = hwmgr->backend;
1481 uint32_t data = 0; 1415 uint32_t data = 0;
1482 1416
1483 if (hw_data->cc6_settings.cc6_setting_changed) { 1417 if (hw_data->cc6_settings.cc6_setting_changed) {
1484 1418
1485 hw_data->cc6_settings.cc6_setting_changed = false; 1419 hw_data->cc6_settings.cc6_setting_changed = false;
1486 1420
1487 cz_hw_print_display_cfg(&hw_data->cc6_settings); 1421 smu8_hw_print_display_cfg(&hw_data->cc6_settings);
1488 1422
1489 data |= (hw_data->cc6_settings.cpu_pstate_separation_time 1423 data |= (hw_data->cc6_settings.cpu_pstate_separation_time
1490 & PWRMGT_SEPARATION_TIME_MASK) 1424 & PWRMGT_SEPARATION_TIME_MASK)
@@ -1508,10 +1442,10 @@ static void cz_hw_print_display_cfg(
1508} 1442}
1509 1443
1510 1444
1511static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, 1445static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
1512 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) 1446 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
1513{ 1447{
1514 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 1448 struct smu8_hwmgr *hw_data = hwmgr->backend;
1515 1449
1516 if (separation_time != 1450 if (separation_time !=
1517 hw_data->cc6_settings.cpu_pstate_separation_time || 1451 hw_data->cc6_settings.cpu_pstate_separation_time ||
@@ -1535,7 +1469,7 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
1535 return 0; 1469 return 0;
1536} 1470}
1537 1471
1538static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, 1472static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
1539 struct amd_pp_simple_clock_info *info) 1473 struct amd_pp_simple_clock_info *info)
1540{ 1474{
1541 uint32_t i; 1475 uint32_t i;
@@ -1556,12 +1490,9 @@ static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
1556 return -EINVAL; 1490 return -EINVAL;
1557} 1491}
1558 1492
1559static int cz_force_clock_level(struct pp_hwmgr *hwmgr, 1493static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
1560 enum pp_clock_type type, uint32_t mask) 1494 enum pp_clock_type type, uint32_t mask)
1561{ 1495{
1562 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1563 return -EINVAL;
1564
1565 switch (type) { 1496 switch (type) {
1566 case PP_SCLK: 1497 case PP_SCLK:
1567 smum_send_msg_to_smc_with_parameter(hwmgr, 1498 smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -1578,9 +1509,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
1578 return 0; 1509 return 0;
1579} 1510}
1580 1511
1581static int cz_print_clock_levels(struct pp_hwmgr *hwmgr, 1512static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
1582 enum pp_clock_type type, char *buf) 1513 enum pp_clock_type type, char *buf)
1583{ 1514{
1515 struct smu8_hwmgr *data = hwmgr->backend;
1584 struct phm_clock_voltage_dependency_table *sclk_table = 1516 struct phm_clock_voltage_dependency_table *sclk_table =
1585 hwmgr->dyn_state.vddc_dependency_on_sclk; 1517 hwmgr->dyn_state.vddc_dependency_on_sclk;
1586 int i, now, size = 0; 1518 int i, now, size = 0;
@@ -1598,26 +1530,38 @@ static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
1598 i, sclk_table->entries[i].clk / 100, 1530 i, sclk_table->entries[i].clk / 100,
1599 (i == now) ? "*" : ""); 1531 (i == now) ? "*" : "");
1600 break; 1532 break;
1533 case PP_MCLK:
1534 now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1535 CGS_IND_REG__SMC,
1536 ixTARGET_AND_CURRENT_PROFILE_INDEX),
1537 TARGET_AND_CURRENT_PROFILE_INDEX,
1538 CURR_MCLK_INDEX);
1539
1540 for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
1541 size += sprintf(buf + size, "%d: %uMhz %s\n",
1542 SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
1543 (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
1544 break;
1601 default: 1545 default:
1602 break; 1546 break;
1603 } 1547 }
1604 return size; 1548 return size;
1605} 1549}
1606 1550
1607static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 1551static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
1608 PHM_PerformanceLevelDesignation designation, uint32_t index, 1552 PHM_PerformanceLevelDesignation designation, uint32_t index,
1609 PHM_PerformanceLevel *level) 1553 PHM_PerformanceLevel *level)
1610{ 1554{
1611 const struct cz_power_state *ps; 1555 const struct smu8_power_state *ps;
1612 struct cz_hwmgr *data; 1556 struct smu8_hwmgr *data;
1613 uint32_t level_index; 1557 uint32_t level_index;
1614 uint32_t i; 1558 uint32_t i;
1615 1559
1616 if (level == NULL || hwmgr == NULL || state == NULL) 1560 if (level == NULL || hwmgr == NULL || state == NULL)
1617 return -EINVAL; 1561 return -EINVAL;
1618 1562
1619 data = (struct cz_hwmgr *)(hwmgr->backend); 1563 data = hwmgr->backend;
1620 ps = cast_const_PhwCzPowerState(state); 1564 ps = cast_const_smu8_power_state(state);
1621 1565
1622 level_index = index > ps->level - 1 ? ps->level - 1 : index; 1566 level_index = index > ps->level - 1 ? ps->level - 1 : index;
1623 level->coreClock = ps->levels[level_index].engineClock; 1567 level->coreClock = ps->levels[level_index].engineClock;
@@ -1632,21 +1576,21 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
1632 } 1576 }
1633 1577
1634 if (level_index == 0) 1578 if (level_index == 0)
1635 level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1]; 1579 level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
1636 else 1580 else
1637 level->memory_clock = data->sys_info.nbp_memory_clock[0]; 1581 level->memory_clock = data->sys_info.nbp_memory_clock[0];
1638 1582
1639 level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4; 1583 level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
1640 level->nonLocalMemoryFreq = 0; 1584 level->nonLocalMemoryFreq = 0;
1641 level->nonLocalMemoryWidth = 0; 1585 level->nonLocalMemoryWidth = 0;
1642 1586
1643 return 0; 1587 return 0;
1644} 1588}
1645 1589
1646static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, 1590static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1647 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) 1591 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
1648{ 1592{
1649 const struct cz_power_state *ps = cast_const_PhwCzPowerState(state); 1593 const struct smu8_power_state *ps = cast_const_smu8_power_state(state);
1650 1594
1651 clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex)); 1595 clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
1652 clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex)); 1596 clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
@@ -1654,14 +1598,14 @@ static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1654 return 0; 1598 return 0;
1655} 1599}
1656 1600
1657static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, 1601static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
1658 struct amd_pp_clocks *clocks) 1602 struct amd_pp_clocks *clocks)
1659{ 1603{
1660 struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend); 1604 struct smu8_hwmgr *data = hwmgr->backend;
1661 int i; 1605 int i;
1662 struct phm_clock_voltage_dependency_table *table; 1606 struct phm_clock_voltage_dependency_table *table;
1663 1607
1664 clocks->count = cz_get_max_sclk_level(hwmgr); 1608 clocks->count = smu8_get_max_sclk_level(hwmgr);
1665 switch (type) { 1609 switch (type) {
1666 case amd_pp_disp_clock: 1610 case amd_pp_disp_clock:
1667 for (i = 0; i < clocks->count; i++) 1611 for (i = 0; i < clocks->count; i++)
@@ -1673,7 +1617,7 @@ static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type t
1673 clocks->clock[i] = table->entries[i].clk; 1617 clocks->clock[i] = table->entries[i].clk;
1674 break; 1618 break;
1675 case amd_pp_mem_clock: 1619 case amd_pp_mem_clock:
1676 clocks->count = CZ_NUM_NBPMEMORYCLOCK; 1620 clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
1677 for (i = 0; i < clocks->count; i++) 1621 for (i = 0; i < clocks->count; i++)
1678 clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i]; 1622 clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
1679 break; 1623 break;
@@ -1684,7 +1628,7 @@ static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type t
1684 return 0; 1628 return 0;
1685} 1629}
1686 1630
1687static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) 1631static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
1688{ 1632{
1689 struct phm_clock_voltage_dependency_table *table = 1633 struct phm_clock_voltage_dependency_table *table =
1690 hwmgr->dyn_state.vddc_dependency_on_sclk; 1634 hwmgr->dyn_state.vddc_dependency_on_sclk;
@@ -1695,7 +1639,7 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
1695 if ((NULL == table) || (table->count <= 0) || (clocks == NULL)) 1639 if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
1696 return -EINVAL; 1640 return -EINVAL;
1697 1641
1698 level = cz_get_max_sclk_level(hwmgr) - 1; 1642 level = smu8_get_max_sclk_level(hwmgr) - 1;
1699 1643
1700 if (level < table->count) 1644 if (level < table->count)
1701 clocks->engine_max_clock = table->entries[level].clk; 1645 clocks->engine_max_clock = table->entries[level].clk;
@@ -1707,7 +1651,7 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
1707 return 0; 1651 return 0;
1708} 1652}
1709 1653
1710static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr) 1654static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1711{ 1655{
1712 int actual_temp = 0; 1656 int actual_temp = 0;
1713 uint32_t val = cgs_read_ind_register(hwmgr->device, 1657 uint32_t val = cgs_read_ind_register(hwmgr->device,
@@ -1722,10 +1666,10 @@ static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1722 return actual_temp; 1666 return actual_temp;
1723} 1667}
1724 1668
1725static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, 1669static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1726 void *value, int *size) 1670 void *value, int *size)
1727{ 1671{
1728 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1672 struct smu8_hwmgr *data = hwmgr->backend;
1729 1673
1730 struct phm_clock_voltage_dependency_table *table = 1674 struct phm_clock_voltage_dependency_table *table =
1731 hwmgr->dyn_state.vddc_dependency_on_sclk; 1675 hwmgr->dyn_state.vddc_dependency_on_sclk;
@@ -1763,18 +1707,18 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1763 case AMDGPU_PP_SENSOR_VDDNB: 1707 case AMDGPU_PP_SENSOR_VDDNB:
1764 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & 1708 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
1765 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; 1709 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
1766 vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp); 1710 vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
1767 *((uint32_t *)value) = vddnb; 1711 *((uint32_t *)value) = vddnb;
1768 return 0; 1712 return 0;
1769 case AMDGPU_PP_SENSOR_VDDGFX: 1713 case AMDGPU_PP_SENSOR_VDDGFX:
1770 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & 1714 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
1771 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; 1715 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
1772 vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); 1716 vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
1773 *((uint32_t *)value) = vddgfx; 1717 *((uint32_t *)value) = vddgfx;
1774 return 0; 1718 return 0;
1775 case AMDGPU_PP_SENSOR_UVD_VCLK: 1719 case AMDGPU_PP_SENSOR_UVD_VCLK:
1776 if (!cz_hwmgr->uvd_power_gated) { 1720 if (!data->uvd_power_gated) {
1777 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { 1721 if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1778 return -EINVAL; 1722 return -EINVAL;
1779 } else { 1723 } else {
1780 vclk = uvd_table->entries[uvd_index].vclk; 1724 vclk = uvd_table->entries[uvd_index].vclk;
@@ -1785,8 +1729,8 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1785 *((uint32_t *)value) = 0; 1729 *((uint32_t *)value) = 0;
1786 return 0; 1730 return 0;
1787 case AMDGPU_PP_SENSOR_UVD_DCLK: 1731 case AMDGPU_PP_SENSOR_UVD_DCLK:
1788 if (!cz_hwmgr->uvd_power_gated) { 1732 if (!data->uvd_power_gated) {
1789 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { 1733 if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1790 return -EINVAL; 1734 return -EINVAL;
1791 } else { 1735 } else {
1792 dclk = uvd_table->entries[uvd_index].dclk; 1736 dclk = uvd_table->entries[uvd_index].dclk;
@@ -1797,8 +1741,8 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1797 *((uint32_t *)value) = 0; 1741 *((uint32_t *)value) = 0;
1798 return 0; 1742 return 0;
1799 case AMDGPU_PP_SENSOR_VCE_ECCLK: 1743 case AMDGPU_PP_SENSOR_VCE_ECCLK:
1800 if (!cz_hwmgr->vce_power_gated) { 1744 if (!data->vce_power_gated) {
1801 if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { 1745 if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1802 return -EINVAL; 1746 return -EINVAL;
1803 } else { 1747 } else {
1804 ecclk = vce_table->entries[vce_index].ecclk; 1748 ecclk = vce_table->entries[vce_index].ecclk;
@@ -1819,20 +1763,20 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1819 *((uint32_t *)value) = activity_percent; 1763 *((uint32_t *)value) = activity_percent;
1820 return 0; 1764 return 0;
1821 case AMDGPU_PP_SENSOR_UVD_POWER: 1765 case AMDGPU_PP_SENSOR_UVD_POWER:
1822 *((uint32_t *)value) = cz_hwmgr->uvd_power_gated ? 0 : 1; 1766 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1823 return 0; 1767 return 0;
1824 case AMDGPU_PP_SENSOR_VCE_POWER: 1768 case AMDGPU_PP_SENSOR_VCE_POWER:
1825 *((uint32_t *)value) = cz_hwmgr->vce_power_gated ? 0 : 1; 1769 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1826 return 0; 1770 return 0;
1827 case AMDGPU_PP_SENSOR_GPU_TEMP: 1771 case AMDGPU_PP_SENSOR_GPU_TEMP:
1828 *((uint32_t *)value) = cz_thermal_get_temperature(hwmgr); 1772 *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
1829 return 0; 1773 return 0;
1830 default: 1774 default:
1831 return -EINVAL; 1775 return -EINVAL;
1832 } 1776 }
1833} 1777}
1834 1778
1835static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, 1779static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
1836 uint32_t virtual_addr_low, 1780 uint32_t virtual_addr_low,
1837 uint32_t virtual_addr_hi, 1781 uint32_t virtual_addr_hi,
1838 uint32_t mc_addr_low, 1782 uint32_t mc_addr_low,
@@ -1858,43 +1802,190 @@ static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
1858 return 0; 1802 return 0;
1859} 1803}
1860 1804
1805static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
1806 struct PP_TemperatureRange *thermal_data)
1807{
1808 struct smu8_hwmgr *data = hwmgr->backend;
1809
1810 memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
1811
1812 thermal_data->max = (data->thermal_auto_throttling_treshold +
1813 data->sys_info.htc_hyst_lmt) *
1814 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1815
1816 return 0;
1817}
1818
1819static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
1820{
1821 struct smu8_hwmgr *data = hwmgr->backend;
1822 uint32_t dpm_features = 0;
1823
1824 if (enable &&
1825 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1826 PHM_PlatformCaps_UVDDPM)) {
1827 data->dpm_flags |= DPMFlags_UVD_Enabled;
1828 dpm_features |= UVD_DPM_MASK;
1829 smum_send_msg_to_smc_with_parameter(hwmgr,
1830 PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
1831 } else {
1832 dpm_features |= UVD_DPM_MASK;
1833 data->dpm_flags &= ~DPMFlags_UVD_Enabled;
1834 smum_send_msg_to_smc_with_parameter(hwmgr,
1835 PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
1836 }
1837 return 0;
1838}
1839
1840int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
1841{
1842 struct smu8_hwmgr *data = hwmgr->backend;
1843 struct phm_uvd_clock_voltage_dependency_table *ptable =
1844 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1845
1846 if (!bgate) {
1847 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
1848 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1849 hwmgr->en_umd_pstate) {
1850 data->uvd_dpm.hard_min_clk =
1851 ptable->entries[ptable->count - 1].vclk;
1852
1853 smum_send_msg_to_smc_with_parameter(hwmgr,
1854 PPSMC_MSG_SetUvdHardMin,
1855 smu8_get_uvd_level(hwmgr,
1856 data->uvd_dpm.hard_min_clk,
1857 PPSMC_MSG_SetUvdHardMin));
1858
1859 smu8_enable_disable_uvd_dpm(hwmgr, true);
1860 } else {
1861 smu8_enable_disable_uvd_dpm(hwmgr, true);
1862 }
1863 } else {
1864 smu8_enable_disable_uvd_dpm(hwmgr, false);
1865 }
1866
1867 return 0;
1868}
1869
1870static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1871{
1872 struct smu8_hwmgr *data = hwmgr->backend;
1873 uint32_t dpm_features = 0;
1874
1875 if (enable && phm_cap_enabled(
1876 hwmgr->platform_descriptor.platformCaps,
1877 PHM_PlatformCaps_VCEDPM)) {
1878 data->dpm_flags |= DPMFlags_VCE_Enabled;
1879 dpm_features |= VCE_DPM_MASK;
1880 smum_send_msg_to_smc_with_parameter(hwmgr,
1881 PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
1882 } else {
1883 dpm_features |= VCE_DPM_MASK;
1884 data->dpm_flags &= ~DPMFlags_VCE_Enabled;
1885 smum_send_msg_to_smc_with_parameter(hwmgr,
1886 PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
1887 }
1888
1889 return 0;
1890}
1891
1892
1893static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
1894{
1895 struct smu8_hwmgr *data = hwmgr->backend;
1896
1897 data->uvd_power_gated = bgate;
1898
1899 if (bgate) {
1900 cgs_set_powergating_state(hwmgr->device,
1901 AMD_IP_BLOCK_TYPE_UVD,
1902 AMD_PG_STATE_GATE);
1903 cgs_set_clockgating_state(hwmgr->device,
1904 AMD_IP_BLOCK_TYPE_UVD,
1905 AMD_CG_STATE_GATE);
1906 smu8_dpm_update_uvd_dpm(hwmgr, true);
1907 smu8_dpm_powerdown_uvd(hwmgr);
1908 } else {
1909 smu8_dpm_powerup_uvd(hwmgr);
1910 cgs_set_clockgating_state(hwmgr->device,
1911 AMD_IP_BLOCK_TYPE_UVD,
1912 AMD_CG_STATE_UNGATE);
1913 cgs_set_powergating_state(hwmgr->device,
1914 AMD_IP_BLOCK_TYPE_UVD,
1915 AMD_PG_STATE_UNGATE);
1916 smu8_dpm_update_uvd_dpm(hwmgr, false);
1917 }
1918
1919}
1920
1921static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
1922{
1923 struct smu8_hwmgr *data = hwmgr->backend;
1924
1925 if (bgate) {
1926 cgs_set_powergating_state(
1927 hwmgr->device,
1928 AMD_IP_BLOCK_TYPE_VCE,
1929 AMD_PG_STATE_GATE);
1930 cgs_set_clockgating_state(
1931 hwmgr->device,
1932 AMD_IP_BLOCK_TYPE_VCE,
1933 AMD_CG_STATE_GATE);
1934 smu8_enable_disable_vce_dpm(hwmgr, false);
1935 smu8_dpm_powerdown_vce(hwmgr);
1936 data->vce_power_gated = true;
1937 } else {
1938 smu8_dpm_powerup_vce(hwmgr);
1939 data->vce_power_gated = false;
1940 cgs_set_clockgating_state(
1941 hwmgr->device,
1942 AMD_IP_BLOCK_TYPE_VCE,
1943 AMD_CG_STATE_UNGATE);
1944 cgs_set_powergating_state(
1945 hwmgr->device,
1946 AMD_IP_BLOCK_TYPE_VCE,
1947 AMD_PG_STATE_UNGATE);
1948 smu8_dpm_update_vce_dpm(hwmgr);
1949 smu8_enable_disable_vce_dpm(hwmgr, true);
1950 }
1951}
1861 1952
1862static const struct pp_hwmgr_func cz_hwmgr_funcs = { 1953static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
1863 .backend_init = cz_hwmgr_backend_init, 1954 .backend_init = smu8_hwmgr_backend_init,
1864 .backend_fini = cz_hwmgr_backend_fini, 1955 .backend_fini = smu8_hwmgr_backend_fini,
1865 .apply_state_adjust_rules = cz_apply_state_adjust_rules, 1956 .apply_state_adjust_rules = smu8_apply_state_adjust_rules,
1866 .force_dpm_level = cz_dpm_force_dpm_level, 1957 .force_dpm_level = smu8_dpm_force_dpm_level,
1867 .get_power_state_size = cz_get_power_state_size, 1958 .get_power_state_size = smu8_get_power_state_size,
1868 .powerdown_uvd = cz_dpm_powerdown_uvd, 1959 .powerdown_uvd = smu8_dpm_powerdown_uvd,
1869 .powergate_uvd = cz_dpm_powergate_uvd, 1960 .powergate_uvd = smu8_dpm_powergate_uvd,
1870 .powergate_vce = cz_dpm_powergate_vce, 1961 .powergate_vce = smu8_dpm_powergate_vce,
1871 .get_mclk = cz_dpm_get_mclk, 1962 .get_mclk = smu8_dpm_get_mclk,
1872 .get_sclk = cz_dpm_get_sclk, 1963 .get_sclk = smu8_dpm_get_sclk,
1873 .patch_boot_state = cz_dpm_patch_boot_state, 1964 .patch_boot_state = smu8_dpm_patch_boot_state,
1874 .get_pp_table_entry = cz_dpm_get_pp_table_entry, 1965 .get_pp_table_entry = smu8_dpm_get_pp_table_entry,
1875 .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries, 1966 .get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
1876 .set_cpu_power_state = cz_set_cpu_power_state, 1967 .set_cpu_power_state = smu8_set_cpu_power_state,
1877 .store_cc6_data = cz_store_cc6_data, 1968 .store_cc6_data = smu8_store_cc6_data,
1878 .force_clock_level = cz_force_clock_level, 1969 .force_clock_level = smu8_force_clock_level,
1879 .print_clock_levels = cz_print_clock_levels, 1970 .print_clock_levels = smu8_print_clock_levels,
1880 .get_dal_power_level = cz_get_dal_power_level, 1971 .get_dal_power_level = smu8_get_dal_power_level,
1881 .get_performance_level = cz_get_performance_level, 1972 .get_performance_level = smu8_get_performance_level,
1882 .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, 1973 .get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
1883 .get_clock_by_type = cz_get_clock_by_type, 1974 .get_clock_by_type = smu8_get_clock_by_type,
1884 .get_max_high_clocks = cz_get_max_high_clocks, 1975 .get_max_high_clocks = smu8_get_max_high_clocks,
1885 .get_temperature = cz_thermal_get_temperature, 1976 .read_sensor = smu8_read_sensor,
1886 .read_sensor = cz_read_sensor, 1977 .power_off_asic = smu8_power_off_asic,
1887 .power_off_asic = cz_power_off_asic, 1978 .asic_setup = smu8_setup_asic_task,
1888 .asic_setup = cz_setup_asic_task, 1979 .dynamic_state_management_enable = smu8_enable_dpm_tasks,
1889 .dynamic_state_management_enable = cz_enable_dpm_tasks, 1980 .power_state_set = smu8_set_power_state_tasks,
1890 .power_state_set = cz_set_power_state_tasks, 1981 .dynamic_state_management_disable = smu8_disable_dpm_tasks,
1891 .dynamic_state_management_disable = cz_disable_dpm_tasks, 1982 .notify_cac_buffer_info = smu8_notify_cac_buffer_info,
1892 .notify_cac_buffer_info = cz_notify_cac_buffer_info, 1983 .get_thermal_temperature_range = smu8_get_thermal_temperature_range,
1893}; 1984};
1894 1985
1895int cz_init_function_pointers(struct pp_hwmgr *hwmgr) 1986int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
1896{ 1987{
1897 hwmgr->hwmgr_func = &cz_hwmgr_funcs; 1988 hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
1898 hwmgr->pptable_func = &pptable_funcs; 1989 hwmgr->pptable_func = &pptable_funcs;
1899 return 0; 1990 return 0;
1900} 1991}
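
The renamed file above ends by publishing a static const ops table through smu8_init_function_pointers(), and the core then dispatches into the backend only through those pointers. Below is a minimal, self-contained sketch of that registration-and-dispatch pattern, assuming simplified stand-in layouts for struct pp_hwmgr and struct pp_hwmgr_func; it is illustrative only and not the real amdgpu powerplay headers.

/*
 * Simplified sketch of the function-pointer dispatch pattern used by the
 * hwmgr layer: a backend fills in a const ops table, the core calls
 * through it.  Struct layouts here are illustrative stand-ins.
 */
#include <stdlib.h>

struct pp_hwmgr;                                /* forward declaration */

struct pp_hwmgr_func {                          /* minimal stand-in ops table */
	int (*backend_init)(struct pp_hwmgr *hwmgr);
	int (*backend_fini)(struct pp_hwmgr *hwmgr);
};

struct pp_hwmgr {                               /* minimal stand-in manager */
	const struct pp_hwmgr_func *hwmgr_func;
	void *backend;                          /* backend-private state */
};

static int example_backend_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->backend = calloc(1, 64);         /* allocate private data */
	return hwmgr->backend ? 0 : -1;
}

static int example_backend_fini(struct pp_hwmgr *hwmgr)
{
	free(hwmgr->backend);
	hwmgr->backend = NULL;
	return 0;
}

static const struct pp_hwmgr_func example_hwmgr_funcs = {
	.backend_init = example_backend_init,
	.backend_fini = example_backend_fini,
};

/* Plays the role of smu8_init_function_pointers(): publish the ops table. */
static int example_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &example_hwmgr_funcs;
	return 0;
}

int main(void)
{
	struct pp_hwmgr hwmgr = { 0 };

	example_init_function_pointers(&hwmgr);
	if (hwmgr.hwmgr_func->backend_init(&hwmgr)) /* dispatch through the table */
		return 1;
	hwmgr.hwmgr_func->backend_fini(&hwmgr);
	return 0;
}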
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
index 508b422d6159..05a06083e1b8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
@@ -21,19 +21,18 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef _CZ_HWMGR_H_ 24#ifndef _SMU8_HWMGR_H_
25#define _CZ_HWMGR_H_ 25#define _SMU8_HWMGR_H_
26 26
27#include "cgs_common.h" 27#include "cgs_common.h"
28#include "ppatomctrl.h" 28#include "ppatomctrl.h"
29 29
30#define CZ_NUM_NBPSTATES 4 30#define SMU8_NUM_NBPSTATES 4
31#define CZ_NUM_NBPMEMORYCLOCK 2 31#define SMU8_NUM_NBPMEMORYCLOCK 2
32#define MAX_DISPLAY_CLOCK_LEVEL 8 32#define MAX_DISPLAY_CLOCK_LEVEL 8
33#define CZ_AT_DFLT 30 33#define SMU8_MAX_HARDWARE_POWERLEVELS 8
34#define CZ_MAX_HARDWARE_POWERLEVELS 8 34#define SMU8_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
35#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 35#define SMU8_MIN_DEEP_SLEEP_SCLK 800
36#define CZ_MIN_DEEP_SLEEP_SCLK 800
37 36
38/* Carrizo device IDs */ 37/* Carrizo device IDs */
39#define DEVICE_ID_CZ_9870 0x9870 38#define DEVICE_ID_CZ_9870 0x9870
@@ -42,24 +41,21 @@
42#define DEVICE_ID_CZ_9876 0x9876 41#define DEVICE_ID_CZ_9876 0x9876
43#define DEVICE_ID_CZ_9877 0x9877 42#define DEVICE_ID_CZ_9877 0x9877
44 43
45#define PHMCZ_WRITE_SMC_REGISTER(device, reg, value) \ 44struct smu8_dpm_entry {
46 cgs_write_ind_register(device, CGS_IND_REG__SMC, ix##reg, value)
47
48struct cz_dpm_entry {
49 uint32_t soft_min_clk; 45 uint32_t soft_min_clk;
50 uint32_t hard_min_clk; 46 uint32_t hard_min_clk;
51 uint32_t soft_max_clk; 47 uint32_t soft_max_clk;
52 uint32_t hard_max_clk; 48 uint32_t hard_max_clk;
53}; 49};
54 50
55struct cz_sys_info { 51struct smu8_sys_info {
56 uint32_t bootup_uma_clock; 52 uint32_t bootup_uma_clock;
57 uint32_t bootup_engine_clock; 53 uint32_t bootup_engine_clock;
58 uint32_t dentist_vco_freq; 54 uint32_t dentist_vco_freq;
59 uint32_t nb_dpm_enable; 55 uint32_t nb_dpm_enable;
60 uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK]; 56 uint32_t nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK];
61 uint32_t nbp_n_clock[CZ_NUM_NBPSTATES]; 57 uint32_t nbp_n_clock[SMU8_NUM_NBPSTATES];
62 uint16_t nbp_voltage_index[CZ_NUM_NBPSTATES]; 58 uint16_t nbp_voltage_index[SMU8_NUM_NBPSTATES];
63 uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL]; 59 uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL];
64 uint16_t bootup_nb_voltage_index; 60 uint16_t bootup_nb_voltage_index;
65 uint8_t htc_tmp_lmt; 61 uint8_t htc_tmp_lmt;
@@ -86,21 +82,21 @@ struct cz_sys_info {
86 ((tx) ? DISPLAYPHY_TX_SELECT : 0) | \ 82 ((tx) ? DISPLAYPHY_TX_SELECT : 0) | \
87 ((core) ? DISPLAYPHY_CORE_SELECT : 0)) 83 ((core) ? DISPLAYPHY_CORE_SELECT : 0))
88 84
89struct cz_display_phy_info_entry { 85struct smu8_display_phy_info_entry {
90 uint8_t phy_present; 86 uint8_t phy_present;
91 uint8_t active_lane_mapping; 87 uint8_t active_lane_mapping;
92 uint8_t display_config_type; 88 uint8_t display_config_type;
93 uint8_t active_number_of_lanes; 89 uint8_t active_number_of_lanes;
94}; 90};
95 91
96#define CZ_MAX_DISPLAYPHY_IDS 10 92#define SMU8_MAX_DISPLAYPHY_IDS 10
97 93
98struct cz_display_phy_info { 94struct smu8_display_phy_info {
99 bool display_phy_access_initialized; 95 bool display_phy_access_initialized;
100 struct cz_display_phy_info_entry entries[CZ_MAX_DISPLAYPHY_IDS]; 96 struct smu8_display_phy_info_entry entries[SMU8_MAX_DISPLAYPHY_IDS];
101}; 97};
102 98
103struct cz_power_level { 99struct smu8_power_level {
104 uint32_t engineClock; 100 uint32_t engineClock;
105 uint8_t vddcIndex; 101 uint8_t vddcIndex;
106 uint8_t dsDividerIndex; 102 uint8_t dsDividerIndex;
@@ -114,7 +110,7 @@ struct cz_power_level {
114 uint8_t rsv[3]; 110 uint8_t rsv[3];
115}; 111};
116 112
117struct cz_uvd_clocks { 113struct smu8_uvd_clocks {
118 uint32_t vclk; 114 uint32_t vclk;
119 uint32_t dclk; 115 uint32_t dclk;
120 uint32_t vclk_low_divider; 116 uint32_t vclk_low_divider;
@@ -123,7 +119,7 @@ struct cz_uvd_clocks {
123 uint32_t dclk_high_divider; 119 uint32_t dclk_high_divider;
124}; 120};
125 121
126enum cz_pstate_previous_action { 122enum smu8_pstate_previous_action {
127 DO_NOTHING = 1, 123 DO_NOTHING = 1,
128 FORCE_HIGH, 124 FORCE_HIGH,
129 CANCEL_FORCE_HIGH 125 CANCEL_FORCE_HIGH
@@ -144,10 +140,10 @@ struct pp_disable_nb_ps_flags {
144 }; 140 };
145}; 141};
146 142
147struct cz_power_state { 143struct smu8_power_state {
148 unsigned int magic; 144 unsigned int magic;
149 uint32_t level; 145 uint32_t level;
150 struct cz_uvd_clocks uvd_clocks; 146 struct smu8_uvd_clocks uvd_clocks;
151 uint32_t evclk; 147 uint32_t evclk;
152 uint32_t ecclk; 148 uint32_t ecclk;
153 uint32_t samclk; 149 uint32_t samclk;
@@ -159,8 +155,8 @@ struct cz_power_state {
159 uint8_t dpm_0_pg_nb_ps_high; 155 uint8_t dpm_0_pg_nb_ps_high;
160 uint8_t dpm_x_nb_ps_low; 156 uint8_t dpm_x_nb_ps_low;
161 uint8_t dpm_x_nb_ps_high; 157 uint8_t dpm_x_nb_ps_high;
162 enum cz_pstate_previous_action action; 158 enum smu8_pstate_previous_action action;
163 struct cz_power_level levels[CZ_MAX_HARDWARE_POWERLEVELS]; 159 struct smu8_power_level levels[SMU8_MAX_HARDWARE_POWERLEVELS];
164 struct pp_disable_nb_ps_flags disable_nb_ps_flag; 160 struct pp_disable_nb_ps_flags disable_nb_ps_flag;
165}; 161};
166 162
@@ -172,7 +168,6 @@ struct cz_power_state {
172#define DPMFlags_Debug 0x80000000 168#define DPMFlags_Debug 0x80000000
173 169
174#define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001 /* bit 0 */ 170#define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001 /* bit 0 */
175#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000
176#define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000 /* bit 23 */ 171#define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000 /* bit 23 */
177#define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000 /* bit 24 */ 172#define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000 /* bit 24 */
178 173
@@ -184,8 +179,7 @@ struct cc6_settings {
184 uint32_t cpu_pstate_separation_time; 179 uint32_t cpu_pstate_separation_time;
185}; 180};
186 181
187struct cz_hwmgr { 182struct smu8_hwmgr {
188 uint32_t activity_target[CZ_MAX_HARDWARE_POWERLEVELS];
189 uint32_t dpm_interval; 183 uint32_t dpm_interval;
190 184
191 uint32_t voltage_drop_threshold; 185 uint32_t voltage_drop_threshold;
@@ -205,11 +199,11 @@ struct cz_hwmgr {
205 199
206 uint32_t thermal_auto_throttling_treshold; 200 uint32_t thermal_auto_throttling_treshold;
207 201
208 struct cz_sys_info sys_info; 202 struct smu8_sys_info sys_info;
209 203
210 struct cz_power_level boot_power_level; 204 struct smu8_power_level boot_power_level;
211 struct cz_power_state *cz_current_ps; 205 struct smu8_power_state *smu8_current_ps;
212 struct cz_power_state *cz_requested_ps; 206 struct smu8_power_state *smu8_requested_ps;
213 207
214 uint32_t mgcg_cgtt_local0; 208 uint32_t mgcg_cgtt_local0;
215 uint32_t mgcg_cgtt_local1; 209 uint32_t mgcg_cgtt_local1;
@@ -222,7 +216,7 @@ struct cz_hwmgr {
222 216
223 uint32_t lock_nb_ps_in_uvd_play_back; 217 uint32_t lock_nb_ps_in_uvd_play_back;
224 218
225 struct cz_display_phy_info display_phy_info; 219 struct smu8_display_phy_info display_phy_info;
226 uint32_t vce_slow_sclk_threshold; /* default 200mhz */ 220 uint32_t vce_slow_sclk_threshold; /* default 200mhz */
227 uint32_t dce_slow_sclk_threshold; /* default 300mhz */ 221 uint32_t dce_slow_sclk_threshold; /* default 300mhz */
228 uint32_t min_sclk_did; /* minimum sclk divider */ 222 uint32_t min_sclk_did; /* minimum sclk divider */
@@ -273,10 +267,10 @@ struct cz_hwmgr {
273 uint32_t fps_low_threshold; 267 uint32_t fps_low_threshold;
274 268
275 uint32_t dpm_flags; 269 uint32_t dpm_flags;
276 struct cz_dpm_entry sclk_dpm; 270 struct smu8_dpm_entry sclk_dpm;
277 struct cz_dpm_entry uvd_dpm; 271 struct smu8_dpm_entry uvd_dpm;
278 struct cz_dpm_entry vce_dpm; 272 struct smu8_dpm_entry vce_dpm;
279 struct cz_dpm_entry acp_dpm; 273 struct smu8_dpm_entry acp_dpm;
280 274
281 uint8_t uvd_boot_level; 275 uint8_t uvd_boot_level;
282 uint8_t vce_boot_level; 276 uint8_t vce_boot_level;
@@ -314,12 +308,4 @@ struct cz_hwmgr {
314 uint32_t num_of_clk_entries; 308 uint32_t num_of_clk_entries;
315}; 309};
316 310
317struct pp_hwmgr; 311#endif /* _SMU8_HWMGR_H_ */
318
319int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr);
320int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr);
321int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr);
322int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr);
323int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
324int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr);
325#endif /* _CZ_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
new file mode 100644
index 000000000000..e11daf5cbf80
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -0,0 +1,536 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "hwmgr.h"
24#include "pp_debug.h"
25#include "ppatomctrl.h"
26#include "ppsmc.h"
27
28uint8_t convert_to_vid(uint16_t vddc)
29{
30 return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
31}
32
33uint16_t convert_to_vddc(uint8_t vid)
34{
35 return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
36}
37
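/*
 * Illustrative sketch, not part of the original patch: the two helpers
 * above implement the linear VID <-> VDDC mapping used throughout
 * powerplay (assuming VOLTAGE_SCALE == 4). A vddc of 1000 mV gives
 * VID (6200 - 4000) / 25 = 88, and converting back yields
 * (6200 - 88 * 25) / 4 = 1000 mV.
 */
#if 0	/* example usage, not compiled */
	uint8_t  vid  = convert_to_vid(1000);	/* 88 */
	uint16_t vddc = convert_to_vddc(vid);	/* 1000 mV */
#endif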
38uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
39{
40 u32 mask = 0;
41 u32 shift = 0;
42
43 shift = (offset % 4) << 3;
44 if (size == sizeof(uint8_t))
45 mask = 0xFF << shift;
46 else if (size == sizeof(uint16_t))
47 mask = 0xFFFF << shift;
48
49 original_data &= ~mask;
50 original_data |= (field << shift);
51 return original_data;
52}
53
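/*
 * Illustrative sketch, not part of the original patch: writing the byte
 * 0xAB at byte offset 2 of the 32-bit word 0x11223344 gives
 * shift = (2 % 4) << 3 = 16 and mask = 0xFF0000, so the result is
 * (0x11223344 & ~0xFF0000) | (0xAB << 16) = 0x11AB3344.
 */
#if 0	/* example usage, not compiled */
	u32 packed = phm_set_field_to_u32(2, 0x11223344, 0xAB, sizeof(uint8_t));
	/* packed == 0x11AB3344 */
#endif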
54/**
55 * Returns once the part of the register indicated by the mask has
56 * reached the given value.
57 */
58int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
59 uint32_t value, uint32_t mask)
60{
61 uint32_t i;
62 uint32_t cur_value;
63
64 if (hwmgr == NULL || hwmgr->device == NULL) {
65 pr_err("Invalid Hardware Manager!");
66 return -EINVAL;
67 }
68
69 for (i = 0; i < hwmgr->usec_timeout; i++) {
70 cur_value = cgs_read_register(hwmgr->device, index);
71 if ((cur_value & mask) == (value & mask))
72 break;
73 udelay(1);
74 }
75
 76	/* timeout means wrong logic */
77 if (i == hwmgr->usec_timeout)
78 return -1;
79 return 0;
80}
81
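/*
 * Illustrative sketch, not part of the original patch; the register name
 * below is hypothetical. The helper simply polls until
 * (reg & mask) == (value & mask) or hwmgr->usec_timeout microseconds
 * elapse, returning -1 on timeout.
 */
#if 0	/* example usage, not compiled */
	if (phm_wait_on_register(hwmgr, mmSOME_STATUS_REG, 0x1, 0x1))
		pr_err("status bit never asserted\n");
#endif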
82
83/**
84 * Returns once the part of the register indicated by the mask has
 85 * reached the given value. The indirect space is described by giving
86 * the memory-mapped index of the indirect index register.
87 */
88int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
89 uint32_t indirect_port,
90 uint32_t index,
91 uint32_t value,
92 uint32_t mask)
93{
94 if (hwmgr == NULL || hwmgr->device == NULL) {
95 pr_err("Invalid Hardware Manager!");
96 return -EINVAL;
97 }
98
99 cgs_write_register(hwmgr->device, indirect_port, index);
100 return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
101}
102
103int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
104 uint32_t index,
105 uint32_t value, uint32_t mask)
106{
107 uint32_t i;
108 uint32_t cur_value;
109
110 if (hwmgr == NULL || hwmgr->device == NULL)
111 return -EINVAL;
112
113 for (i = 0; i < hwmgr->usec_timeout; i++) {
114 cur_value = cgs_read_register(hwmgr->device,
115 index);
116 if ((cur_value & mask) != (value & mask))
117 break;
118 udelay(1);
119 }
120
121 /* timeout means wrong logic */
122 if (i == hwmgr->usec_timeout)
123 return -ETIME;
124 return 0;
125}
126
127int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
128 uint32_t indirect_port,
129 uint32_t index,
130 uint32_t value,
131 uint32_t mask)
132{
133 if (hwmgr == NULL || hwmgr->device == NULL)
134 return -EINVAL;
135
136 cgs_write_register(hwmgr->device, indirect_port, index);
137 return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
138 value, mask);
139}
140
141bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
142{
143 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
144}
145
146bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
147{
148 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
149}
150
151
152int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
153{
154 uint32_t i, j;
155 uint16_t vvalue;
156 bool found = false;
157 struct pp_atomctrl_voltage_table *table;
158
159 PP_ASSERT_WITH_CODE((NULL != vol_table),
160 "Voltage Table empty.", return -EINVAL);
161
162 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
163 GFP_KERNEL);
164
165 if (NULL == table)
166 return -EINVAL;
167
168 table->mask_low = vol_table->mask_low;
169 table->phase_delay = vol_table->phase_delay;
170
171 for (i = 0; i < vol_table->count; i++) {
172 vvalue = vol_table->entries[i].value;
173 found = false;
174
175 for (j = 0; j < table->count; j++) {
176 if (vvalue == table->entries[j].value) {
177 found = true;
178 break;
179 }
180 }
181
182 if (!found) {
183 table->entries[table->count].value = vvalue;
184 table->entries[table->count].smio_low =
185 vol_table->entries[i].smio_low;
186 table->count++;
187 }
188 }
189
190 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
191 kfree(table);
192 table = NULL;
193 return 0;
194}
195
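/*
 * Illustrative sketch, not part of the original patch: the helper above
 * removes duplicate voltage values in place, keeping first-seen order
 * and the matching smio_low fields.
 */
#if 0	/* example usage, not compiled */
	struct pp_atomctrl_voltage_table t = { .count = 4 };

	t.entries[0].value = 900;
	t.entries[1].value = 900;	/* duplicate, dropped */
	t.entries[2].value = 950;
	t.entries[3].value = 1000;

	if (!phm_trim_voltage_table(&t))
		/* t.count is now 3: {900, 950, 1000} */;
#endif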
196int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
197 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
198{
199 uint32_t i;
200 int result;
201
202 PP_ASSERT_WITH_CODE((0 != dep_table->count),
203 "Voltage Dependency Table empty.", return -EINVAL);
204
205 PP_ASSERT_WITH_CODE((NULL != vol_table),
206 "vol_table empty.", return -EINVAL);
207
208 vol_table->mask_low = 0;
209 vol_table->phase_delay = 0;
210 vol_table->count = dep_table->count;
211
212 for (i = 0; i < dep_table->count; i++) {
213 vol_table->entries[i].value = dep_table->entries[i].mvdd;
214 vol_table->entries[i].smio_low = 0;
215 }
216
217 result = phm_trim_voltage_table(vol_table);
218 PP_ASSERT_WITH_CODE((0 == result),
219 "Failed to trim MVDD table.", return result);
220
221 return 0;
222}
223
224int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
225 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
226{
227 uint32_t i;
228 int result;
229
230 PP_ASSERT_WITH_CODE((0 != dep_table->count),
231 "Voltage Dependency Table empty.", return -EINVAL);
232
233 PP_ASSERT_WITH_CODE((NULL != vol_table),
234 "vol_table empty.", return -EINVAL);
235
236 vol_table->mask_low = 0;
237 vol_table->phase_delay = 0;
238 vol_table->count = dep_table->count;
239
240 for (i = 0; i < dep_table->count; i++) {
241 vol_table->entries[i].value = dep_table->entries[i].vddci;
242 vol_table->entries[i].smio_low = 0;
243 }
244
245 result = phm_trim_voltage_table(vol_table);
246 PP_ASSERT_WITH_CODE((0 == result),
247 "Failed to trim VDDCI table.", return result);
248
249 return 0;
250}
251
252int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
253 phm_ppt_v1_voltage_lookup_table *lookup_table)
254{
255 int i = 0;
256
257 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
258 "Voltage Lookup Table empty.", return -EINVAL);
259
260 PP_ASSERT_WITH_CODE((NULL != vol_table),
261 "vol_table empty.", return -EINVAL);
262
263 vol_table->mask_low = 0;
264 vol_table->phase_delay = 0;
265
266 vol_table->count = lookup_table->count;
267
268 for (i = 0; i < vol_table->count; i++) {
269 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
270 vol_table->entries[i].smio_low = 0;
271 }
272
273 return 0;
274}
275
276void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
277 struct pp_atomctrl_voltage_table *vol_table)
278{
279 unsigned int i, diff;
280
281 if (vol_table->count <= max_vol_steps)
282 return;
283
284 diff = vol_table->count - max_vol_steps;
285
286 for (i = 0; i < max_vol_steps; i++)
287 vol_table->entries[i] = vol_table->entries[i + diff];
288
289 vol_table->count = max_vol_steps;
290
291 return;
292}
293
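/*
 * Illustrative sketch, not part of the original patch: if the voltage
 * table holds 6 ascending entries but only 4 state-table slots exist,
 * the helper above shifts everything down by diff = 6 - 4 = 2, i.e. the
 * two lowest entries are dropped and the highest four are kept.
 */
#if 0	/* example usage, not compiled */
	phm_trim_voltage_table_to_fit_state_table(4, &vol_table);
	/* vol_table.count == 4; entries[0] is what entries[2] used to be */
#endif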
294int phm_reset_single_dpm_table(void *table,
295 uint32_t count, int max)
296{
297 int i;
298
299 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
300
301 dpm_table->count = count > max ? max : count;
302
303 for (i = 0; i < dpm_table->count; i++)
304 dpm_table->dpm_level[i].enabled = false;
305
306 return 0;
307}
308
309void phm_setup_pcie_table_entry(
310 void *table,
311 uint32_t index, uint32_t pcie_gen,
312 uint32_t pcie_lanes)
313{
314 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
315 dpm_table->dpm_level[index].value = pcie_gen;
316 dpm_table->dpm_level[index].param1 = pcie_lanes;
317 dpm_table->dpm_level[index].enabled = 1;
318}
319
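/*
 * Illustrative sketch, not part of the original patch: filling the first
 * PCIe DPM level of a vi_dpm_table with gen3 x16.
 */
#if 0	/* example usage, not compiled */
	phm_setup_pcie_table_entry(&pcie_table, 0, 3 /* gen */, 16 /* lanes */);
#endif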
320int32_t phm_get_dpm_level_enable_mask_value(void *table)
321{
322 int32_t i;
323 int32_t mask = 0;
324 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
325
326 for (i = dpm_table->count; i > 0; i--) {
327 mask = mask << 1;
328 if (dpm_table->dpm_level[i - 1].enabled)
329 mask |= 0x1;
330 else
331 mask &= 0xFFFFFFFE;
332 }
333
334 return mask;
335}
336
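/*
 * Illustrative sketch, not part of the original patch: for a 4-level
 * table where only levels 1 and 3 are enabled, the loop above builds the
 * mask 0b1010 (0xA); bit i of the result is set exactly when
 * dpm_level[i].enabled is true.
 */
#if 0	/* example usage, not compiled */
	int32_t mask = phm_get_dpm_level_enable_mask_value(&dpm_table);
	/* mask == 0xA when exactly levels 1 and 3 are enabled */
#endif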
337uint8_t phm_get_voltage_index(
338 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
339{
340 uint8_t count = (uint8_t) (lookup_table->count);
341 uint8_t i;
342
343 PP_ASSERT_WITH_CODE((NULL != lookup_table),
344 "Lookup Table empty.", return 0);
345 PP_ASSERT_WITH_CODE((0 != count),
346 "Lookup Table empty.", return 0);
347
348 for (i = 0; i < lookup_table->count; i++) {
 349		/* find first voltage equal to or bigger than requested */
350 if (lookup_table->entries[i].us_vdd >= voltage)
351 return i;
352 }
353 /* voltage is bigger than max voltage in the table */
354 return i - 1;
355}
356
357uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
358 uint32_t voltage)
359{
360 uint8_t count = (uint8_t) (voltage_table->count);
361 uint8_t i = 0;
362
363 PP_ASSERT_WITH_CODE((NULL != voltage_table),
364 "Voltage Table empty.", return 0;);
365 PP_ASSERT_WITH_CODE((0 != count),
366 "Voltage Table empty.", return 0;);
367
368 for (i = 0; i < count; i++) {
 369		/* find first voltage equal to or bigger than requested */
370 if (voltage_table->entries[i].value >= voltage)
371 return i;
372 }
373
374 /* voltage is bigger than max voltage in the table */
375 return i - 1;
376}
377
378uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
379{
380 uint32_t i;
381
382 for (i = 0; i < vddci_table->count; i++) {
383 if (vddci_table->entries[i].value >= vddci)
384 return vddci_table->entries[i].value;
385 }
386
387 pr_debug("vddci is larger than max value in vddci_table\n");
388 return vddci_table->entries[i-1].value;
389}
390
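/*
 * Illustrative sketch, not part of the original patch: with an ascending
 * vddci table of {800, 900, 1000} mV, a request of 850 mV returns 900
 * (the first entry >= the request); a request of 1100 mV falls off the
 * end of the loop and the last entry, 1000 mV, is returned instead.
 */
#if 0	/* example usage, not compiled */
	uint16_t v = phm_find_closest_vddci(&vddci_table, 850);	/* 900 */
#endif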
391int phm_find_boot_level(void *table,
392 uint32_t value, uint32_t *boot_level)
393{
394 int result = -EINVAL;
395 uint32_t i;
396 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
397
398 for (i = 0; i < dpm_table->count; i++) {
399 if (value == dpm_table->dpm_level[i].value) {
400 *boot_level = i;
401 result = 0;
402 }
403 }
404
405 return result;
406}
407
408int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
409 phm_ppt_v1_voltage_lookup_table *lookup_table,
410 uint16_t virtual_voltage_id, int32_t *sclk)
411{
412 uint8_t entry_id;
413 uint8_t voltage_id;
414 struct phm_ppt_v1_information *table_info =
415 (struct phm_ppt_v1_information *)(hwmgr->pptable);
416
417 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
418
419 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
420 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
421 voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
422 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
423 break;
424 }
425
426 if (entry_id >= table_info->vdd_dep_on_sclk->count) {
427 pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
428 return -EINVAL;
429 }
430
431 *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
432
433 return 0;
434}
435
436/**
437 * Initialize Dynamic State Adjustment Rule Settings
438 *
439 * @param hwmgr the address of the powerplay hardware manager.
440 */
441int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
442{
443 uint32_t table_size;
444 struct phm_clock_voltage_dependency_table *table_clk_vlt;
445 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
446
447 /* initialize vddc_dep_on_dal_pwrl table */
448 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
449 table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
450
451 if (NULL == table_clk_vlt) {
452 pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
453 return -ENOMEM;
454 } else {
455 table_clk_vlt->count = 4;
456 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
457 table_clk_vlt->entries[0].v = 0;
458 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
459 table_clk_vlt->entries[1].v = 720;
460 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
461 table_clk_vlt->entries[2].v = 810;
462 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
463 table_clk_vlt->entries[3].v = 900;
464 if (pptable_info != NULL)
465 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
466 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
467 }
468
469 return 0;
470}
471
472uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
473{
474 uint32_t level = 0;
475
476 while (0 == (mask & (1 << level)))
477 level++;
478
479 return level;
480}
481
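/*
 * Illustrative sketch, not part of the original patch: the helper above
 * returns the index of the lowest set bit, so a DPM enable mask of 0x6
 * (levels 1 and 2 enabled) yields level 1. Callers must pass a non-zero
 * mask, otherwise the loop never terminates.
 */
#if 0	/* example usage, not compiled */
	uint32_t level = phm_get_lowest_enabled_level(hwmgr, 0x6);	/* 1 */
#endif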
482void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
483{
484 struct phm_ppt_v1_information *table_info =
485 (struct phm_ppt_v1_information *)hwmgr->pptable;
486 struct phm_clock_voltage_dependency_table *table =
487 table_info->vddc_dep_on_dal_pwrl;
488 struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
489 enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
490 uint32_t req_vddc = 0, req_volt, i;
491
492 if (!table || table->count <= 0
493 || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
494 || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
495 return;
496
497 for (i = 0; i < table->count; i++) {
498 if (dal_power_level == table->entries[i].clk) {
499 req_vddc = table->entries[i].v;
500 break;
501 }
502 }
503
504 vddc_table = table_info->vdd_dep_on_sclk;
505 for (i = 0; i < vddc_table->count; i++) {
506 if (req_vddc <= vddc_table->entries[i].vddc) {
507 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
508 smum_send_msg_to_smc_with_parameter(hwmgr,
509 PPSMC_MSG_VddC_Request, req_volt);
510 return;
511 }
512 }
 513	pr_err("DAL requested level could not be matched to an"
 514		" available voltage in the VDDC DPM table\n");
515}
516
517int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
518 uint32_t sclk, uint16_t id, uint16_t *voltage)
519{
520 uint32_t vol;
521 int ret = 0;
522
523 if (hwmgr->chip_id < CHIP_TONGA) {
524 ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
525 } else if (hwmgr->chip_id < CHIP_POLARIS10) {
526 ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
527 if (*voltage >= 2000 || *voltage == 0)
528 *voltage = 1150;
529 } else {
530 ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
531 *voltage = (uint16_t)(vol/100);
532 }
533 return ret;
534}
535
536
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
new file mode 100644
index 000000000000..a1a491300348
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -0,0 +1,180 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _SMU_HELPER_H_
24#define _SMU_HELPER_H_
25
26struct pp_atomctrl_voltage_table;
27struct pp_hwmgr;
28struct phm_ppt_v1_voltage_lookup_table;
29
30extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
31 uint32_t index,
32 uint32_t value, uint32_t mask);
33extern int phm_wait_for_indirect_register_unequal(
34 struct pp_hwmgr *hwmgr,
35 uint32_t indirect_port, uint32_t index,
36 uint32_t value, uint32_t mask);
37
38
39extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
40extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
41extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
42
43extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
44extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
45extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
46extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
47extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
48extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
49extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
50extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
51extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
52 uint32_t voltage);
53extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
54extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
55extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
56extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
57 uint16_t virtual_voltage_id, int32_t *sclk);
58extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
59extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
60extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
61
62extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
63 uint32_t sclk, uint16_t id, uint16_t *voltage);
64
65extern uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size);
66
67extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
68 uint32_t value, uint32_t mask);
69
70extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
71 uint32_t indirect_port,
72 uint32_t index,
73 uint32_t value,
74 uint32_t mask);
75
76#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
77#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
78
79#define PHM_SET_FIELD(origval, reg, field, fieldval) \
80 (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
81 (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
82
83#define PHM_GET_FIELD(value, reg, field) \
84 (((value) & PHM_FIELD_MASK(reg, field)) >> \
85 PHM_FIELD_SHIFT(reg, field))
86
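/*
 * Illustrative sketch, not part of the original patch: the field macros
 * expand to the usual AMD register-header names, e.g. for the
 * SMUSVI0_PLANE0_CURRENTVID register read later in this series:
 *
 *   PHM_FIELD_MASK(SMUSVI0_PLANE0_CURRENTVID, CURRENT_SVI0_PLANE0_VID)
 *     -> SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK
 *   PHM_FIELD_SHIFT(SMUSVI0_PLANE0_CURRENTVID, CURRENT_SVI0_PLANE0_VID)
 *     -> SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT
 *
 * PHM_GET_FIELD() masks and shifts a raw value down to the field;
 * PHM_SET_FIELD() does the inverse.
 */
#if 0	/* example usage, not compiled; 'raw' is a hypothetical variable */
	uint32_t vid = PHM_GET_FIELD(raw, SMUSVI0_PLANE0_CURRENTVID,
				     CURRENT_SVI0_PLANE0_VID);
#endif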
87
88/* Operations on named fields. */
89
90#define PHM_READ_FIELD(device, reg, field) \
91 PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
92
93#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
94 PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
95 reg, field)
96
97#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
98 PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
99 reg, field)
100
101#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
102 cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
103 cgs_read_register(device, mm##reg), reg, field, fieldval))
104
105#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
106 cgs_write_ind_register(device, port, ix##reg, \
107 PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
108 reg, field, fieldval))
109
110#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
111 cgs_write_ind_register(device, port, ix##reg, \
112 PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
113 reg, field, fieldval))
114
115#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
116 phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
117
118
119#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
120 PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
121
122#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
123 PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
124 << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
125
126#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
127 phm_wait_for_indirect_register_unequal(hwmgr, \
128 mm##port##_INDEX, index, value, mask)
129
130#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
131 PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
132
133#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
134 PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
135 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
136 PHM_FIELD_MASK(reg, field) )
137
138
139#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
140 port, index, value, mask) \
141 phm_wait_for_indirect_register_unequal(hwmgr, \
142 mm##port##_INDEX_11, index, value, mask)
143
144#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
145 PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
146
147#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
148 PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
149 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
150 PHM_FIELD_MASK(reg, field))
151
152
153#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
154 port, index, value, mask) \
155 phm_wait_on_indirect_register(hwmgr, \
156 mm##port##_INDEX_11, index, value, mask)
157
158#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
159 PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
160
161#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
162 PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
163 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
164 PHM_FIELD_MASK(reg, field))
165
166#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
167 index, value, mask) \
168 phm_wait_for_register_unequal(hwmgr, \
169 index, value, mask)
170
171#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
172 PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
173 mm##reg, value, mask)
174
175#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
176 PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
177 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
178 PHM_FIELD_MASK(reg, field))
179
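/*
 * Illustrative sketch, not part of the original patch; the register and
 * field names below are hypothetical. The plain macros poll until the
 * named field reaches fieldval, the *_UNEQUAL variants poll until it
 * moves away from fieldval, and the VFPF variants issue the indirect
 * access through mm<port>_INDEX_11 instead of mm<port>_INDEX.
 */
#if 0	/* example usage, not compiled */
	/* wait until SOME_STATUS.BUSY deasserts (becomes unequal to 1) */
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SOME_STATUS, BUSY, 1);
#endif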
180#endif /* _SMU_HELPER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 2d55dabc77d4..2fcbb17b794d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -44,11 +44,14 @@
44#include "vega10_pptable.h" 44#include "vega10_pptable.h"
45#include "vega10_thermal.h" 45#include "vega10_thermal.h"
46#include "pp_debug.h" 46#include "pp_debug.h"
47#include "pp_acpi.h"
48#include "amd_pcie_helpers.h" 47#include "amd_pcie_helpers.h"
49#include "cgs_linux.h" 48#include "cgs_linux.h"
50#include "ppinterrupt.h" 49#include "ppinterrupt.h"
51#include "pp_overdriver.h" 50#include "pp_overdriver.h"
51#include "pp_thermal.h"
52
53#include "smuio/smuio_9_0_offset.h"
54#include "smuio/smuio_9_0_sh_mask.h"
52 55
53#define VOLTAGE_SCALE 4 56#define VOLTAGE_SCALE 4
54#define VOLTAGE_VID_OFFSET_SCALE1 625 57#define VOLTAGE_VID_OFFSET_SCALE1 625
@@ -187,8 +190,7 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
187 (struct vega10_hwmgr *)(hwmgr->backend); 190 (struct vega10_hwmgr *)(hwmgr->backend);
188 struct phm_ppt_v2_information *table_info = 191 struct phm_ppt_v2_information *table_info =
189 (struct phm_ppt_v2_information *)hwmgr->pptable; 192 (struct phm_ppt_v2_information *)hwmgr->pptable;
190 struct cgs_system_info sys_info = {0}; 193 struct amdgpu_device *adev = hwmgr->adev;
191 int result;
192 194
193 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 195 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
194 PHM_PlatformCaps_SclkDeepSleep); 196 PHM_PlatformCaps_SclkDeepSleep);
@@ -203,15 +205,11 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
203 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 205 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204 PHM_PlatformCaps_EnableSMU7ThermalManagement); 206 PHM_PlatformCaps_EnableSMU7ThermalManagement);
205 207
206 sys_info.size = sizeof(struct cgs_system_info); 208 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
207 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
208 result = cgs_query_system_info(hwmgr->device, &sys_info);
209
210 if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
211 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 209 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
212 PHM_PlatformCaps_UVDPowerGating); 210 PHM_PlatformCaps_UVDPowerGating);
213 211
214 if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE)) 212 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
215 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 213 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
216 PHM_PlatformCaps_VCEPowerGating); 214 PHM_PlatformCaps_VCEPowerGating);
217 215
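/*
 * Illustrative sketch, not part of the original patch: the hunk above
 * replaces a cgs_query_system_info() round trip with a direct read of the
 * amdgpu_device that the hwmgr now carries in hwmgr->adev; other pg-flag
 * checks in this file follow the same shape.
 */
#if 0	/* example of the new pattern, not compiled */
	struct amdgpu_device *adev = hwmgr->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_UVDPowerGating);
#endif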
@@ -301,6 +299,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
301{ 299{
302 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); 300 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
303 int i; 301 int i;
302 uint32_t sub_vendor_id, hw_revision;
303 struct amdgpu_device *adev = hwmgr->adev;
304 304
305 vega10_initialize_power_tune_defaults(hwmgr); 305 vega10_initialize_power_tune_defaults(hwmgr);
306 306
@@ -365,6 +365,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
365 FEATURE_FAN_CONTROL_BIT; 365 FEATURE_FAN_CONTROL_BIT;
366 data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT; 366 data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
367 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT; 367 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
368 data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;
368 369
369 if (!data->registry_data.prefetcher_dpm_key_disabled) 370 if (!data->registry_data.prefetcher_dpm_key_disabled)
370 data->smu_features[GNLD_DPM_PREFETCHER].supported = true; 371 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
@@ -434,6 +435,15 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
434 if (data->registry_data.didt_support) 435 if (data->registry_data.didt_support)
435 data->smu_features[GNLD_DIDT].supported = true; 436 data->smu_features[GNLD_DIDT].supported = true;
436 437
438 hw_revision = adev->pdev->revision;
439 sub_vendor_id = adev->pdev->subsystem_vendor;
440
441 if ((hwmgr->chip_id == 0x6862 ||
442 hwmgr->chip_id == 0x6861 ||
443 hwmgr->chip_id == 0x6868) &&
444 (hw_revision == 0) &&
445 (sub_vendor_id != 0x1002))
446 data->smu_features[GNLD_PCC_LIMIT].supported = true;
437} 447}
438 448
439#ifdef PPLIB_VEGA10_EVV_SUPPORT 449#ifdef PPLIB_VEGA10_EVV_SUPPORT
@@ -747,7 +757,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
747 struct vega10_hwmgr *data; 757 struct vega10_hwmgr *data;
748 uint32_t config_telemetry = 0; 758 uint32_t config_telemetry = 0;
749 struct pp_atomfwctrl_voltage_table vol_table; 759 struct pp_atomfwctrl_voltage_table vol_table;
750 struct cgs_system_info sys_info = {0}; 760 struct amdgpu_device *adev = hwmgr->adev;
751 uint32_t reg; 761 uint32_t reg;
752 762
753 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL); 763 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
@@ -756,10 +766,12 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
756 766
757 hwmgr->backend = data; 767 hwmgr->backend = data;
758 768
759 vega10_set_default_registry_data(hwmgr); 769 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
770 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
771 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
760 772
773 vega10_set_default_registry_data(hwmgr);
761 data->disable_dpm_mask = 0xff; 774 data->disable_dpm_mask = 0xff;
762 data->workload_mask = 0xff;
763 775
764 /* need to set voltage control types before EVV patching */ 776 /* need to set voltage control types before EVV patching */
765 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE; 777 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
@@ -837,10 +849,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
837 hwmgr->platform_descriptor.clockStep.engineClock = 500; 849 hwmgr->platform_descriptor.clockStep.engineClock = 500;
838 hwmgr->platform_descriptor.clockStep.memoryClock = 500; 850 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
839 851
840 sys_info.size = sizeof(struct cgs_system_info); 852 data->total_active_cus = adev->gfx.cu_info.number;
841 sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
842 result = cgs_query_system_info(hwmgr->device, &sys_info);
843 data->total_active_cus = sys_info.value;
844 /* Setup default Overdrive Fan control settings */ 853 /* Setup default Overdrive Fan control settings */
845 data->odn_fan_table.target_fan_speed = 854 data->odn_fan_table.target_fan_speed =
846 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM; 855 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
@@ -921,18 +930,9 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
921 "Failed to set up led dpm config!", 930 "Failed to set up led dpm config!",
922 return -EINVAL); 931 return -EINVAL);
923 932
924 return 0; 933 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
925}
926
927static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
928{
929 uint32_t features_enabled;
930 934
931 if (!vega10_get_smc_features(hwmgr, &features_enabled)) { 935 return 0;
932 if (features_enabled & SMC_DPM_FEATURES)
933 return true;
934 }
935 return false;
936} 936}
937 937
938/** 938/**
@@ -1380,14 +1380,12 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1380 1380
1381 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || 1381 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
1382 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { 1382 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
1383 data->odn_dpm_table.odn_core_clock_dpm_levels. 1383 data->odn_dpm_table.odn_core_clock_dpm_levels.num_of_pl =
1384 number_of_performance_levels = data->dpm_table.gfx_table.count; 1384 data->dpm_table.gfx_table.count;
1385 for (i = 0; i < data->dpm_table.gfx_table.count; i++) { 1385 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1386 data->odn_dpm_table.odn_core_clock_dpm_levels. 1386 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].clock =
1387 performance_level_entries[i].clock =
1388 data->dpm_table.gfx_table.dpm_levels[i].value; 1387 data->dpm_table.gfx_table.dpm_levels[i].value;
1389 data->odn_dpm_table.odn_core_clock_dpm_levels. 1388 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].enabled = true;
1390 performance_level_entries[i].enabled = true;
1391 } 1389 }
1392 1390
1393 data->odn_dpm_table.vdd_dependency_on_sclk.count = 1391 data->odn_dpm_table.vdd_dependency_on_sclk.count =
@@ -1403,14 +1401,12 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1403 dep_gfx_table->entries[i].cks_voffset; 1401 dep_gfx_table->entries[i].cks_voffset;
1404 } 1402 }
1405 1403
1406 data->odn_dpm_table.odn_memory_clock_dpm_levels. 1404 data->odn_dpm_table.odn_memory_clock_dpm_levels.num_of_pl =
1407 number_of_performance_levels = data->dpm_table.mem_table.count; 1405 data->dpm_table.mem_table.count;
1408 for (i = 0; i < data->dpm_table.mem_table.count; i++) { 1406 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1409 data->odn_dpm_table.odn_memory_clock_dpm_levels. 1407 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].clock =
1410 performance_level_entries[i].clock =
1411 data->dpm_table.mem_table.dpm_levels[i].value; 1408 data->dpm_table.mem_table.dpm_levels[i].value;
1412 data->odn_dpm_table.odn_memory_clock_dpm_levels. 1409 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].enabled = true;
1413 performance_level_entries[i].enabled = true;
1414 } 1410 }
1415 1411
1416 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count; 1412 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
@@ -2411,34 +2407,6 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2411 return result; 2407 return result;
2412} 2408}
2413 2409
2414static int vega10_save_default_power_profile(struct pp_hwmgr *hwmgr)
2415{
2416 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2417 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2418 uint32_t min_level;
2419
2420 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2421 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2422
2423 /* Optimize compute power profile: Use only highest
2424 * 2 power levels (if more than 2 are available)
2425 */
2426 if (dpm_table->count > 2)
2427 min_level = dpm_table->count - 2;
2428 else if (dpm_table->count == 2)
2429 min_level = 1;
2430 else
2431 min_level = 0;
2432
2433 hwmgr->default_compute_power_profile.min_sclk =
2434 dpm_table->dpm_levels[min_level].value;
2435
2436 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2437 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2438
2439 return 0;
2440}
2441
2442/** 2410/**
2443* Initializes the SMC table and uploads it 2411* Initializes the SMC table and uploads it
2444* 2412*
@@ -2582,7 +2550,6 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2582 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!", 2550 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
2583 return result); 2551 return result);
2584 vega10_acg_enable(hwmgr); 2552 vega10_acg_enable(hwmgr);
2585 vega10_save_default_power_profile(hwmgr);
2586 2553
2587 return 0; 2554 return 0;
2588} 2555}
@@ -2859,34 +2826,39 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2859 return 0; 2826 return 0;
2860} 2827}
2861 2828
2862static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 2829static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
2863{ 2830{
2864 struct vega10_hwmgr *data = 2831 struct vega10_hwmgr *data =
2865 (struct vega10_hwmgr *)(hwmgr->backend); 2832 (struct vega10_hwmgr *)(hwmgr->backend);
2866 int tmp_result, result = 0;
2867 2833
2868 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr, 2834 if (data->smu_features[GNLD_PCC_LIMIT].supported) {
2869 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); 2835 if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
2870 PP_ASSERT_WITH_CODE(!tmp_result, 2836 pr_info("GNLD_PCC_LIMIT has been %s \n", enable ? "enabled" : "disabled");
2871 "Failed to configure telemetry!", 2837 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2872 return tmp_result); 2838 enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
2839 "Attempt to Enable PCC Limit feature Failed!",
2840 return -EINVAL);
2841 data->smu_features[GNLD_PCC_LIMIT].enabled = enable;
2842 }
2873 2843
2874 smum_send_msg_to_smc_with_parameter(hwmgr, 2844 return 0;
2875 PPSMC_MSG_NumOfDisplays, 0); 2845}
2876 2846
2877 tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1; 2847static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2878 PP_ASSERT_WITH_CODE(!tmp_result, 2848{
2879 "DPM is already running right , skipping re-enablement!", 2849 struct vega10_hwmgr *data =
2880 return 0); 2850 (struct vega10_hwmgr *)(hwmgr->backend);
2851 int tmp_result, result = 0;
2852
2853 vega10_enable_disable_PCC_limit_feature(hwmgr, true);
2881 2854
2882 if ((hwmgr->smu_version == 0x001c2c00) || 2855 if ((hwmgr->smu_version == 0x001c2c00) ||
2883 (hwmgr->smu_version == 0x001c2d00)) { 2856 (hwmgr->smu_version == 0x001c2d00))
2884 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr, 2857 smum_send_msg_to_smc_with_parameter(hwmgr,
2885 PPSMC_MSG_UpdatePkgPwrPidAlpha, 1); 2858 PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
2886 PP_ASSERT_WITH_CODE(!tmp_result, 2859
2887 "Failed to set package power PID!", 2860 smum_send_msg_to_smc_with_parameter(hwmgr,
2888 return tmp_result); 2861 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2889 }
2890 2862
2891 tmp_result = vega10_construct_voltage_tables(hwmgr); 2863 tmp_result = vega10_construct_voltage_tables(hwmgr);
2892 PP_ASSERT_WITH_CODE(!tmp_result, 2864 PP_ASSERT_WITH_CODE(!tmp_result,
@@ -3162,16 +3134,19 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3162 minimum_clocks.memoryClock = stable_pstate_mclk; 3134 minimum_clocks.memoryClock = stable_pstate_mclk;
3163 } 3135 }
3164 3136
3165 disable_mclk_switching_for_frame_lock = phm_cap_enabled( 3137 disable_mclk_switching_for_frame_lock =
3166 hwmgr->platform_descriptor.platformCaps, 3138 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3167 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); 3139 disable_mclk_switching_for_vr =
3168 disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR); 3140 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
3169 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); 3141 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
3170 3142
3171 disable_mclk_switching = (info.display_count > 1) || 3143 if (info.display_count == 0)
3172 disable_mclk_switching_for_frame_lock || 3144 disable_mclk_switching = false;
3173 disable_mclk_switching_for_vr || 3145 else
3174 force_mclk_high; 3146 disable_mclk_switching = (info.display_count > 1) ||
3147 disable_mclk_switching_for_frame_lock ||
3148 disable_mclk_switching_for_vr ||
3149 force_mclk_high;
3175 3150
3176 sclk = vega10_ps->performance_levels[0].gfx_clock; 3151 sclk = vega10_ps->performance_levels[0].gfx_clock;
3177 mclk = vega10_ps->performance_levels[0].mem_clock; 3152 mclk = vega10_ps->performance_levels[0].mem_clock;
@@ -3348,11 +3323,9 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3348 dpm_count < dpm_table->gfx_table.count; 3323 dpm_count < dpm_table->gfx_table.count;
3349 dpm_count++) { 3324 dpm_count++) {
3350 dpm_table->gfx_table.dpm_levels[dpm_count].enabled = 3325 dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
3351 data->odn_dpm_table.odn_core_clock_dpm_levels. 3326 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].enabled;
3352 performance_level_entries[dpm_count].enabled;
3353 dpm_table->gfx_table.dpm_levels[dpm_count].value = 3327 dpm_table->gfx_table.dpm_levels[dpm_count].value =
3354 data->odn_dpm_table.odn_core_clock_dpm_levels. 3328 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].clock;
3355 performance_level_entries[dpm_count].clock;
3356 } 3329 }
3357 } 3330 }
3358 3331
@@ -3362,11 +3335,9 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3362 dpm_count < dpm_table->mem_table.count; 3335 dpm_count < dpm_table->mem_table.count;
3363 dpm_count++) { 3336 dpm_count++) {
3364 dpm_table->mem_table.dpm_levels[dpm_count].enabled = 3337 dpm_table->mem_table.dpm_levels[dpm_count].enabled =
3365 data->odn_dpm_table.odn_memory_clock_dpm_levels. 3338 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].enabled;
3366 performance_level_entries[dpm_count].enabled;
3367 dpm_table->mem_table.dpm_levels[dpm_count].value = 3339 dpm_table->mem_table.dpm_levels[dpm_count].value =
3368 data->odn_dpm_table.odn_memory_clock_dpm_levels. 3340 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].clock;
3369 performance_level_entries[dpm_count].clock;
3370 } 3341 }
3371 } 3342 }
3372 3343
@@ -3398,8 +3369,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3398 dpm_table-> 3369 dpm_table->
3399 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1]. 3370 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
3400 value = sclk; 3371 value = sclk;
3401 if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) || 3372 if (hwmgr->od_enabled) {
3402 PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
3403 /* Need to do calculation based on the golden DPM table 3373 /* Need to do calculation based on the golden DPM table
3404 * as the Heatmap GPU Clock axis is also based on 3374 * as the Heatmap GPU Clock axis is also based on
3405 * the default values 3375 * the default values
@@ -3453,9 +3423,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3453 mem_table.dpm_levels[dpm_table->mem_table.count - 1]. 3423 mem_table.dpm_levels[dpm_table->mem_table.count - 1].
3454 value = mclk; 3424 value = mclk;
3455 3425
3456 if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) || 3426 if (hwmgr->od_enabled) {
3457 PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
3458
3459 PP_ASSERT_WITH_CODE( 3427 PP_ASSERT_WITH_CODE(
3460 golden_dpm_table->mem_table.dpm_levels 3428 golden_dpm_table->mem_table.dpm_levels
3461 [golden_dpm_table->mem_table.count - 1].value, 3429 [golden_dpm_table->mem_table.count - 1].value,
@@ -3643,12 +3611,9 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3643 if (!data->registry_data.sclk_dpm_key_disabled) { 3611 if (!data->registry_data.sclk_dpm_key_disabled) {
3644 if (data->smc_state_table.gfx_boot_level != 3612 if (data->smc_state_table.gfx_boot_level !=
3645 data->dpm_table.gfx_table.dpm_state.soft_min_level) { 3613 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3646 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( 3614 smum_send_msg_to_smc_with_parameter(hwmgr,
3647 hwmgr,
3648 PPSMC_MSG_SetSoftMinGfxclkByIndex, 3615 PPSMC_MSG_SetSoftMinGfxclkByIndex,
3649 data->smc_state_table.gfx_boot_level), 3616 data->smc_state_table.gfx_boot_level);
3650 "Failed to set soft min sclk index!",
3651 return -EINVAL);
3652 data->dpm_table.gfx_table.dpm_state.soft_min_level = 3617 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3653 data->smc_state_table.gfx_boot_level; 3618 data->smc_state_table.gfx_boot_level;
3654 } 3619 }
@@ -3659,19 +3624,13 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3659 data->dpm_table.mem_table.dpm_state.soft_min_level) { 3624 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3660 if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) { 3625 if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
3661 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr); 3626 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
3662 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( 3627 smum_send_msg_to_smc_with_parameter(hwmgr,
3663 hwmgr,
3664 PPSMC_MSG_SetSoftMinSocclkByIndex, 3628 PPSMC_MSG_SetSoftMinSocclkByIndex,
3665 socclk_idx), 3629 socclk_idx);
3666 "Failed to set soft min uclk index!",
3667 return -EINVAL);
3668 } else { 3630 } else {
3669 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( 3631 smum_send_msg_to_smc_with_parameter(hwmgr,
3670 hwmgr,
3671 PPSMC_MSG_SetSoftMinUclkByIndex, 3632 PPSMC_MSG_SetSoftMinUclkByIndex,
3672 data->smc_state_table.mem_boot_level), 3633 data->smc_state_table.mem_boot_level);
3673 "Failed to set soft min uclk index!",
3674 return -EINVAL);
3675 } 3634 }
3676 data->dpm_table.mem_table.dpm_state.soft_min_level = 3635 data->dpm_table.mem_table.dpm_state.soft_min_level =
3677 data->smc_state_table.mem_boot_level; 3636 data->smc_state_table.mem_boot_level;
@@ -3690,13 +3649,10 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3690 3649
3691 if (!data->registry_data.sclk_dpm_key_disabled) { 3650 if (!data->registry_data.sclk_dpm_key_disabled) {
3692 if (data->smc_state_table.gfx_max_level != 3651 if (data->smc_state_table.gfx_max_level !=
3693 data->dpm_table.gfx_table.dpm_state.soft_max_level) { 3652 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3694 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( 3653 smum_send_msg_to_smc_with_parameter(hwmgr,
3695 hwmgr,
3696 PPSMC_MSG_SetSoftMaxGfxclkByIndex, 3654 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
3697 data->smc_state_table.gfx_max_level), 3655 data->smc_state_table.gfx_max_level);
3698 "Failed to set soft max sclk index!",
3699 return -EINVAL);
3700 data->dpm_table.gfx_table.dpm_state.soft_max_level = 3656 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3701 data->smc_state_table.gfx_max_level; 3657 data->smc_state_table.gfx_max_level;
3702 } 3658 }
@@ -3704,13 +3660,10 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3704 3660
3705 if (!data->registry_data.mclk_dpm_key_disabled) { 3661 if (!data->registry_data.mclk_dpm_key_disabled) {
3706 if (data->smc_state_table.mem_max_level != 3662 if (data->smc_state_table.mem_max_level !=
3707 data->dpm_table.mem_table.dpm_state.soft_max_level) { 3663 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3708 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( 3664 smum_send_msg_to_smc_with_parameter(hwmgr,
3709 hwmgr, 3665 PPSMC_MSG_SetSoftMaxUclkByIndex,
3710 PPSMC_MSG_SetSoftMaxUclkByIndex, 3666 data->smc_state_table.mem_max_level);
3711 data->smc_state_table.mem_max_level),
3712 "Failed to set soft max mclk index!",
3713 return -EINVAL);
3714 data->dpm_table.mem_table.dpm_state.soft_max_level = 3667 data->dpm_table.mem_table.dpm_state.soft_max_level =
3715 data->smc_state_table.mem_max_level; 3668 data->smc_state_table.mem_max_level;
3716 } 3669 }
@@ -3780,7 +3733,6 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3780{ 3733{
3781 struct vega10_hwmgr *data = 3734 struct vega10_hwmgr *data =
3782 (struct vega10_hwmgr *)(hwmgr->backend); 3735 (struct vega10_hwmgr *)(hwmgr->backend);
3783 int result = 0;
3784 uint32_t low_sclk_interrupt_threshold = 0; 3736 uint32_t low_sclk_interrupt_threshold = 0;
3785 3737
3786 if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) && 3738 if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
@@ -3792,12 +3744,12 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3792 cpu_to_le32(low_sclk_interrupt_threshold); 3744 cpu_to_le32(low_sclk_interrupt_threshold);
3793 3745
3794 /* This message will also enable SmcToHost Interrupt */ 3746 /* This message will also enable SmcToHost Interrupt */
3795 result = smum_send_msg_to_smc_with_parameter(hwmgr, 3747 smum_send_msg_to_smc_with_parameter(hwmgr,
3796 PPSMC_MSG_SetLowGfxclkInterruptThreshold, 3748 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3797 (uint32_t)low_sclk_interrupt_threshold); 3749 (uint32_t)low_sclk_interrupt_threshold);
3798 } 3750 }
3799 3751
3800 return result; 3752 return 0;
3801} 3753}
3802 3754
3803static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, 3755static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
@@ -3888,13 +3840,11 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3888{ 3840{
3889 uint32_t value; 3841 uint32_t value;
3890 3842
3891 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 3843 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
3892 PPSMC_MSG_GetCurrPkgPwr),
3893 "Failed to get current package power!",
3894 return -EINVAL);
3895
3896 vega10_read_arg_from_smc(hwmgr, &value); 3844 vega10_read_arg_from_smc(hwmgr, &value);
3845
3897 /* power value is an integer */ 3846 /* power value is an integer */
3847 memset(query, 0, sizeof *query);
3898 query->average_gpu_power = value << 8; 3848 query->average_gpu_power = value << 8;
3899 3849
3900 return 0; 3850 return 0;
@@ -3907,31 +3857,34 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3907 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); 3857 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3908 struct vega10_dpm_table *dpm_table = &data->dpm_table; 3858 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3909 int ret = 0; 3859 int ret = 0;
3860 uint32_t reg, val_vid;
3910 3861
3911 switch (idx) { 3862 switch (idx) {
3912 case AMDGPU_PP_SENSOR_GFX_SCLK: 3863 case AMDGPU_PP_SENSOR_GFX_SCLK:
3913 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); 3864 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
3914 if (!ret) { 3865 vega10_read_arg_from_smc(hwmgr, &sclk_idx);
3915 vega10_read_arg_from_smc(hwmgr, &sclk_idx); 3866 if (sclk_idx < dpm_table->gfx_table.count) {
3916 *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value; 3867 *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
3917 *size = 4; 3868 *size = 4;
3869 } else {
3870 ret = -EINVAL;
3918 } 3871 }
3919 break; 3872 break;
3920 case AMDGPU_PP_SENSOR_GFX_MCLK: 3873 case AMDGPU_PP_SENSOR_GFX_MCLK:
3921 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex); 3874 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3922 if (!ret) { 3875 vega10_read_arg_from_smc(hwmgr, &mclk_idx);
3923 vega10_read_arg_from_smc(hwmgr, &mclk_idx); 3876 if (mclk_idx < dpm_table->mem_table.count) {
3924 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value; 3877 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3925 *size = 4; 3878 *size = 4;
3879 } else {
3880 ret = -EINVAL;
3926 } 3881 }
3927 break; 3882 break;
3928 case AMDGPU_PP_SENSOR_GPU_LOAD: 3883 case AMDGPU_PP_SENSOR_GPU_LOAD:
3929 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0); 3884 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3930 if (!ret) { 3885 vega10_read_arg_from_smc(hwmgr, &activity_percent);
3931 vega10_read_arg_from_smc(hwmgr, &activity_percent); 3886 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3932 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; 3887 *size = 4;
3933 *size = 4;
3934 }
3935 break; 3888 break;
3936 case AMDGPU_PP_SENSOR_GPU_TEMP: 3889 case AMDGPU_PP_SENSOR_GPU_TEMP:
3937 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr); 3890 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
@@ -3953,17 +3906,27 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3953 ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value); 3906 ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
3954 } 3907 }
3955 break; 3908 break;
3909 case AMDGPU_PP_SENSOR_VDDGFX:
3910 reg = soc15_get_register_offset(SMUIO_HWID, 0,
3911 mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX,
3912 mmSMUSVI0_PLANE0_CURRENTVID);
3913 val_vid = (cgs_read_register(hwmgr->device, reg) &
3914 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
3915 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
3916 *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
3917 return 0;
3956 default: 3918 default:
3957 ret = -EINVAL; 3919 ret = -EINVAL;
3958 break; 3920 break;
3959 } 3921 }
3922
3960 return ret; 3923 return ret;
3961} 3924}
3962 3925
3963static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr, 3926static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3964 bool has_disp) 3927 bool has_disp)
3965{ 3928{
3966 return smum_send_msg_to_smc_with_parameter(hwmgr, 3929 smum_send_msg_to_smc_with_parameter(hwmgr,
3967 PPSMC_MSG_SetUclkFastSwitch, 3930 PPSMC_MSG_SetUclkFastSwitch,
3968 has_disp ? 0 : 1); 3931 has_disp ? 0 : 1);
3969} 3932}
@@ -3998,7 +3961,7 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3998 3961
3999 if (!result) { 3962 if (!result) {
4000 clk_request = (clk_freq << 16) | clk_select; 3963 clk_request = (clk_freq << 16) | clk_select;
4001 result = smum_send_msg_to_smc_with_parameter(hwmgr, 3964 smum_send_msg_to_smc_with_parameter(hwmgr,
4002 PPSMC_MSG_RequestDisplayClockByFreq, 3965 PPSMC_MSG_RequestDisplayClockByFreq,
4003 clk_request); 3966 clk_request);
4004 } 3967 }
@@ -4067,10 +4030,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
4067 clock_req.clock_type = amd_pp_dcef_clock; 4030 clock_req.clock_type = amd_pp_dcef_clock;
4068 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value; 4031 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
4069 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) { 4032 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
4070 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( 4033 smum_send_msg_to_smc_with_parameter(
4071 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, 4034 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
4072 min_clocks.dcefClockInSR /100), 4035 min_clocks.dcefClockInSR / 100);
4073 "Attempt to set divider for DCEFCLK Failed!",);
4074 } else { 4036 } else {
4075 pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); 4037 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
4076 } 4038 }
@@ -4169,6 +4131,8 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
4169 *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL; 4131 *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
4170 *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL; 4132 *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
4171 *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL; 4133 *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
4134 hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
4135 hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
4172 } 4136 }
4173 4137
4174 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 4138 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -4210,6 +4174,9 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4210 uint32_t mclk_mask = 0; 4174 uint32_t mclk_mask = 0;
4211 uint32_t soc_mask = 0; 4175 uint32_t soc_mask = 0;
4212 4176
4177 if (hwmgr->pstate_sclk == 0)
4178 vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4179
4213 switch (level) { 4180 switch (level) {
4214 case AMD_DPM_FORCED_LEVEL_HIGH: 4181 case AMD_DPM_FORCED_LEVEL_HIGH:
4215 ret = vega10_force_dpm_highest(hwmgr); 4182 ret = vega10_force_dpm_highest(hwmgr);
@@ -4242,6 +4209,7 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4242 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 4209 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4243 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); 4210 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
4244 } 4211 }
4212
4245 return ret; 4213 return ret;
4246} 4214}
4247 4215
@@ -4488,26 +4456,11 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4488 enum pp_clock_type type, uint32_t mask) 4456 enum pp_clock_type type, uint32_t mask)
4489{ 4457{
4490 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); 4458 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4491 int i;
4492
4493 if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
4494 AMD_DPM_FORCED_LEVEL_LOW |
4495 AMD_DPM_FORCED_LEVEL_HIGH))
4496 return -EINVAL;
4497 4459
4498 switch (type) { 4460 switch (type) {
4499 case PP_SCLK: 4461 case PP_SCLK:
4500 for (i = 0; i < 32; i++) { 4462 data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
4501 if (mask & (1 << i)) 4463 data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
4502 break;
4503 }
4504 data->smc_state_table.gfx_boot_level = i;
4505
4506 for (i = 31; i >= 0; i--) {
4507 if (mask & (1 << i))
4508 break;
4509 }
4510 data->smc_state_table.gfx_max_level = i;
4511 4464
4512 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), 4465 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4513 "Failed to upload boot level to lowest!", 4466 "Failed to upload boot level to lowest!",
@@ -4519,17 +4472,8 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4519 break; 4472 break;
4520 4473
4521 case PP_MCLK: 4474 case PP_MCLK:
4522 for (i = 0; i < 32; i++) { 4475 data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
4523 if (mask & (1 << i)) 4476 data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
4524 break;
4525 }
4526 data->smc_state_table.mem_boot_level = i;
4527
4528 for (i = 31; i >= 0; i--) {
4529 if (mask & (1 << i))
4530 break;
4531 }
4532 data->smc_state_table.mem_max_level = i;
4533 4477
4534 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), 4478 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4535 "Failed to upload boot level to lowest!", 4479 "Failed to upload boot level to lowest!",
@@ -4563,14 +4507,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4563 if (data->registry_data.sclk_dpm_key_disabled) 4507 if (data->registry_data.sclk_dpm_key_disabled)
4564 break; 4508 break;
4565 4509
4566 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 4510 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
4567 PPSMC_MSG_GetCurrentGfxclkIndex), 4511 vega10_read_arg_from_smc(hwmgr, &now);
4568 "Attempt to get current sclk index Failed!",
4569 return -1);
4570 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
4571 &now),
4572 "Attempt to read sclk index Failed!",
4573 return -1);
4574 4512
4575 for (i = 0; i < sclk_table->count; i++) 4513 for (i = 0; i < sclk_table->count; i++)
4576 size += sprintf(buf + size, "%d: %uMhz %s\n", 4514 size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4581,14 +4519,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4581 if (data->registry_data.mclk_dpm_key_disabled) 4519 if (data->registry_data.mclk_dpm_key_disabled)
4582 break; 4520 break;
4583 4521
4584 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 4522 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
4585 PPSMC_MSG_GetCurrentUclkIndex), 4523 vega10_read_arg_from_smc(hwmgr, &now);
4586 "Attempt to get current mclk index Failed!",
4587 return -1);
4588 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
4589 &now),
4590 "Attempt to read mclk index Failed!",
4591 return -1);
4592 4524
4593 for (i = 0; i < mclk_table->count; i++) 4525 for (i = 0; i < mclk_table->count; i++)
4594 size += sprintf(buf + size, "%d: %uMhz %s\n", 4526 size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4596,14 +4528,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4596 (i == now) ? "*" : ""); 4528 (i == now) ? "*" : "");
4597 break; 4529 break;
4598 case PP_PCIE: 4530 case PP_PCIE:
4599 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 4531 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
4600 PPSMC_MSG_GetCurrentLinkIndex), 4532 vega10_read_arg_from_smc(hwmgr, &now);
4601 "Attempt to get current mclk index Failed!",
4602 return -1);
4603 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
4604 &now),
4605 "Attempt to read mclk index Failed!",
4606 return -1);
4607 4533
4608 for (i = 0; i < pcie_table->count; i++) 4534 for (i = 0; i < pcie_table->count; i++)
4609 size += sprintf(buf + size, "%d: %s %s\n", i, 4535 size += sprintf(buf + size, "%d: %s %s\n", i,
@@ -4744,11 +4670,6 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4744{ 4670{
4745 int tmp_result, result = 0; 4671 int tmp_result, result = 0;
4746 4672
4747 tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
4748 PP_ASSERT_WITH_CODE(tmp_result == 0,
4749 "DPM is not running right now, no need to disable DPM!",
4750 return 0);
4751
4752 if (PP_CAP(PHM_PlatformCaps_ThermalController)) 4673 if (PP_CAP(PHM_PlatformCaps_ThermalController))
4753 vega10_disable_thermal_protection(hwmgr); 4674 vega10_disable_thermal_protection(hwmgr);
4754 4675
@@ -4779,6 +4700,8 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4779 tmp_result = vega10_acg_disable(hwmgr); 4700 tmp_result = vega10_acg_disable(hwmgr);
4780 PP_ASSERT_WITH_CODE((tmp_result == 0), 4701 PP_ASSERT_WITH_CODE((tmp_result == 0),
4781 "Failed to disable acg!", result = tmp_result); 4702 "Failed to disable acg!", result = tmp_result);
4703
4704 vega10_enable_disable_PCC_limit_feature(hwmgr, false);
4782 return result; 4705 return result;
4783} 4706}
4784 4707
@@ -4796,68 +4719,6 @@ static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4796 return result; 4719 return result;
4797} 4720}
4798 4721
4799static void vega10_find_min_clock_index(struct pp_hwmgr *hwmgr,
4800 uint32_t *sclk_idx, uint32_t *mclk_idx,
4801 uint32_t min_sclk, uint32_t min_mclk)
4802{
4803 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4804 struct vega10_dpm_table *dpm_table = &(data->dpm_table);
4805 uint32_t i;
4806
4807 for (i = 0; i < dpm_table->gfx_table.count; i++) {
4808 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
4809 dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
4810 *sclk_idx = i;
4811 break;
4812 }
4813 }
4814
4815 for (i = 0; i < dpm_table->mem_table.count; i++) {
4816 if (dpm_table->mem_table.dpm_levels[i].enabled &&
4817 dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
4818 *mclk_idx = i;
4819 break;
4820 }
4821 }
4822}
4823
4824static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
4825 struct amd_pp_profile *request)
4826{
4827 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4828 uint32_t sclk_idx = ~0, mclk_idx = ~0;
4829
4830 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
4831 return -EINVAL;
4832
4833 vega10_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
4834 request->min_sclk, request->min_mclk);
4835
4836 if (sclk_idx != ~0) {
4837 if (!data->registry_data.sclk_dpm_key_disabled)
4838 PP_ASSERT_WITH_CODE(
4839 !smum_send_msg_to_smc_with_parameter(
4840 hwmgr,
4841 PPSMC_MSG_SetSoftMinGfxclkByIndex,
4842 sclk_idx),
4843 "Failed to set soft min sclk index!",
4844 return -EINVAL);
4845 }
4846
4847 if (mclk_idx != ~0) {
4848 if (!data->registry_data.mclk_dpm_key_disabled)
4849 PP_ASSERT_WITH_CODE(
4850 !smum_send_msg_to_smc_with_parameter(
4851 hwmgr,
4852 PPSMC_MSG_SetSoftMinUclkByIndex,
4853 mclk_idx),
4854 "Failed to set soft min mclk index!",
4855 return -EINVAL);
4856 }
4857
4858 return 0;
4859}
4860
4861static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr) 4722static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4862{ 4723{
4863 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); 4724 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
@@ -4988,6 +4849,20 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4988 return 0; 4849 return 0;
4989} 4850}
4990 4851
4852static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4853 struct PP_TemperatureRange *thermal_data)
4854{
4855 struct phm_ppt_v2_information *table_info =
4856 (struct phm_ppt_v2_information *)hwmgr->pptable;
4857
4858 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4859
4860 thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
4861 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4862
4863 return 0;
4864}
4865
4991static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr, 4866static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
4992 const void *info) 4867 const void *info)
4993{ 4868{
@@ -4999,12 +4874,12 @@ static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
4999 hwmgr->thermal_controller.ucType == 4874 hwmgr->thermal_controller.ucType ==
5000 ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { 4875 ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
5001 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, 4876 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
5002 0xf, /* AMDGPU_IH_CLIENTID_THM */ 4877 SOC15_IH_CLIENTID_THM,
5003 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr), 4878 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
5004 "Failed to register high thermal interrupt!", 4879 "Failed to register high thermal interrupt!",
5005 return -EINVAL); 4880 return -EINVAL);
5006 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, 4881 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
5007 0xf, /* AMDGPU_IH_CLIENTID_THM */ 4882 SOC15_IH_CLIENTID_THM,
5008 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr), 4883 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
5009 "Failed to register low thermal interrupt!", 4884 "Failed to register low thermal interrupt!",
5010 return -EINVAL); 4885 return -EINVAL);
@@ -5012,7 +4887,7 @@ static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
5012 4887
5013 /* Register CTF(GPIO_19) interrupt */ 4888 /* Register CTF(GPIO_19) interrupt */
5014 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, 4889 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
5015 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */ 4890 SOC15_IH_CLIENTID_ROM_SMUIO,
5016 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr), 4891 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
5017 "Failed to register CTF thermal interrupt!", 4892 "Failed to register CTF thermal interrupt!",
5018 return -EINVAL); 4893 return -EINVAL);
@@ -5020,6 +4895,77 @@ static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
5020 return 0; 4895 return 0;
5021} 4896}
5022 4897
4898static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4899{
4900 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4901 uint32_t i, size = 0;
4902 static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
4903 {90, 60, 0, 0,},
4904 {70, 60, 0, 0,},
4905 {70, 90, 0, 0,},
4906 {30, 60, 0, 6,},
4907 };
4908 static const char *profile_name[6] = {"3D_FULL_SCREEN",
4909 "POWER_SAVING",
4910 "VIDEO",
4911 "VR",
4912 "COMPUTE",
4913 "CUSTOM"};
4914 static const char *title[6] = {"NUM",
4915 "MODE_NAME",
4916 "BUSY_SET_POINT",
4917 "FPS",
4918 "USE_RLC_BUSY",
4919 "MIN_ACTIVE_LEVEL"};
4920
4921 if (!buf)
4922 return -EINVAL;
4923
4924 size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
4925 title[1], title[2], title[3], title[4], title[5]);
4926
4927 for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
4928 size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
4929 i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4930 profile_mode_setting[i][0], profile_mode_setting[i][1],
4931 profile_mode_setting[i][2], profile_mode_setting[i][3]);
4932 size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
4933 profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4934 data->custom_profile_mode[0], data->custom_profile_mode[1],
4935 data->custom_profile_mode[2], data->custom_profile_mode[3]);
4936 return size;
4937}
4938
4939static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4940{
4941 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4942 uint8_t busy_set_point;
4943 uint8_t FPS;
4944 uint8_t use_rlc_busy;
4945 uint8_t min_active_level;
4946
4947 hwmgr->power_profile_mode = input[size];
4948
4949 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
4950 1<<hwmgr->power_profile_mode);
4951
4952 if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
4953 if (size == 0 || size > 4)
4954 return -EINVAL;
4955
4956 data->custom_profile_mode[0] = busy_set_point = input[0];
4957 data->custom_profile_mode[1] = FPS = input[1];
4958 data->custom_profile_mode[2] = use_rlc_busy = input[2];
4959 data->custom_profile_mode[3] = min_active_level = input[3];
4960 smum_send_msg_to_smc_with_parameter(hwmgr,
4961 PPSMC_MSG_SetCustomGfxDpmParameters,
4962 busy_set_point | FPS<<8 |
4963 use_rlc_busy << 16 | min_active_level<<24);
4964 }
4965
4966 return 0;
4967}
4968
5023static const struct pp_hwmgr_func vega10_hwmgr_funcs = { 4969static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
5024 .backend_init = vega10_hwmgr_backend_init, 4970 .backend_init = vega10_hwmgr_backend_init,
5025 .backend_fini = vega10_hwmgr_backend_fini, 4971 .backend_fini = vega10_hwmgr_backend_fini,
@@ -5038,7 +4984,6 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
5038 .notify_smc_display_config_after_ps_adjustment = 4984 .notify_smc_display_config_after_ps_adjustment =
5039 vega10_notify_smc_display_config_after_ps_adjustment, 4985 vega10_notify_smc_display_config_after_ps_adjustment,
5040 .force_dpm_level = vega10_dpm_force_dpm_level, 4986 .force_dpm_level = vega10_dpm_force_dpm_level,
5041 .get_temperature = vega10_thermal_get_temperature,
5042 .stop_thermal_controller = vega10_thermal_stop_thermal_controller, 4987 .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
5043 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info, 4988 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
5044 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent, 4989 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
@@ -5067,15 +5012,18 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
5067 vega10_check_smc_update_required_for_display_configuration, 5012 vega10_check_smc_update_required_for_display_configuration,
5068 .power_off_asic = vega10_power_off_asic, 5013 .power_off_asic = vega10_power_off_asic,
5069 .disable_smc_firmware_ctf = vega10_thermal_disable_alert, 5014 .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
5070 .set_power_profile_state = vega10_set_power_profile_state,
5071 .get_sclk_od = vega10_get_sclk_od, 5015 .get_sclk_od = vega10_get_sclk_od,
5072 .set_sclk_od = vega10_set_sclk_od, 5016 .set_sclk_od = vega10_set_sclk_od,
5073 .get_mclk_od = vega10_get_mclk_od, 5017 .get_mclk_od = vega10_get_mclk_od,
5074 .set_mclk_od = vega10_set_mclk_od, 5018 .set_mclk_od = vega10_set_mclk_od,
5075 .avfs_control = vega10_avfs_enable, 5019 .avfs_control = vega10_avfs_enable,
5076 .notify_cac_buffer_info = vega10_notify_cac_buffer_info, 5020 .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
5021 .get_thermal_temperature_range = vega10_get_thermal_temperature_range,
5077 .register_internal_thermal_interrupt = vega10_register_thermal_interrupt, 5022 .register_internal_thermal_interrupt = vega10_register_thermal_interrupt,
5078 .start_thermal_controller = vega10_start_thermal_controller, 5023 .start_thermal_controller = vega10_start_thermal_controller,
5024 .get_power_profile_mode = vega10_get_power_profile_mode,
5025 .set_power_profile_mode = vega10_set_power_profile_mode,
5026 .set_power_limit = vega10_set_power_limit,
5079}; 5027};
5080 5028
5081int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) 5029int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
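The reworked vega10_force_clock_level() hunks above replace the open-coded 32-iteration bit scans with ffs()/fls() to derive the boot and max DPM levels from the requested level mask. A minimal, self-contained userspace sketch of that mask-to-index mapping (this is not the kernel code; ffs() here comes from <strings.h> and a small helper stands in for the kernel's fls() using the GCC/Clang __builtin_clz builtin):

#include <stdio.h>
#include <strings.h>   /* ffs() */

/* Userspace stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_compat(unsigned int mask)
{
	return mask ? 32 - __builtin_clz(mask) : 0;
}

int main(void)
{
	unsigned int mask = 0x0000001C;  /* DPM levels 2..4 requested */

	/* Same mapping the new PP_SCLK/PP_MCLK cases use:
	 * lowest requested level becomes the boot level,
	 * highest requested level becomes the max level. */
	unsigned int boot_level = mask ? (unsigned int)(ffs(mask) - 1) : 0;
	unsigned int max_level  = mask ? (unsigned int)(fls_compat(mask) - 1) : 0;

	printf("mask 0x%08x -> boot level %u, max level %u\n",
	       mask, boot_level, max_level);   /* prints boot 2, max 4 */
	return 0;
}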
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index e8507ff8dbb3..8f6c2cb962da 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -66,6 +66,7 @@ enum {
66 GNLD_FEATURE_FAST_PPT_BIT, 66 GNLD_FEATURE_FAST_PPT_BIT,
67 GNLD_DIDT, 67 GNLD_DIDT,
68 GNLD_ACG, 68 GNLD_ACG,
69 GNLD_PCC_LIMIT,
69 GNLD_FEATURES_MAX 70 GNLD_FEATURES_MAX
70}; 71};
71 72
@@ -189,12 +190,6 @@ struct vega10_vbios_boot_state {
189 uint32_t dcef_clock; 190 uint32_t dcef_clock;
190}; 191};
191 192
192#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
193#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
194#define DPMTABLE_UPDATE_SCLK 0x00000004
195#define DPMTABLE_UPDATE_MCLK 0x00000008
196#define DPMTABLE_OD_UPDATE_VDDC 0x00000010
197
198struct vega10_smc_state_table { 193struct vega10_smc_state_table {
199 uint32_t soc_boot_level; 194 uint32_t soc_boot_level;
200 uint32_t gfx_boot_level; 195 uint32_t gfx_boot_level;
@@ -379,9 +374,6 @@ struct vega10_hwmgr {
379 /* ---- Overdrive next setting ---- */ 374 /* ---- Overdrive next setting ---- */
380 uint32_t apply_overdrive_next_settings_mask; 375 uint32_t apply_overdrive_next_settings_mask;
381 376
382 /* ---- Workload Mask ---- */
383 uint32_t workload_mask;
384
385 /* ---- SMU9 ---- */ 377 /* ---- SMU9 ---- */
386 struct smu_features smu_features[GNLD_FEATURES_MAX]; 378 struct smu_features smu_features[GNLD_FEATURES_MAX];
387 struct vega10_smc_state_table smc_state_table; 379 struct vega10_smc_state_table smc_state_table;
@@ -389,6 +381,7 @@ struct vega10_hwmgr {
389 uint32_t config_telemetry; 381 uint32_t config_telemetry;
390 uint32_t acg_loop_state; 382 uint32_t acg_loop_state;
391 uint32_t mem_channels; 383 uint32_t mem_channels;
384 uint8_t custom_profile_mode[4];
392}; 385};
393 386
394#define VEGA10_DPM2_NEAR_TDP_DEC 10 387#define VEGA10_DPM2_NEAR_TDP_DEC 10
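The custom_profile_mode[4] field added to struct vega10_hwmgr above holds the four bytes that the new vega10_set_power_profile_mode() packs into one 32-bit parameter for PPSMC_MSG_SetCustomGfxDpmParameters (busy set point, FPS, use_rlc_busy, min active level). A small illustrative sketch of that packing and unpacking, using the same byte order as the diff; the sample values are arbitrary and this is not the driver code itself:

#include <stdint.h>
#include <stdio.h>

/* Pack the four custom-profile bytes the way the diff does:
 * byte 0 = busy set point, byte 1 = FPS,
 * byte 2 = use_rlc_busy, byte 3 = min active level. */
static uint32_t pack_custom_profile(const uint8_t p[4])
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	uint8_t custom_profile_mode[4] = { 70, 60, 1, 3 };  /* sample values only */
	uint32_t param = pack_custom_profile(custom_profile_mode);

	printf("SMC parameter: 0x%08x\n", param);
	/* Unpack to show the mapping round-trips. */
	printf("busy=%u fps=%u rlc_busy=%u min_active=%u\n",
	       param & 0xff, (param >> 8) & 0xff,
	       (param >> 16) & 0xff, (param >> 24) & 0xff);
	return 0;
}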
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 598a194737a9..b1f74c7f0943 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -850,7 +850,6 @@ static int vega10_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const
850static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) 850static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
851{ 851{
852 uint32_t data; 852 uint32_t data;
853 int result;
854 uint32_t en = (enable ? 1 : 0); 853 uint32_t en = (enable ? 1 : 0);
855 uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; 854 uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
856 855
@@ -924,24 +923,20 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
924 } 923 }
925 } 924 }
926 925
927 if (enable) { 926 /* For Vega10, SMC does not support any mask yet. */
928 /* For Vega10, SMC does not support any mask yet. */ 927 if (enable)
929 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info); 928 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
930 PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!"); 929
931 }
932} 930}
933 931
934static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) 932static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
935{ 933{
936 int result; 934 int result;
937 uint32_t num_se = 0, count, data; 935 uint32_t num_se = 0, count, data;
938 struct cgs_system_info sys_info = {0}; 936 struct amdgpu_device *adev = hwmgr->adev;
939 uint32_t reg; 937 uint32_t reg;
940 938
941 sys_info.size = sizeof(struct cgs_system_info); 939 num_se = adev->gfx.config.max_shader_engines;
942 sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
943 if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
944 num_se = sys_info.value;
945 940
946 cgs_enter_safe_mode(hwmgr->device, true); 941 cgs_enter_safe_mode(hwmgr->device, true);
947 942
@@ -989,13 +984,10 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
989{ 984{
990 int result; 985 int result;
991 uint32_t num_se = 0, count, data; 986 uint32_t num_se = 0, count, data;
992 struct cgs_system_info sys_info = {0}; 987 struct amdgpu_device *adev = hwmgr->adev;
993 uint32_t reg; 988 uint32_t reg;
994 989
995 sys_info.size = sizeof(struct cgs_system_info); 990 num_se = adev->gfx.config.max_shader_engines;
996 sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
997 if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
998 num_se = sys_info.value;
999 991
1000 cgs_enter_safe_mode(hwmgr->device, true); 992 cgs_enter_safe_mode(hwmgr->device, true);
1001 993
@@ -1054,13 +1046,10 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
1054{ 1046{
1055 int result; 1047 int result;
1056 uint32_t num_se = 0, count, data; 1048 uint32_t num_se = 0, count, data;
1057 struct cgs_system_info sys_info = {0}; 1049 struct amdgpu_device *adev = hwmgr->adev;
1058 uint32_t reg; 1050 uint32_t reg;
1059 1051
1060 sys_info.size = sizeof(struct cgs_system_info); 1052 num_se = adev->gfx.config.max_shader_engines;
1061 sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
1062 if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
1063 num_se = sys_info.value;
1064 1053
1065 cgs_enter_safe_mode(hwmgr->device, true); 1054 cgs_enter_safe_mode(hwmgr->device, true);
1066 1055
@@ -1105,13 +1094,10 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1105 int result; 1094 int result;
1106 uint32_t num_se = 0; 1095 uint32_t num_se = 0;
1107 uint32_t count, data; 1096 uint32_t count, data;
1108 struct cgs_system_info sys_info = {0}; 1097 struct amdgpu_device *adev = hwmgr->adev;
1109 uint32_t reg; 1098 uint32_t reg;
1110 1099
1111 sys_info.size = sizeof(struct cgs_system_info); 1100 num_se = adev->gfx.config.max_shader_engines;
1112 sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
1113 if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
1114 num_se = sys_info.value;
1115 1101
1116 cgs_enter_safe_mode(hwmgr->device, true); 1102 cgs_enter_safe_mode(hwmgr->device, true);
1117 1103
@@ -1344,7 +1330,7 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
1344 (struct vega10_hwmgr *)(hwmgr->backend); 1330 (struct vega10_hwmgr *)(hwmgr->backend);
1345 1331
1346 if (data->registry_data.enable_pkg_pwr_tracking_feature) 1332 if (data->registry_data.enable_pkg_pwr_tracking_feature)
1347 return smum_send_msg_to_smc_with_parameter(hwmgr, 1333 smum_send_msg_to_smc_with_parameter(hwmgr,
1348 PPSMC_MSG_SetPptLimit, n); 1334 PPSMC_MSG_SetPptLimit, n);
1349 1335
1350 return 0; 1336 return 0;
@@ -1357,10 +1343,11 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
1357 struct phm_ppt_v2_information *table_info = 1343 struct phm_ppt_v2_information *table_info =
1358 (struct phm_ppt_v2_information *)(hwmgr->pptable); 1344 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1359 struct phm_tdp_table *tdp_table = table_info->tdp_table; 1345 struct phm_tdp_table *tdp_table = table_info->tdp_table;
1360 uint32_t default_pwr_limit =
1361 (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
1362 int result = 0; 1346 int result = 0;
1363 1347
1348 hwmgr->default_power_limit = hwmgr->power_limit =
1349 (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
1350
1364 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { 1351 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
1365 if (data->smu_features[GNLD_PPT].supported) 1352 if (data->smu_features[GNLD_PPT].supported)
1366 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, 1353 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -1374,7 +1361,7 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
1374 "Attempt to enable PPT feature Failed!", 1361 "Attempt to enable PPT feature Failed!",
1375 data->smu_features[GNLD_TDC].supported = false); 1362 data->smu_features[GNLD_TDC].supported = false);
1376 1363
1377 result = vega10_set_power_limit(hwmgr, default_pwr_limit); 1364 result = vega10_set_power_limit(hwmgr, hwmgr->power_limit);
1378 PP_ASSERT_WITH_CODE(!result, 1365 PP_ASSERT_WITH_CODE(!result,
1379 "Failed to set Default Power Limit in SMC!", 1366 "Failed to set Default Power Limit in SMC!",
1380 return result); 1367 return result);
@@ -1405,24 +1392,24 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
1405 return 0; 1392 return 0;
1406} 1393}
1407 1394
1408static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr, 1395static void vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
1409 uint32_t adjust_percent) 1396 uint32_t adjust_percent)
1410{ 1397{
1411 return smum_send_msg_to_smc_with_parameter(hwmgr, 1398 smum_send_msg_to_smc_with_parameter(hwmgr,
1412 PPSMC_MSG_OverDriveSetPercentage, adjust_percent); 1399 PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
1413} 1400}
1414 1401
1415int vega10_power_control_set_level(struct pp_hwmgr *hwmgr) 1402int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
1416{ 1403{
1417 int adjust_percent, result = 0; 1404 int adjust_percent;
1418 1405
1419 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { 1406 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
1420 adjust_percent = 1407 adjust_percent =
1421 hwmgr->platform_descriptor.TDPAdjustmentPolarity ? 1408 hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
1422 hwmgr->platform_descriptor.TDPAdjustment : 1409 hwmgr->platform_descriptor.TDPAdjustment :
1423 (-1 * hwmgr->platform_descriptor.TDPAdjustment); 1410 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
1424 result = vega10_set_overdrive_target_percentage(hwmgr, 1411 vega10_set_overdrive_target_percentage(hwmgr,
1425 (uint32_t)adjust_percent); 1412 (uint32_t)adjust_percent);
1426 } 1413 }
1427 return result; 1414 return 0;
1428} 1415}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index f14c7611fad3..c61d0744860d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -267,10 +267,10 @@ static int init_over_drive_limits(
267 hwmgr->platform_descriptor.maxOverdriveVDDC = 0; 267 hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
268 hwmgr->platform_descriptor.overdriveVDDCStep = 0; 268 hwmgr->platform_descriptor.overdriveVDDCStep = 0;
269 269
270 if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 && 270 if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 ||
271 hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) { 271 hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
272 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 272 hwmgr->od_enabled = false;
273 PHM_PlatformCaps_ACOverdriveSupport); 273 pr_debug("OverDrive feature not support by VBIOS\n");
274 } 274 }
275 275
276 return 0; 276 return 0;
@@ -688,9 +688,9 @@ static int get_dcefclk_voltage_dependency_table(
688 uint8_t num_entries; 688 uint8_t num_entries;
689 struct phm_ppt_v1_clock_voltage_dependency_table 689 struct phm_ppt_v1_clock_voltage_dependency_table
690 *clk_table; 690 *clk_table;
691 struct cgs_system_info sys_info = {0};
692 uint32_t dev_id; 691 uint32_t dev_id;
693 uint32_t rev_id; 692 uint32_t rev_id;
693 struct amdgpu_device *adev = hwmgr->adev;
694 694
695 PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0), 695 PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0),
696 "Invalid PowerPlay Table!", return -1); 696 "Invalid PowerPlay Table!", return -1);
@@ -701,15 +701,8 @@ static int get_dcefclk_voltage_dependency_table(
701 * This DPM level was added to support 3DPM monitors @ 4K120Hz 701 * This DPM level was added to support 3DPM monitors @ 4K120Hz
702 * 702 *
703 */ 703 */
704 sys_info.size = sizeof(struct cgs_system_info); 704 dev_id = adev->pdev->device;
705 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; 705 rev_id = adev->pdev->revision;
706 cgs_query_system_info(hwmgr->device, &sys_info);
707 dev_id = (uint32_t)sys_info.value;
708
709 sys_info.size = sizeof(struct cgs_system_info);
710 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
711 cgs_query_system_info(hwmgr->device, &sys_info);
712 rev_id = (uint32_t)sys_info.value;
713 706
714 if (dev_id == 0x6863 && rev_id == 0 && 707 if (dev_id == 0x6863 && rev_id == 0 &&
715 clk_dep_table->entries[clk_dep_table->ucNumEntries - 1].ulClk < 90000) 708 clk_dep_table->entries[clk_dep_table->ucNumEntries - 1].ulClk < 90000)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index dc3761bcb9b6..fc2325e7f387 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -31,14 +31,8 @@
31 31
32static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) 32static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
33{ 33{
34 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 34 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm);
35 PPSMC_MSG_GetCurrentRpm), 35 vega10_read_arg_from_smc(hwmgr, current_rpm);
36 "Attempt to get current RPM from SMC Failed!",
37 return -1);
38 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
39 current_rpm),
40 "Attempt to read current RPM from SMC Failed!",
41 return -1);
42 return 0; 36 return 0;
43} 37}
44 38
@@ -386,9 +380,9 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
386static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, 380static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
387 struct PP_TemperatureRange *range) 381 struct PP_TemperatureRange *range)
388{ 382{
389 uint32_t low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * 383 int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
390 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 384 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
391 uint32_t high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * 385 int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
392 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 386 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
393 uint32_t val, reg; 387 uint32_t val, reg;
394 388
@@ -409,7 +403,9 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
409 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); 403 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
410 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); 404 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
411 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); 405 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
412 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); 406 val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) &
407 (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) &
408 (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
413 409
414 cgs_write_register(hwmgr->device, reg, val); 410 cgs_write_register(hwmgr->device, reg, val);
415 411
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 152e70db4a81..fe3665965416 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -30,6 +30,6 @@
30#include "cgs_common.h" 30#include "cgs_common.h"
31#include "dm_pp_interface.h" 31#include "dm_pp_interface.h"
32#include "kgd_pp_interface.h" 32#include "kgd_pp_interface.h"
33 33#include "amdgpu.h"
34 34
35#endif /* _AMD_POWERPLAY_H_ */ 35#endif /* _AMD_POWERPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 5716b937a6ad..b366a5bd2d81 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -358,6 +358,17 @@ struct phm_clocks {
358 uint32_t clock[MAX_NUM_CLOCKS]; 358 uint32_t clock[MAX_NUM_CLOCKS];
359}; 359};
360 360
361#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
362#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
363#define DPMTABLE_UPDATE_SCLK 0x00000004
364#define DPMTABLE_UPDATE_MCLK 0x00000008
365#define DPMTABLE_OD_UPDATE_VDDC 0x00000010
366
367/* To determine if sclk and mclk are in overdrive state */
368#define SCLK_OVERDRIVE_ENABLED 0x00000001
369#define MCLK_OVERDRIVE_ENABLED 0x00000002
370#define VDDC_OVERDRIVE_ENABLED 0x00000010
371
361struct phm_odn_performance_level { 372struct phm_odn_performance_level {
362 uint32_t clock; 373 uint32_t clock;
363 uint32_t vddc; 374 uint32_t vddc;
@@ -368,9 +379,9 @@ struct phm_odn_clock_levels {
368 uint32_t size; 379 uint32_t size;
369 uint32_t options; 380 uint32_t options;
370 uint32_t flags; 381 uint32_t flags;
371 uint32_t number_of_performance_levels; 382 uint32_t num_of_pl;
372 /* variable-sized array, specify by ulNumberOfPerformanceLevels. */ 383 /* variable-sized array, specify by num_of_pl. */
373 struct phm_odn_performance_level performance_level_entries[8]; 384 struct phm_odn_performance_level entries[8];
374}; 385};
375 386
376extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr); 387extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
@@ -393,7 +404,7 @@ extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_leve
393extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr); 404extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr);
394extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); 405extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
395extern int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info); 406extern int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info);
396extern int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range); 407extern int phm_start_thermal_controller(struct pp_hwmgr *hwmgr);
397extern int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr); 408extern int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr);
398extern bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr); 409extern bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr);
399 410
@@ -437,6 +448,5 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
437 448
438extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); 449extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
439extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr); 450extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
440extern int phm_reset_power_profile_state(struct pp_hwmgr *hwmgr);
441#endif /* _HARDWARE_MANAGER_H_ */ 451#endif /* _HARDWARE_MANAGER_H_ */
442 452
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 565fe0832f41..85b46ad68546 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -25,16 +25,14 @@
25 25
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include "amd_powerplay.h" 27#include "amd_powerplay.h"
28#include "pp_instance.h"
29#include "hardwaremanager.h" 28#include "hardwaremanager.h"
30#include "pp_power_source.h" 29#include "pp_power_source.h"
31#include "hwmgr_ppt.h" 30#include "hwmgr_ppt.h"
32#include "ppatomctrl.h" 31#include "ppatomctrl.h"
33#include "hwmgr_ppt.h" 32#include "hwmgr_ppt.h"
34#include "power_state.h" 33#include "power_state.h"
35#include "cgs_linux.h" 34#include "smu_helper.h"
36 35
37struct pp_instance;
38struct pp_hwmgr; 36struct pp_hwmgr;
39struct phm_fan_speed_info; 37struct phm_fan_speed_info;
40struct pp_atomctrl_voltage_table; 38struct pp_atomctrl_voltage_table;
@@ -42,6 +40,7 @@ struct pp_atomctrl_voltage_table;
42#define VOLTAGE_SCALE 4 40#define VOLTAGE_SCALE 4
43 41
44uint8_t convert_to_vid(uint16_t vddc); 42uint8_t convert_to_vid(uint16_t vddc);
43uint16_t convert_to_vddc(uint8_t vid);
45 44
46enum DISPLAY_GAP { 45enum DISPLAY_GAP {
47 DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */ 46 DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */
@@ -83,6 +82,7 @@ enum PP_FEATURE_MASK {
83 PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800, 82 PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800,
84 PP_SOCCLK_DPM_MASK = 0x1000, 83 PP_SOCCLK_DPM_MASK = 0x1000,
85 PP_DCEFCLK_DPM_MASK = 0x2000, 84 PP_DCEFCLK_DPM_MASK = 0x2000,
85 PP_OVERDRIVE_MASK = 0x4000,
86}; 86};
87 87
88enum PHM_BackEnd_Magic { 88enum PHM_BackEnd_Magic {
@@ -233,9 +233,9 @@ struct pp_smumgr_func {
233 uint32_t (*get_offsetof)(uint32_t type, uint32_t member); 233 uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
234 uint32_t (*get_mac_definition)(uint32_t value); 234 uint32_t (*get_mac_definition)(uint32_t value);
235 bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); 235 bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
236 int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
237 struct amd_pp_profile *request);
238 bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr); 236 bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
237 int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
238 int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
239}; 239};
240 240
241struct pp_hwmgr_func { 241struct pp_hwmgr_func {
@@ -277,7 +277,6 @@ struct pp_hwmgr_func {
277 const uint32_t *msg_id); 277 const uint32_t *msg_id);
278 int (*set_max_fan_rpm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm); 278 int (*set_max_fan_rpm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm);
279 int (*set_max_fan_pwm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm); 279 int (*set_max_fan_pwm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm);
280 int (*get_temperature)(struct pp_hwmgr *hwmgr);
281 int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr); 280 int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr);
282 int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); 281 int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
283 void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode); 282 void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
@@ -326,8 +325,6 @@ struct pp_hwmgr_func {
326 int (*get_mclk_od)(struct pp_hwmgr *hwmgr); 325 int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
327 int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); 326 int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
328 int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, void *value, int *size); 327 int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, void *value, int *size);
329 int (*set_power_profile_state)(struct pp_hwmgr *hwmgr,
330 struct amd_pp_profile *request);
331 int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable); 328 int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
332 int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr); 329 int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
333 int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count); 330 int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
@@ -339,6 +336,15 @@ struct pp_hwmgr_func {
339 uint32_t mc_addr_low, 336 uint32_t mc_addr_low,
340 uint32_t mc_addr_hi, 337 uint32_t mc_addr_hi,
341 uint32_t size); 338 uint32_t size);
339 int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
340 struct PP_TemperatureRange *range);
341 int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf);
342 int (*set_power_profile_mode)(struct pp_hwmgr *hwmgr, long *input, uint32_t size);
343 int (*odn_edit_dpm_table)(struct pp_hwmgr *hwmgr,
344 enum PP_OD_DPM_TABLE_COMMAND type,
345 long *input, uint32_t size);
346 int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
347 int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
342}; 348};
343 349
344struct pp_table_func { 350struct pp_table_func {
@@ -608,7 +614,6 @@ struct phm_dynamic_state_info {
608 struct phm_ppm_table *ppm_parameter_table; 614 struct phm_ppm_table *ppm_parameter_table;
609 struct phm_cac_tdp_table *cac_dtp_table; 615 struct phm_cac_tdp_table *cac_dtp_table;
610 struct phm_clock_voltage_dependency_table *vdd_gfx_dependency_on_sclk; 616 struct phm_clock_voltage_dependency_table *vdd_gfx_dependency_on_sclk;
611 struct phm_vq_budgeting_table *vq_budgeting_table;
612}; 617};
613 618
614struct pp_fan_info { 619struct pp_fan_info {
@@ -689,10 +694,15 @@ enum PP_TABLE_VERSION {
689/** 694/**
690 * The main hardware manager structure. 695 * The main hardware manager structure.
691 */ 696 */
697#define Workload_Policy_Max 5
698
692struct pp_hwmgr { 699struct pp_hwmgr {
700 void *adev;
693 uint32_t chip_family; 701 uint32_t chip_family;
694 uint32_t chip_id; 702 uint32_t chip_id;
695 uint32_t smu_version; 703 uint32_t smu_version;
704 bool pm_en;
705 struct mutex smu_lock;
696 706
697 uint32_t pp_table_version; 707 uint32_t pp_table_version;
698 void *device; 708 void *device;
@@ -739,14 +749,19 @@ struct pp_hwmgr {
739 struct pp_power_state *uvd_ps; 749 struct pp_power_state *uvd_ps;
740 struct amd_pp_display_configuration display_config; 750 struct amd_pp_display_configuration display_config;
741 uint32_t feature_mask; 751 uint32_t feature_mask;
742 752 bool avfs_supported;
743 /* UMD Pstate */ 753 /* UMD Pstate */
744 struct amd_pp_profile gfx_power_profile;
745 struct amd_pp_profile compute_power_profile;
746 struct amd_pp_profile default_gfx_power_profile;
747 struct amd_pp_profile default_compute_power_profile;
748 enum amd_pp_profile_type current_power_profile;
749 bool en_umd_pstate; 754 bool en_umd_pstate;
755 uint32_t power_profile_mode;
756 uint32_t default_power_profile_mode;
757 uint32_t pstate_sclk;
758 uint32_t pstate_mclk;
759 bool od_enabled;
760 uint32_t power_limit;
761 uint32_t default_power_limit;
762 uint32_t workload_mask;
763 uint32_t workload_prority[Workload_Policy_Max];
764 uint32_t workload_setting[Workload_Policy_Max];
750}; 765};
751 766
752struct cgs_irq_src_funcs { 767struct cgs_irq_src_funcs {
@@ -754,166 +769,17 @@ struct cgs_irq_src_funcs {
754 cgs_irq_handler_func_t handler; 769 cgs_irq_handler_func_t handler;
755}; 770};
756 771
757extern int hwmgr_early_init(struct pp_instance *handle); 772extern int hwmgr_early_init(struct pp_hwmgr *hwmgr);
758extern int hwmgr_hw_init(struct pp_instance *handle); 773extern int hwmgr_hw_init(struct pp_hwmgr *hwmgr);
759extern int hwmgr_hw_fini(struct pp_instance *handle); 774extern int hwmgr_hw_fini(struct pp_hwmgr *hwmgr);
760extern int hwmgr_hw_suspend(struct pp_instance *handle); 775extern int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr);
761extern int hwmgr_hw_resume(struct pp_instance *handle); 776extern int hwmgr_hw_resume(struct pp_hwmgr *hwmgr);
762extern int hwmgr_handle_task(struct pp_instance *handle, 777extern int hwmgr_handle_task(struct pp_hwmgr *hwmgr,
763 enum amd_pp_task task_id, 778 enum amd_pp_task task_id,
764 void *input, void *output); 779 enum amd_pm_state_type *user_state);
765extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
766 uint32_t value, uint32_t mask);
767
768extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
769 uint32_t indirect_port,
770 uint32_t index,
771 uint32_t value,
772 uint32_t mask);
773
774extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
775 uint32_t index,
776 uint32_t value, uint32_t mask);
777extern int phm_wait_for_indirect_register_unequal(
778 struct pp_hwmgr *hwmgr,
779 uint32_t indirect_port, uint32_t index,
780 uint32_t value, uint32_t mask);
781
782
783extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
784extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
785extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
786
787extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
788extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
789extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
790extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
791extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
792extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
793extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
794extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
795extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
796 uint32_t voltage);
797extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
798extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
799extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
800extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
801 uint16_t virtual_voltage_id, int32_t *sclk);
802extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
803extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
804extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
805
806extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
807extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
808extern int rv_init_function_pointers(struct pp_hwmgr *hwmgr);
809
810extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
811 uint32_t sclk, uint16_t id, uint16_t *voltage);
812
813#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
814
815#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
816#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
817
818#define PHM_SET_FIELD(origval, reg, field, fieldval) \
819 (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
820 (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
821
822#define PHM_GET_FIELD(value, reg, field) \
823 (((value) & PHM_FIELD_MASK(reg, field)) >> \
824 PHM_FIELD_SHIFT(reg, field))
825
826
827/* Operations on named fields. */
828
829#define PHM_READ_FIELD(device, reg, field) \
830 PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
831
832#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
833 PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
834 reg, field)
835
836#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
837 PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
838 reg, field)
839
840#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
841 cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
842 cgs_read_register(device, mm##reg), reg, field, fieldval))
843
844#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
845 cgs_write_ind_register(device, port, ix##reg, \
846 PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
847 reg, field, fieldval))
848
849#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
850 cgs_write_ind_register(device, port, ix##reg, \
851 PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
852 reg, field, fieldval))
853
854#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
855 phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
856 780
857 781
858#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ 782#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
859 PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
860
861#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
862 PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
863 << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
864
865#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
866 phm_wait_for_indirect_register_unequal(hwmgr, \
867 mm##port##_INDEX, index, value, mask)
868
869#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
870 PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
871
872#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
873 PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
874 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
875 PHM_FIELD_MASK(reg, field) )
876
877
878#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
879 port, index, value, mask) \
880 phm_wait_for_indirect_register_unequal(hwmgr, \
881 mm##port##_INDEX_11, index, value, mask)
882
883#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
884 PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
885
886#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
887 PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
888 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
889 PHM_FIELD_MASK(reg, field))
890
891
892#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
893 port, index, value, mask) \
894 phm_wait_on_indirect_register(hwmgr, \
895 mm##port##_INDEX_11, index, value, mask)
896
897#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
898 PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
899
900#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
901 PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
902 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
903 PHM_FIELD_MASK(reg, field))
904
905#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
906 index, value, mask) \
907 phm_wait_for_register_unequal(hwmgr, \
908 index, value, mask)
909
910#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
911 PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
912 mm##reg, value, mask)
913 783
914#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
915 PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
916 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
917 PHM_FIELD_MASK(reg, field))
918 784
919#endif /* _HWMGR_H_ */ 785#endif /* _HWMGR_H_ */
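The PHM_SET_FIELD/PHM_GET_FIELD helpers removed from hwmgr.h above implement the usual mask-and-shift access to named register fields (the real macros build their mask and shift names from reg##__##field##_MASK / __SHIFT definitions). A stand-alone sketch of the same read-modify-write pattern with a made-up field layout, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field occupying bits [11:4] of a 32-bit register. */
#define EXAMPLE_FIELD_MASK   0x00000ff0u
#define EXAMPLE_FIELD_SHIFT  4

static uint32_t get_field(uint32_t val)
{
	return (val & EXAMPLE_FIELD_MASK) >> EXAMPLE_FIELD_SHIFT;
}

static uint32_t set_field(uint32_t val, uint32_t fieldval)
{
	/* Clear the field, then OR in the shifted new value - the same
	 * shape as the PHM_SET_FIELD() macro in the removed lines. */
	return (val & ~EXAMPLE_FIELD_MASK) |
	       (EXAMPLE_FIELD_MASK & (fieldval << EXAMPLE_FIELD_SHIFT));
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	reg = set_field(reg, 0x5a);
	printf("field now: 0x%02x, register: 0x%08x\n", get_field(reg), reg);
	return 0;
}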
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
deleted file mode 100644
index b8f4b73c322e..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
+++ /dev/null
@@ -1,412 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef POLARIS10_PP_SMC_H
25#define POLARIS10_PP_SMC_H
26
27
28#pragma pack(push, 1)
29
30#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305)
31
32#define PPSMC_SWSTATE_FLAG_DC 0x01
33#define PPSMC_SWSTATE_FLAG_UVD 0x02
34#define PPSMC_SWSTATE_FLAG_VCE 0x04
35
36#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
37#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
38#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
39
40#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
41#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
42#define PPSMC_SYSTEMFLAG_GDDR5 0x04
43
44#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
45
46#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
47#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
48
49#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
50#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
51
52#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
53#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
54
55
56#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
57#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
58#define PPSMC_DPM2FLAGS_OCP 0x04
59
60
61#define PPSMC_DISPLAY_WATERMARK_LOW 0
62#define PPSMC_DISPLAY_WATERMARK_HIGH 1
63
64
65#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
66#define PPSMC_STATEFLAG_POWERBOOST 0x02
67#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
68#define PPSMC_STATEFLAG_POWERSHIFT 0x08
69#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
70#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
71#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
72
73
74#define FDO_MODE_HARDWARE 0
75#define FDO_MODE_PIECE_WISE_LINEAR 1
76
77enum FAN_CONTROL {
78 FAN_CONTROL_FUZZY,
79 FAN_CONTROL_TABLE
80};
81
82
83#define PPSMC_Result_OK ((uint16_t)0x01)
84#define PPSMC_Result_NoMore ((uint16_t)0x02)
85
86#define PPSMC_Result_NotNow ((uint16_t)0x03)
87#define PPSMC_Result_Failed ((uint16_t)0xFF)
88#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
89#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
90
91typedef uint16_t PPSMC_Result;
92
93#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
94
95
96#define PPSMC_MSG_Halt ((uint16_t)0x10)
97#define PPSMC_MSG_Resume ((uint16_t)0x11)
98#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
99#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
100#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
101#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
102#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
103#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
104#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
105#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
106#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
107#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
108#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
109#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
110#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
111#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
112#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
113#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
114#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
115#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
116#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
117#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
118#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
119#define PPSMC_CACHistoryStart ((uint16_t)0x57)
120#define PPSMC_CACHistoryStop ((uint16_t)0x58)
121#define PPSMC_TDPClampingActive ((uint16_t)0x59)
122#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
123#define PPSMC_StartFanControl ((uint16_t)0x5B)
124#define PPSMC_StopFanControl ((uint16_t)0x5C)
125#define PPSMC_NoDisplay ((uint16_t)0x5D)
126#define PPSMC_HasDisplay ((uint16_t)0x5E)
127#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
128#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
129#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
130#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
131#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
132#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
133#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
134#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
135#define PPSMC_OCPActive ((uint16_t)0x6C)
136#define PPSMC_OCPInactive ((uint16_t)0x6D)
137#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
138#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
139#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
140#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
141#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
142#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
143#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
144#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
145#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
146#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
147#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
148#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
149#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
150#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
151#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
152#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
153
154#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
155#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
156#define PPSMC_FlushDataCache ((uint16_t)0x80)
157#define PPSMC_FlushInstrCache ((uint16_t)0x81)
158
159#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
160#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
161
162#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
163
164#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
165#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
166#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
167#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
168
169#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
170#define PPSM_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
171#define PPSM_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
172#define PPSM_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
173
174#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
175
176#define PPSMC_MSG_Test ((uint16_t) 0x100)
177#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101)
178#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102)
179#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103)
180#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104)
181#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105)
182#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106)
183#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107)
184#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108)
185#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109)
186#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a)
187#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b)
188#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e)
189#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f)
190#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110)
191#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111)
192#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112)
193#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113)
194#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114)
195#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117)
196#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118)
197#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119)
198#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a)
199#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b)
200#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c)
201#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
202#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e)
203#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f)
204#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120)
205#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121)
206#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122)
207#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123)
208#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124)
209#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125)
210#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126)
211#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127)
212#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128)
213
214#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129)
215#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A)
216#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B)
217#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C)
218#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
219#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
220#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
221#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
222#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
223#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
224#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
225#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134)
226#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
227#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
228#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
229#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
230#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
231#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
232#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b)
233#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c)
234#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
235#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e)
236#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f)
237#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
238#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
239#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142)
240#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143)
241#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144)
242#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
243#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
244#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
245#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
246#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
247#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
248#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b)
249#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c)
250#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d)
251
252#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
253#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
254#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
255#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
256#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152)
257#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153)
258#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
259#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
260#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
261#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
262#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
263#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
264#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
265#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
266#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c)
267#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d)
268#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e)
269#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
270#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160)
271#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161)
272#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
273#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163)
274#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164)
275#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165)
276#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166)
277#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
278#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168)
279#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
280#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
281#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b)
282#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c)
283#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d)
284#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e)
285#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f)
286#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170)
287#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171)
288#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172)
289#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173)
290#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174)
291#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175)
292#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176)
293#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177)
294#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178)
295#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179)
296#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a)
297#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b)
298#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c)
299#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d)
300#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e)
301#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f)
302#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180)
303#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181)
304#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182)
305#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184)
306#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
307#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
308#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
309#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
310#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
311#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
312#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
313#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
314#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D)
315#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E)
316#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
317#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
318#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
319#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192)
320#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193)
321#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194)
322#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195)
323#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207)
324#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196)
325#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208)
326#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197)
327#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198)
328#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199)
329#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
330#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B)
331#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
332#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
333
334#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
335#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
336#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202)
337#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203)
338#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204)
339#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
340#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206)
341#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209)
342#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A)
343
344#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240)
345#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241)
346#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242)
347#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243)
348#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244)
349#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245)
350#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246)
351
352#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
353#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
354#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
355#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
356#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
357#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255)
358#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256)
359#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257)
360#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258)
361#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259)
362#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A)
363#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B)
364#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C)
365#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D)
366#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260)
367#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261)
368#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262)
369#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263)
370#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264)
371#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
372#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266)
373#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267)
374#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268)
375#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269)
376#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A)
377#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B)
378
379#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C)
380#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x275)
381#define PPSMC_MSG_UseNewGPIOScheme ((uint16_t) 0x277)
382#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400)
383#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401)
384#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402)
385#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
386#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
387
388#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
389#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
390#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
391
392#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
393#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
394
395#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
396
397#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
398#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
399#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
400#define PPSMC_MSG_GetData ((uint16_t) 0x801)
401#define PPSMC_MSG_SetData ((uint16_t) 0x802)
402
403typedef uint16_t PPSMC_Msg;
404
405#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
406#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
407#define PPSMC_EVENT_STATUS_DC 0x00000004
408
409#pragma pack(pop)
410
411#endif
412
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index 827860fffe78..a99b5cbb113e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -122,8 +122,8 @@ struct PP_StateSoftwareAlgorithmBlock {
122 * Type to hold a temperature range. 122 * Type to hold a temperature range.
123 */ 123 */
124struct PP_TemperatureRange { 124struct PP_TemperatureRange {
125 uint32_t min; 125 int min;
126 uint32_t max; 126 int max;
127}; 127};
128 128
129struct PP_StateValidationBlock { 129struct PP_StateValidationBlock {
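Changing PP_TemperatureRange from uint32_t to int lets a thermal policy express sub-zero minimums; the SMU7 thermal tables added later in this series store millidegrees Celsius and use -273150 (absolute zero) as the lower bound. A minimal sketch of how such a signed range might be applied, not part of this patch; the helper name pp_clamp_temp is hypothetical:

/* Clamp a reported temperature (millidegrees C) into a signed range. */
static int pp_clamp_temp(const struct PP_TemperatureRange *range, int temp)
{
	if (temp < range->min)
		return range->min;
	if (temp > range->max)
		return range->max;
	return temp;
}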
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h b/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h
deleted file mode 100644
index 0faf6a25c18b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _PP_FEATURE_H_
25#define _PP_FEATURE_H_
26
27/**
28 * PowerPlay feature ids.
29 */
30enum pp_feature {
31 PP_Feature_PowerPlay = 0,
32 PP_Feature_User2DPerformance,
33 PP_Feature_User3DPerformance,
34 PP_Feature_VariBright,
35 PP_Feature_VariBrightOnPowerXpress,
36 PP_Feature_ReducedRefreshRate,
37 PP_Feature_GFXClockGating,
38 PP_Feature_OverdriveTest,
39 PP_Feature_OverDrive,
40 PP_Feature_PowerBudgetWaiver,
41 PP_Feature_PowerControl,
42 PP_Feature_PowerControl_2,
43 PP_Feature_MultiUVDState,
44 PP_Feature_Force3DClock,
45 PP_Feature_BACO,
46 PP_Feature_PowerDown,
47 PP_Feature_DynamicUVDState,
48 PP_Feature_VCEDPM,
49 PP_Feature_PPM,
50 PP_Feature_ACP_POWERGATING,
51 PP_Feature_FFC,
52 PP_Feature_FPS,
53 PP_Feature_ViPG,
54 PP_Feature_Max
55};
56
57/**
58 * Struct for PowerPlay feature info.
59 */
60struct pp_feature_info {
61 bool supported; /* feature supported by PowerPlay */
62 bool enabled; /* feature enabled in PowerPlay */
63 bool enabled_default; /* default enable status of the feature */
64 uint32_t version; /* feature version */
65};
66
67#endif /* _PP_FEATURE_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
index b7ab69e4c254..214f370c5efd 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
@@ -23,7 +23,8 @@
23#ifndef PP_SOC15_H 23#ifndef PP_SOC15_H
24#define PP_SOC15_H 24#define PP_SOC15_H
25 25
26#include "soc15ip.h" 26#include "soc15_hw_ip.h"
27#include "vega10_ip_offset.h"
27 28
28inline static uint32_t soc15_get_register_offset( 29inline static uint32_t soc15_get_register_offset(
29 uint32_t hw_id, 30 uint32_t hw_id,
@@ -43,7 +44,8 @@ inline static uint32_t soc15_get_register_offset(
43 reg = DF_BASE.instance[inst].segment[segment] + offset; 44 reg = DF_BASE.instance[inst].segment[segment] + offset;
44 else if (hw_id == GC_HWID) 45 else if (hw_id == GC_HWID)
45 reg = GC_BASE.instance[inst].segment[segment] + offset; 46 reg = GC_BASE.instance[inst].segment[segment] + offset;
46 47 else if (hw_id == SMUIO_HWID)
48 reg = SMUIO_BASE.instance[inst].segment[segment] + offset;
47 return reg; 49 return reg;
48} 50}
49 51
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
index 7d1eec5d2e7a..201d2b6329ab 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -20,19 +20,21 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#ifndef _PP_INSTANCE_H_ 23#ifndef PP_THERMAL_H
24#define _PP_INSTANCE_H_ 24#define PP_THERMAL_H
25 25
26#include "hwmgr.h" 26#include "power_state.h"
27 27
28struct pp_instance { 28static const struct PP_TemperatureRange SMU7ThermalWithDelayPolicy[] =
29 uint32_t chip_family; 29{
30 uint32_t chip_id; 30 {-273150, 99000},
31 bool pm_en; 31 { 120000, 120000},
32 uint32_t feature_mask; 32};
33 void *device; 33
34 struct pp_hwmgr *hwmgr; 34static const struct PP_TemperatureRange SMU7ThermalPolicy[] =
35 struct mutex pp_lock; 35{
36 {-273150, 99000},
37 { 120000, 120000},
36}; 38};
37 39
38#endif 40#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index f15f4df9d0a9..426bff2aad2b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -80,7 +80,8 @@
80#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32 80#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32
81#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33 81#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33
82#define PPSMC_MSG_SetSoftMaxVcn 0x34 82#define PPSMC_MSG_SetSoftMaxVcn 0x34
83#define PPSMC_Message_Count 0x35 83#define PPSMC_MSG_PowerGateMmHub 0x35
84#define PPSMC_Message_Count 0x36
84 85
85 86
86typedef uint16_t PPSMC_Result; 87typedef uint16_t PPSMC_Result;
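The new PPSMC_MSG_PowerGateMmHub message id is meant to be issued through the generic SMU message helper declared in smumgr.h. A minimal sketch, not part of this patch; the wrapper name is hypothetical:

/* Ask the SMU firmware to power-gate the MMHUB block. */
static int rv_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}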
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7.h b/drivers/gpu/drm/amd/powerplay/inc/smu7.h
index 75a380a15292..e14072d45918 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7.h
@@ -82,6 +82,25 @@
82#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT) 82#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
83 83
84 84
85/* Voltage Regulator Configuration */
86/* VR Config info is contained in dpmTable */
87
88#define VRCONF_VDDC_MASK 0x000000FF
89#define VRCONF_VDDC_SHIFT 0
90#define VRCONF_VDDGFX_MASK 0x0000FF00
91#define VRCONF_VDDGFX_SHIFT 8
92#define VRCONF_VDDCI_MASK 0x00FF0000
93#define VRCONF_VDDCI_SHIFT 16
94#define VRCONF_MVDD_MASK 0xFF000000
95#define VRCONF_MVDD_SHIFT 24
96
97#define VR_MERGED_WITH_VDDC 0
98#define VR_SVI2_PLANE_1 1
99#define VR_SVI2_PLANE_2 2
100#define VR_SMIO_PATTERN_1 3
101#define VR_SMIO_PATTERN_2 4
102#define VR_STATIC_VOLTAGE 5
103
85struct SMU7_PIDController 104struct SMU7_PIDController
86{ 105{
87 uint32_t Ki; 106 uint32_t Ki;
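The new VRCONF_* masks and shifts describe how the regulator source for each of the four rails (VDDC, VDDGFX, VDDCI, MVDD) is packed into the single VRConfig dword added to the DPM table. One plausible packing, mirroring the ci_populate_vr_config() helper introduced later in this diff; illustrative only:

uint32_t vr_config = 0;

vr_config |= (VR_SVI2_PLANE_1 << VRCONF_VDDGFX_SHIFT) & VRCONF_VDDGFX_MASK;
vr_config |= (VR_SVI2_PLANE_2 << VRCONF_VDDC_SHIFT) & VRCONF_VDDC_MASK;
vr_config |= (VR_SMIO_PATTERN_1 << VRCONF_VDDCI_SHIFT) & VRCONF_VDDCI_MASK;
vr_config |= (VR_SMIO_PATTERN_2 << VRCONF_MVDD_SHIFT) & VRCONF_MVDD_MASK;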
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h
index 0b0b404ff091..ee876745dd12 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h
@@ -316,7 +316,8 @@ struct SMU7_Discrete_DpmTable
316 uint8_t AcpLevelCount; 316 uint8_t AcpLevelCount;
317 uint8_t SamuLevelCount; 317 uint8_t SamuLevelCount;
318 uint8_t MasterDeepSleepControl; 318 uint8_t MasterDeepSleepControl;
319 uint32_t Reserved[5]; 319 uint32_t VRConfig;
320 uint32_t Reserved[4];
320// uint32_t SamuDefaultLevel; 321// uint32_t SamuDefaultLevel;
321 322
322 SMU7_Discrete_GraphicsLevel GraphicsLevel [SMU7_MAX_LEVELS_GRAPHICS]; 323 SMU7_Discrete_GraphicsLevel GraphicsLevel [SMU7_MAX_LEVELS_GRAPHICS];
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9.h b/drivers/gpu/drm/amd/powerplay/inc/smu9.h
index 550ed675027a..70ac4d477be2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9.h
@@ -58,7 +58,7 @@
58#define FEATURE_FAST_PPT_BIT 26 58#define FEATURE_FAST_PPT_BIT 26
59#define FEATURE_GFX_EDC_BIT 27 59#define FEATURE_GFX_EDC_BIT 27
60#define FEATURE_ACG_BIT 28 60#define FEATURE_ACG_BIT 28
61#define FEATURE_SPARE_29_BIT 29 61#define FEATURE_PCC_LIMIT_CONTROL_BIT 29
62#define FEATURE_SPARE_30_BIT 30 62#define FEATURE_SPARE_30_BIT 30
63#define FEATURE_SPARE_31_BIT 31 63#define FEATURE_SPARE_31_BIT 31
64 64
@@ -94,7 +94,7 @@
94#define FEATURE_FAST_PPT_MASK (1 << FAST_PPT_BIT ) 94#define FEATURE_FAST_PPT_MASK (1 << FAST_PPT_BIT )
95#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT ) 95#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT )
96#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT ) 96#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT )
97#define FFEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT ) 97#define FEATURE_PCC_LIMIT_CONTROL_MASK (1 << FEATURE_PCC_LIMIT_CONTROL_BIT )
98#define FFEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT ) 98#define FFEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT )
99#define FFEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT ) 99#define FFEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT )
100/* Workload types */ 100/* Workload types */
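Renaming spare bit 29 to FEATURE_PCC_LIMIT_CONTROL_BIT gives the corresponding mask a real consumer; testing it follows the usual feature-bitmask pattern. A minimal sketch, illustrative only; the function name is hypothetical:

/* True if the SMU reports PCC limit control among its enabled features. */
static bool pcc_limit_control_enabled(uint32_t enabled_features)
{
	return (enabled_features & FEATURE_PCC_LIMIT_CONTROL_MASK) != 0;
}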
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index b1b27b2128f6..fc3a2a533586 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -26,32 +26,6 @@
26#include "amd_powerplay.h" 26#include "amd_powerplay.h"
27#include "hwmgr.h" 27#include "hwmgr.h"
28 28
29#define smu_lower_32_bits(n) ((uint32_t)(n))
30#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
31
32
33
34enum AVFS_BTC_STATUS {
35 AVFS_BTC_BOOT = 0,
36 AVFS_BTC_BOOT_STARTEDSMU,
37 AVFS_LOAD_VIRUS,
38 AVFS_BTC_VIRUS_LOADED,
39 AVFS_BTC_VIRUS_FAIL,
40 AVFS_BTC_COMPLETED_PREVIOUSLY,
41 AVFS_BTC_ENABLEAVFS,
42 AVFS_BTC_STARTED,
43 AVFS_BTC_FAILED,
44 AVFS_BTC_RESTOREVFT_FAILED,
45 AVFS_BTC_SAVEVFT_FAILED,
46 AVFS_BTC_DPMTABLESETUP_FAILED,
47 AVFS_BTC_COMPLETED_UNSAVED,
48 AVFS_BTC_COMPLETED_SAVED,
49 AVFS_BTC_COMPLETED_RESTORED,
50 AVFS_BTC_DISABLED,
51 AVFS_BTC_NOTSUPPORTED,
52 AVFS_BTC_SMUMSG_ERROR
53};
54
55enum SMU_TABLE { 29enum SMU_TABLE {
56 SMU_UVD_TABLE = 0, 30 SMU_UVD_TABLE = 0,
57 SMU_VCE_TABLE, 31 SMU_VCE_TABLE,
@@ -95,6 +69,11 @@ enum SMU_MAC_DEFINITION {
95 SMU_UVD_MCLK_HANDSHAKE_DISABLE, 69 SMU_UVD_MCLK_HANDSHAKE_DISABLE,
96}; 70};
97 71
72enum SMU10_TABLE_ID {
73 SMU10_WMTABLE = 0,
74 SMU10_CLOCKTABLE,
75};
76
98extern int smum_get_argument(struct pp_hwmgr *hwmgr); 77extern int smum_get_argument(struct pp_hwmgr *hwmgr);
99 78
100extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); 79extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
@@ -106,13 +85,6 @@ extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
106extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, 85extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
107 uint16_t msg, uint32_t parameter); 86 uint16_t msg, uint32_t parameter);
108 87
109extern int smu_allocate_memory(void *device, uint32_t size,
110 enum cgs_gpu_mem_type type,
111 uint32_t byte_align, uint64_t *mc_addr,
112 void **kptr, void *handle);
113
114extern int smu_free_memory(void *device, void *handle);
115
116extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); 88extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
117 89
118extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); 90extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
@@ -129,10 +101,10 @@ extern uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value);
129 101
130extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr); 102extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
131 103
132extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
133 struct amd_pp_profile *request);
134
135extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); 104extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr);
136 105
106extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting);
107
108extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
137 109
138#endif 110#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index 247c97397a27..c3ed737ab951 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -131,6 +131,7 @@ typedef uint16_t PPSMC_Result;
131#define PPSMC_MSG_RunAcgInOpenLoop 0x5E 131#define PPSMC_MSG_RunAcgInOpenLoop 0x5E
132#define PPSMC_MSG_InitializeAcg 0x5F 132#define PPSMC_MSG_InitializeAcg 0x5F
133#define PPSMC_MSG_GetCurrPkgPwr 0x61 133#define PPSMC_MSG_GetCurrPkgPwr 0x61
134#define PPSMC_MSG_SetPccThrottleLevel 0x67
134#define PPSMC_MSG_UpdatePkgPwrPidAlpha 0x68 135#define PPSMC_MSG_UpdatePkgPwrPidAlpha 0x68
135#define PPSMC_Message_Count 0x69 136#define PPSMC_Message_Count 0x69
136 137
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 98e701e4f553..735c38624ce1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -23,9 +23,9 @@
23# Makefile for the 'smu manager' sub-component of powerplay. 23# Makefile for the 'smu manager' sub-component of powerplay.
24# It provides the smu management services for the driver. 24# It provides the smu management services for the driver.
25 25
26SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \ 26SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
27 polaris10_smumgr.o iceland_smumgr.o \ 27 polaris10_smumgr.o iceland_smumgr.o \
28 smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o 28 smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o
29 29
30AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) 30AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
31 31
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 0b4a55660de4..5d6dfdfbbbb6 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -236,13 +236,10 @@ static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
236static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) 236static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
237{ 237{
238 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 238 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
239 struct cgs_system_info sys_info = {0}; 239 struct amdgpu_device *adev = hwmgr->adev;
240 uint32_t dev_id; 240 uint32_t dev_id;
241 241
242 sys_info.size = sizeof(struct cgs_system_info); 242 dev_id = adev->pdev->device;
243 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
244 cgs_query_system_info(hwmgr->device, &sys_info);
245 dev_id = (uint32_t)sys_info.value;
246 243
247 switch (dev_id) { 244 switch (dev_id) {
248 case 0x67BA: 245 case 0x67BA:
@@ -411,8 +408,7 @@ static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
411} 408}
412 409
413static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr, 410static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
414 uint32_t clock, uint16_t sclk_al_threshold, 411 uint32_t clock, struct SMU7_Discrete_GraphicsLevel *level)
415 struct SMU7_Discrete_GraphicsLevel *level)
416{ 412{
417 int result; 413 int result;
418 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 414 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -438,14 +434,14 @@ static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
438 clock, 434 clock,
439 &level->MinVddcPhases); 435 &level->MinVddcPhases);
440 436
441 level->ActivityLevel = sclk_al_threshold; 437 level->ActivityLevel = data->current_profile_setting.sclk_activity;
442 level->CcPwrDynRm = 0; 438 level->CcPwrDynRm = 0;
443 level->CcPwrDynRm1 = 0; 439 level->CcPwrDynRm1 = 0;
444 level->EnabledForActivity = 0; 440 level->EnabledForActivity = 0;
445 /* this level can be used for throttling.*/ 441 /* this level can be used for throttling.*/
446 level->EnabledForThrottle = 1; 442 level->EnabledForThrottle = 1;
447 level->UpH = 0; 443 level->UpH = data->current_profile_setting.sclk_up_hyst;
448 level->DownH = 0; 444 level->DownH = data->current_profile_setting.sclk_down_hyst;
449 level->VoltageDownH = 0; 445 level->VoltageDownH = 0;
450 level->PowerThrottle = 0; 446 level->PowerThrottle = 0;
451 447
@@ -492,7 +488,6 @@ static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
492 for (i = 0; i < dpm_table->sclk_table.count; i++) { 488 for (i = 0; i < dpm_table->sclk_table.count; i++) {
493 result = ci_populate_single_graphic_level(hwmgr, 489 result = ci_populate_single_graphic_level(hwmgr,
494 dpm_table->sclk_table.dpm_levels[i].value, 490 dpm_table->sclk_table.dpm_levels[i].value,
495 (uint16_t)smu_data->activity_target[i],
496 &levels[i]); 491 &levels[i]);
497 if (result) 492 if (result)
498 return result; 493 return result;
@@ -860,10 +855,13 @@ static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
860 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL); 855 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
861 856
862 /* GPIO voltage control */ 857 /* GPIO voltage control */
863 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) 858 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
864 table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; 859 table->VddcLevel[count].Smio = (uint8_t) count;
865 else 860 table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
861 table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
862 } else {
866 table->VddcLevel[count].Smio = 0; 863 table->VddcLevel[count].Smio = 0;
864 }
867 } 865 }
868 866
869 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); 867 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
@@ -885,10 +883,13 @@ static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
885 &(data->vddci_voltage_table.entries[count]), 883 &(data->vddci_voltage_table.entries[count]),
886 &(table->VddciLevel[count])); 884 &(table->VddciLevel[count]));
887 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL); 885 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
888 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) 886 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
889 table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; 887 table->VddciLevel[count].Smio = (uint8_t) count;
890 else 888 table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
891 table->VddciLevel[count].Smio |= 0; 889 table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
890 } else {
891 table->VddciLevel[count].Smio = 0;
892 }
892 } 893 }
893 894
894 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); 895 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
@@ -910,10 +911,13 @@ static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
910 &(data->mvdd_voltage_table.entries[count]), 911 &(data->mvdd_voltage_table.entries[count]),
911 &table->MvddLevel[count]); 912 &table->MvddLevel[count]);
912 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL); 913 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
913 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) 914 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
914 table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; 915 table->MvddLevel[count].Smio = (uint8_t) count;
915 else 916 table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
916 table->MvddLevel[count].Smio |= 0; 917 table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
918 } else {
919 table->MvddLevel[count].Smio = 0;
920 }
917 } 921 }
918 922
919 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); 923 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
@@ -1217,12 +1221,12 @@ static int ci_populate_single_memory_level(
1217 1221
1218 memory_level->EnabledForThrottle = 1; 1222 memory_level->EnabledForThrottle = 1;
1219 memory_level->EnabledForActivity = 1; 1223 memory_level->EnabledForActivity = 1;
1220 memory_level->UpH = 0; 1224 memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
1221 memory_level->DownH = 100; 1225 memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
1222 memory_level->VoltageDownH = 0; 1226 memory_level->VoltageDownH = 0;
1223 1227
1224 /* Indicates maximum activity level for this performance level.*/ 1228 /* Indicates maximum activity level for this performance level.*/
1225 memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; 1229 memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
1226 memory_level->StutterEnable = 0; 1230 memory_level->StutterEnable = 0;
1227 memory_level->StrobeEnable = 0; 1231 memory_level->StrobeEnable = 0;
1228 memory_level->EdcReadEnable = 0; 1232 memory_level->EdcReadEnable = 0;
@@ -1302,7 +1306,7 @@ static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1302 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 1306 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1303 struct smu7_dpm_table *dpm_table = &data->dpm_table; 1307 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1304 int result; 1308 int result;
1305 struct cgs_system_info sys_info = {0}; 1309 struct amdgpu_device *adev = hwmgr->adev;
1306 uint32_t dev_id; 1310 uint32_t dev_id;
1307 1311
1308 uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel); 1312 uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
@@ -1323,10 +1327,7 @@ static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1323 1327
1324 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; 1328 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
1325 1329
1326 sys_info.size = sizeof(struct cgs_system_info); 1330 dev_id = adev->pdev->device;
1327 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
1328 cgs_query_system_info(hwmgr->device, &sys_info);
1329 dev_id = (uint32_t)sys_info.value;
1330 1331
1331 if ((dpm_table->mclk_table.count >= 2) 1332 if ((dpm_table->mclk_table.count >= 2)
1332 && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) { 1333 && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
@@ -1506,7 +1507,7 @@ static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1506 table->MemoryACPILevel.DownH = 100; 1507 table->MemoryACPILevel.DownH = 100;
1507 table->MemoryACPILevel.VoltageDownH = 0; 1508 table->MemoryACPILevel.VoltageDownH = 0;
1508 /* Indicates maximum activity level for this performance level.*/ 1509 /* Indicates maximum activity level for this performance level.*/
1509 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); 1510 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1510 1511
1511 table->MemoryACPILevel.StutterEnable = 0; 1512 table->MemoryACPILevel.StutterEnable = 0;
1512 table->MemoryACPILevel.StrobeEnable = 0; 1513 table->MemoryACPILevel.StrobeEnable = 0;
@@ -1941,6 +1942,37 @@ static int ci_start_smc(struct pp_hwmgr *hwmgr)
1941 return 0; 1942 return 0;
1942} 1943}
1943 1944
1945static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
1946{
1947 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1948 uint16_t config;
1949
1950 config = VR_SVI2_PLANE_1;
1951 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1952
1953 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1954 config = VR_SVI2_PLANE_2;
1955 table->VRConfig |= config;
1956 } else {
 1957 pr_info("VDDC should be on SVI2 controller!");
1958 }
1959
1960 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1961 config = VR_SVI2_PLANE_2;
1962 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1963 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1964 config = VR_SMIO_PATTERN_1;
1965 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1966 }
1967
1968 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1969 config = VR_SMIO_PATTERN_2;
1970 table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1971 }
1972
1973 return 0;
1974}
1975
1944static int ci_init_smc_table(struct pp_hwmgr *hwmgr) 1976static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
1945{ 1977{
1946 int result; 1978 int result;
@@ -2064,6 +2096,11 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
2064 table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count; 2096 table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
2065 table->PCIeGenInterval = 1; 2097 table->PCIeGenInterval = 1;
2066 2098
2099 result = ci_populate_vr_config(hwmgr, table);
2100 PP_ASSERT_WITH_CODE(0 == result,
2101 "Failed to populate VRConfig setting!", return result);
2102 data->vr_config = table->VRConfig;
2103
2067 ci_populate_smc_svi2_config(hwmgr, table); 2104 ci_populate_smc_svi2_config(hwmgr, table);
2068 2105
2069 for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++) 2106 for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
@@ -2084,6 +2121,7 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
2084 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; 2121 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2085 2122
2086 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); 2123 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2124 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2087 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); 2125 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
2088 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); 2126 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
2089 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); 2127 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
@@ -2728,35 +2766,8 @@ static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
2728 return ci_is_smc_ram_running(hwmgr); 2766 return ci_is_smc_ram_running(hwmgr);
2729} 2767}
2730 2768
2731static int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
2732 struct amd_pp_profile *request)
2733{
2734 struct ci_smumgr *smu_data = (struct ci_smumgr *)
2735 (hwmgr->smu_backend);
2736 struct SMU7_Discrete_GraphicsLevel *levels =
2737 smu_data->smc_state_table.GraphicsLevel;
2738 uint32_t array = smu_data->dpm_table_start +
2739 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2740 uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
2741 SMU7_MAX_LEVELS_GRAPHICS;
2742 uint32_t i;
2743
2744 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2745 levels[i].ActivityLevel =
2746 cpu_to_be16(request->activity_threshold);
2747 levels[i].EnabledForActivity = 1;
2748 levels[i].UpH = request->up_hyst;
2749 levels[i].DownH = request->down_hyst;
2750 }
2751
2752 return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
2753 array_size, SMC_RAM_END);
2754}
2755
2756
2757static int ci_smu_init(struct pp_hwmgr *hwmgr) 2769static int ci_smu_init(struct pp_hwmgr *hwmgr)
2758{ 2770{
2759 int i;
2760 struct ci_smumgr *ci_priv = NULL; 2771 struct ci_smumgr *ci_priv = NULL;
2761 2772
2762 ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL); 2773 ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
@@ -2764,9 +2775,6 @@ static int ci_smu_init(struct pp_hwmgr *hwmgr)
2764 if (ci_priv == NULL) 2775 if (ci_priv == NULL)
2765 return -ENOMEM; 2776 return -ENOMEM;
2766 2777
2767 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2768 ci_priv->activity_target[i] = 30;
2769
2770 hwmgr->smu_backend = ci_priv; 2778 hwmgr->smu_backend = ci_priv;
2771 2779
2772 return 0; 2780 return 0;
@@ -2785,6 +2793,102 @@ static int ci_start_smu(struct pp_hwmgr *hwmgr)
2785 return 0; 2793 return 0;
2786} 2794}
2787 2795
2796static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
2797 void *profile_setting)
2798{
2799 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2800 struct ci_smumgr *smu_data = (struct ci_smumgr *)
2801 (hwmgr->smu_backend);
2802 struct profile_mode_setting *setting;
2803 struct SMU7_Discrete_GraphicsLevel *levels =
2804 smu_data->smc_state_table.GraphicsLevel;
2805 uint32_t array = smu_data->dpm_table_start +
2806 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2807
2808 uint32_t mclk_array = smu_data->dpm_table_start +
2809 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2810 struct SMU7_Discrete_MemoryLevel *mclk_levels =
2811 smu_data->smc_state_table.MemoryLevel;
2812 uint32_t i;
2813 uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
2814
2815 if (profile_setting == NULL)
2816 return -EINVAL;
2817
2818 setting = (struct profile_mode_setting *)profile_setting;
2819
2820 if (setting->bupdate_sclk) {
2821 if (!data->sclk_dpm_key_disabled)
2822 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
2823 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2824 if (levels[i].ActivityLevel !=
2825 cpu_to_be16(setting->sclk_activity)) {
2826 levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
2827
2828 clk_activity_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2829 + offsetof(SMU7_Discrete_GraphicsLevel, ActivityLevel);
2830 offset = clk_activity_offset & ~0x3;
2831 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2832 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
2833 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2834
2835 }
2836 if (levels[i].UpH != setting->sclk_up_hyst ||
2837 levels[i].DownH != setting->sclk_down_hyst) {
2838 levels[i].UpH = setting->sclk_up_hyst;
2839 levels[i].DownH = setting->sclk_down_hyst;
2840 up_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2841 + offsetof(SMU7_Discrete_GraphicsLevel, UpH);
2842 down_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2843 + offsetof(SMU7_Discrete_GraphicsLevel, DownH);
2844 offset = up_hyst_offset & ~0x3;
2845 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2846 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpH, sizeof(uint8_t));
2847 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownH, sizeof(uint8_t));
2848 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2849 }
2850 }
2851 if (!data->sclk_dpm_key_disabled)
2852 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
2853 }
2854
2855 if (setting->bupdate_mclk) {
2856 if (!data->mclk_dpm_key_disabled)
2857 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
2858 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
2859 if (mclk_levels[i].ActivityLevel !=
2860 cpu_to_be16(setting->mclk_activity)) {
2861 mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
2862
2863 clk_activity_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2864 + offsetof(SMU7_Discrete_MemoryLevel, ActivityLevel);
2865 offset = clk_activity_offset & ~0x3;
2866 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2867 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
2868 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2869
2870 }
2871 if (mclk_levels[i].UpH != setting->mclk_up_hyst ||
2872 mclk_levels[i].DownH != setting->mclk_down_hyst) {
2873 mclk_levels[i].UpH = setting->mclk_up_hyst;
2874 mclk_levels[i].DownH = setting->mclk_down_hyst;
2875 up_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2876 + offsetof(SMU7_Discrete_MemoryLevel, UpH);
2877 down_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2878 + offsetof(SMU7_Discrete_MemoryLevel, DownH);
2879 offset = up_hyst_offset & ~0x3;
2880 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2881 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpH, sizeof(uint8_t));
2882 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownH, sizeof(uint8_t));
2883 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2884 }
2885 }
2886 if (!data->mclk_dpm_key_disabled)
2887 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
2888 }
2889 return 0;
2890}
2891
2788const struct pp_smumgr_func ci_smu_funcs = { 2892const struct pp_smumgr_func ci_smu_funcs = {
2789 .smu_init = ci_smu_init, 2893 .smu_init = ci_smu_init,
2790 .smu_fini = ci_smu_fini, 2894 .smu_fini = ci_smu_fini,
@@ -2806,5 +2910,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
2806 .get_mac_definition = ci_get_mac_definition, 2910 .get_mac_definition = ci_get_mac_definition,
2807 .initialize_mc_reg_table = ci_initialize_mc_reg_table, 2911 .initialize_mc_reg_table = ci_initialize_mc_reg_table,
2808 .is_dpm_running = ci_is_dpm_running, 2912 .is_dpm_running = ci_is_dpm_running,
2809 .populate_requested_graphic_levels = ci_populate_requested_graphic_levels, 2913 .update_dpm_settings = ci_update_dpm_settings,
2810}; 2914};
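ci_update_dpm_settings() patches 8- and 16-bit fields that live inside SMC RAM, which is only addressable in aligned dwords: it reads the containing dword through the indirect SMC register interface, merges the field with phm_set_field_to_u32(), and writes the dword back. A minimal sketch of that read-modify-write pattern, not part of this patch; the helper name ci_patch_smc_field is hypothetical:

static void ci_patch_smc_field(struct pp_hwmgr *hwmgr, uint32_t field_offset,
		uint32_t value, uint32_t size)
{
	/* SMC RAM is accessed as 4-byte words; find the containing dword. */
	uint32_t dword_offset = field_offset & ~0x3;
	uint32_t tmp;

	tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, dword_offset));
	/* Merge 'size' bytes of 'value' at the field's byte position. */
	tmp = phm_set_field_to_u32(field_offset, tmp, value, size);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, dword_offset,
			PP_HOST_TO_SMC_UL(tmp));
}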
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
index 8189cfa17c46..a8282705c569 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
@@ -70,8 +70,6 @@ struct ci_smumgr {
70 const struct ci_pt_defaults *power_tune_defaults; 70 const struct ci_pt_defaults *power_tune_defaults;
71 SMU7_Discrete_MCRegisters mc_regs; 71 SMU7_Discrete_MCRegisters mc_regs;
72 struct ci_mc_reg_table mc_reg_table; 72 struct ci_mc_reg_table mc_reg_table;
73 uint32_t activity_target[SMU7_MAX_LEVELS_GRAPHICS];
74
75}; 73};
76 74
77#endif 75#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
deleted file mode 100644
index 4d3aff381bca..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ /dev/null
@@ -1,871 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/gfp.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29
30#include "cgs_common.h"
31#include "smu/smu_8_0_d.h"
32#include "smu/smu_8_0_sh_mask.h"
33#include "smu8.h"
34#include "smu8_fusion.h"
35#include "cz_smumgr.h"
36#include "cz_ppsmc.h"
37#include "smu_ucode_xfer_cz.h"
38#include "gca/gfx_8_0_d.h"
39#include "gca/gfx_8_0_sh_mask.h"
40#include "smumgr.h"
41
42#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
43
44static const enum cz_scratch_entry firmware_list[] = {
45 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
46 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
47 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
48 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
49 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
50 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
51 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
52 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
53};
54
55static int cz_smum_get_argument(struct pp_hwmgr *hwmgr)
56{
57 if (hwmgr == NULL || hwmgr->device == NULL)
58 return -EINVAL;
59
60 return cgs_read_register(hwmgr->device,
61 mmSMU_MP1_SRBM2P_ARG_0);
62}
63
64static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
65{
66 int result = 0;
67
68 if (hwmgr == NULL || hwmgr->device == NULL)
69 return -EINVAL;
70
71 result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
72 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
73 if (result != 0) {
74 pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
75 return result;
76 }
77
78 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
79 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
80
81 return 0;
82}
83
84/* Send a message to the SMC, and wait for its response.*/
85static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
86{
87 int result = 0;
88
89 result = cz_send_msg_to_smc_async(hwmgr, msg);
90 if (result != 0)
91 return result;
92
93 return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
94 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
95}
96
97static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr,
98 uint32_t smc_address, uint32_t limit)
99{
100 if (hwmgr == NULL || hwmgr->device == NULL)
101 return -EINVAL;
102
103 if (0 != (3 & smc_address)) {
104 pr_err("SMC address must be 4 byte aligned\n");
105 return -EINVAL;
106 }
107
108 if (limit <= (smc_address + 3)) {
109 pr_err("SMC address beyond the SMC RAM area\n");
110 return -EINVAL;
111 }
112
113 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
114 SMN_MP1_SRAM_START_ADDR + smc_address);
115
116 return 0;
117}
118
119static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
120 uint32_t smc_address, uint32_t value, uint32_t limit)
121{
122 int result;
123
124 if (hwmgr == NULL || hwmgr->device == NULL)
125 return -EINVAL;
126
127 result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
128 if (!result)
129 cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
130
131 return result;
132}
133
134static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
135 uint16_t msg, uint32_t parameter)
136{
137 if (hwmgr == NULL || hwmgr->device == NULL)
138 return -EINVAL;
139
140 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
141
142 return cz_send_msg_to_smc(hwmgr, msg);
143}
144
145static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr,
146 uint32_t firmware)
147{
148 int i;
149 uint32_t index = SMN_MP1_SRAM_START_ADDR +
150 SMU8_FIRMWARE_HEADER_LOCATION +
151 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
152
153 if (hwmgr == NULL || hwmgr->device == NULL)
154 return -EINVAL;
155
156 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
157
158 for (i = 0; i < hwmgr->usec_timeout; i++) {
159 if (firmware ==
160 (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
161 break;
162 udelay(1);
163 }
164
165 if (i >= hwmgr->usec_timeout) {
166 pr_err("SMU check loaded firmware failed.\n");
167 return -EINVAL;
168 }
169
170 return 0;
171}
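/*
 * cz_check_fw_load_finish() polls the UcodeLoadStatus field of the SMU8
 * firmware header in SMC SRAM until every bit in the requested firmware
 * mask is reported as loaded, giving up after hwmgr->usec_timeout
 * microseconds.
 */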
172
173static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr)
174{
175 uint32_t reg_data;
176 uint32_t tmp;
177 int ret = 0;
178 struct cgs_firmware_info info = {0};
179 struct cz_smumgr *cz_smu;
180
181 if (hwmgr == NULL || hwmgr->device == NULL)
182 return -EINVAL;
183
184 cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
185 ret = cgs_get_firmware_info(hwmgr->device,
186 CGS_UCODE_ID_CP_MEC, &info);
187
188 if (ret)
189 return -EINVAL;
190
191 /* Disable MEC parsing/prefetching */
192 tmp = cgs_read_register(hwmgr->device,
193 mmCP_MEC_CNTL);
194 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
195 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
196 cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
197
198 tmp = cgs_read_register(hwmgr->device,
199 mmCP_CPC_IC_BASE_CNTL);
200
201 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
202 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
203 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
204 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
205 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
206
207 reg_data = smu_lower_32_bits(info.mc_addr) &
208 PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
209 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
210
211 reg_data = smu_upper_32_bits(info.mc_addr) &
212 PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
213 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
214
215 return 0;
216}
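/*
 * cz_load_mec_firmware() halts both MEC micro-engines via CP_MEC_CNTL,
 * reprograms the compute instruction-cache base control (VMID 0, ATC off,
 * MTYPE 1) and then points CP_CPC_IC_BASE_LO/HI at the MEC ucode address
 * obtained from CGS, so the compute micro-engines fetch their firmware
 * from that buffer once released.
 */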
217
218static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
219 enum cz_scratch_entry firmware_enum)
220{
221 uint8_t ret = 0;
222
223 switch (firmware_enum) {
224 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
225 ret = UCODE_ID_SDMA0;
226 break;
227 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
228 if (hwmgr->chip_id == CHIP_STONEY)
229 ret = UCODE_ID_SDMA0;
230 else
231 ret = UCODE_ID_SDMA1;
232 break;
233 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
234 ret = UCODE_ID_CP_CE;
235 break;
236 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
237 ret = UCODE_ID_CP_PFP;
238 break;
239 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
240 ret = UCODE_ID_CP_ME;
241 break;
242 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
243 ret = UCODE_ID_CP_MEC_JT1;
244 break;
245 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
246 if (hwmgr->chip_id == CHIP_STONEY)
247 ret = UCODE_ID_CP_MEC_JT1;
248 else
249 ret = UCODE_ID_CP_MEC_JT2;
250 break;
251 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
252 ret = UCODE_ID_GMCON_RENG;
253 break;
254 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
255 ret = UCODE_ID_RLC_G;
256 break;
257 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
258 ret = UCODE_ID_RLC_SCRATCH;
259 break;
260 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
261 ret = UCODE_ID_RLC_SRM_ARAM;
262 break;
263 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
264 ret = UCODE_ID_RLC_SRM_DRAM;
265 break;
266 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
267 ret = UCODE_ID_DMCU_ERAM;
268 break;
269 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
270 ret = UCODE_ID_DMCU_IRAM;
271 break;
272 case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
273 ret = TASK_ARG_INIT_MM_PWR_LOG;
274 break;
275 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
276 case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
277 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
278 case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
279 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
280 case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
281 ret = TASK_ARG_REG_MMIO;
282 break;
283 case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
284 ret = TASK_ARG_INIT_CLK_TABLE;
285 break;
286 }
287
288 return ret;
289}
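/*
 * Stoney carries a single SDMA engine and a single MEC jump table, so the
 * translation above folds SDMA1 onto SDMA0 and MEC_JT2 onto MEC_JT1 for
 * that chip; the scratch data IDs (SDMA halt, clockgating, IH registers,
 * and so on) all map to TASK_ARG_REG_MMIO.
 */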
290
291static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
292{
293 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
294
295 switch (fw_type) {
296 case UCODE_ID_SDMA0:
297 result = CGS_UCODE_ID_SDMA0;
298 break;
299 case UCODE_ID_SDMA1:
300 result = CGS_UCODE_ID_SDMA1;
301 break;
302 case UCODE_ID_CP_CE:
303 result = CGS_UCODE_ID_CP_CE;
304 break;
305 case UCODE_ID_CP_PFP:
306 result = CGS_UCODE_ID_CP_PFP;
307 break;
308 case UCODE_ID_CP_ME:
309 result = CGS_UCODE_ID_CP_ME;
310 break;
311 case UCODE_ID_CP_MEC_JT1:
312 result = CGS_UCODE_ID_CP_MEC_JT1;
313 break;
314 case UCODE_ID_CP_MEC_JT2:
315 result = CGS_UCODE_ID_CP_MEC_JT2;
316 break;
317 case UCODE_ID_RLC_G:
318 result = CGS_UCODE_ID_RLC_G;
319 break;
320 default:
321 break;
322 }
323
324 return result;
325}
326
327static int cz_smu_populate_single_scratch_task(
328 struct pp_hwmgr *hwmgr,
329 enum cz_scratch_entry fw_enum,
330 uint8_t type, bool is_last)
331{
332 uint8_t i;
333 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
334 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
335 struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
336
337 task->type = type;
338 task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
339 task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
340
341 for (i = 0; i < cz_smu->scratch_buffer_length; i++)
342 if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum)
343 break;
344
345 if (i >= cz_smu->scratch_buffer_length) {
346 pr_err("Invalid Firmware Type\n");
347 return -EINVAL;
348 }
349
350 task->addr.low = cz_smu->scratch_buffer[i].mc_addr_low;
351 task->addr.high = cz_smu->scratch_buffer[i].mc_addr_high;
352 task->size_bytes = cz_smu->scratch_buffer[i].data_size;
353
354 if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
355 struct cz_ih_meta_data *pIHReg_restore =
356 (struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr;
357 pIHReg_restore->command =
358 METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
359 }
360
361 return 0;
362}
363
364static int cz_smu_populate_single_ucode_load_task(
365 struct pp_hwmgr *hwmgr,
366 enum cz_scratch_entry fw_enum,
367 bool is_last)
368{
369 uint8_t i;
370 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
371 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
372 struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
373
374 task->type = TASK_TYPE_UCODE_LOAD;
375 task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
376 task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
377
378 for (i = 0; i < cz_smu->driver_buffer_length; i++)
379 if (cz_smu->driver_buffer[i].firmware_ID == fw_enum)
380 break;
381
382 if (i >= cz_smu->driver_buffer_length) {
383 pr_err("Invalid Firmware Type\n");
384 return -EINVAL;
385 }
386
387 task->addr.low = cz_smu->driver_buffer[i].mc_addr_low;
388 task->addr.high = cz_smu->driver_buffer[i].mc_addr_high;
389 task->size_bytes = cz_smu->driver_buffer[i].data_size;
390
391 return 0;
392}
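/*
 * Both populate helpers append one SMU_Task to the TOC buffer: the task
 * records its type, its argument (a UCODE_ID or TASK_ARG value), the GPU
 * address and size of the backing buffer, and a link to the next task, with
 * END_OF_TASK_LIST terminating a chain.  Scratch tasks point at
 * sub-allocations of the shared SMU buffer; ucode-load tasks point at the
 * firmware images registered in driver_buffer[].
 */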
393
394static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
395{
396 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
397
398 cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
399 cz_smu_populate_single_scratch_task(hwmgr,
400 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
401 TASK_TYPE_UCODE_SAVE, true);
402
403 return 0;
404}
405
406static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
407{
408 int i;
409 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
410 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
411
412 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
413 toc->JobList[i] = (uint8_t)IGNORE_JOB;
414
415 return 0;
416}
417
418static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
419{
420 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
421 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
422
423 toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
424 cz_smu_populate_single_scratch_task(hwmgr,
425 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
426 TASK_TYPE_UCODE_SAVE, false);
427
428 cz_smu_populate_single_scratch_task(hwmgr,
429 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
430 TASK_TYPE_UCODE_SAVE, true);
431
432 return 0;
433}
434
435
436static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
437{
438 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
439 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
440
441 toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
442
443 cz_smu_populate_single_ucode_load_task(hwmgr,
444 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
445 cz_smu_populate_single_ucode_load_task(hwmgr,
446 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
447 cz_smu_populate_single_ucode_load_task(hwmgr,
448 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
449 cz_smu_populate_single_ucode_load_task(hwmgr,
450 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
451
452 if (hwmgr->chip_id == CHIP_STONEY)
453 cz_smu_populate_single_ucode_load_task(hwmgr,
454 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
455 else
456 cz_smu_populate_single_ucode_load_task(hwmgr,
457 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
458
459 cz_smu_populate_single_ucode_load_task(hwmgr,
460 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
461
462 /* populate scratch */
463 cz_smu_populate_single_scratch_task(hwmgr,
464 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
465 TASK_TYPE_UCODE_LOAD, false);
466
467 cz_smu_populate_single_scratch_task(hwmgr,
468 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
469 TASK_TYPE_UCODE_LOAD, false);
470
471 cz_smu_populate_single_scratch_task(hwmgr,
472 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
473 TASK_TYPE_UCODE_LOAD, true);
474
475 return 0;
476}
477
478static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
479{
480 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
481
482 cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
483
484 cz_smu_populate_single_scratch_task(hwmgr,
485 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
486 TASK_TYPE_INITIALIZE, true);
487 return 0;
488}
489
490static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
491{
492 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
493
494 cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
495
496 cz_smu_populate_single_ucode_load_task(hwmgr,
497 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
498 if (hwmgr->chip_id != CHIP_STONEY)
499 cz_smu_populate_single_ucode_load_task(hwmgr,
500 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
501 cz_smu_populate_single_ucode_load_task(hwmgr,
502 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
503 cz_smu_populate_single_ucode_load_task(hwmgr,
504 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
505 cz_smu_populate_single_ucode_load_task(hwmgr,
506 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
507 cz_smu_populate_single_ucode_load_task(hwmgr,
508 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
509 if (hwmgr->chip_id != CHIP_STONEY)
510 cz_smu_populate_single_ucode_load_task(hwmgr,
511 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
512 cz_smu_populate_single_ucode_load_task(hwmgr,
513 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
514
515 return 0;
516}
517
518static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
519{
520 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
521
522 cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
523
524 cz_smu_populate_single_scratch_task(hwmgr,
525 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
526 TASK_TYPE_INITIALIZE, true);
527
528 return 0;
529}
530
531static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr)
532{
533 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
534
535 cz_smu->toc_entry_used_count = 0;
536 cz_smu_initialize_toc_empty_job_list(hwmgr);
537 cz_smu_construct_toc_for_rlc_aram_save(hwmgr);
538 cz_smu_construct_toc_for_vddgfx_enter(hwmgr);
539 cz_smu_construct_toc_for_vddgfx_exit(hwmgr);
540 cz_smu_construct_toc_for_power_profiling(hwmgr);
541 cz_smu_construct_toc_for_bootup(hwmgr);
542 cz_smu_construct_toc_for_clock_table(hwmgr);
543
544 return 0;
545}
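/*
 * cz_smu_construct_toc() lays the whole table of contents out in one pass:
 * an empty job list, then task chains for the RLC ARAM save, the vddgfx
 * enter/exit sequences, power profiling, the boot-up firmware load and the
 * clock-table transfer.  The toc_entry_* indices captured along the way are
 * the values later handed to PPSMC_MSG_ExecuteJob.
 */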
546
547static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
548{
549 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
550 uint32_t firmware_type;
551 uint32_t i;
552 int ret;
553 enum cgs_ucode_id ucode_id;
554 struct cgs_firmware_info info = {0};
555
556 cz_smu->driver_buffer_length = 0;
557
558 for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
559
560 firmware_type = cz_translate_firmware_enum_to_arg(hwmgr,
561 firmware_list[i]);
562
563 ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
564
565 ret = cgs_get_firmware_info(hwmgr->device,
566 ucode_id, &info);
567
568 if (ret == 0) {
569 cz_smu->driver_buffer[i].mc_addr_high =
570 smu_upper_32_bits(info.mc_addr);
571
572 cz_smu->driver_buffer[i].mc_addr_low =
573 smu_lower_32_bits(info.mc_addr);
574
575 cz_smu->driver_buffer[i].data_size = info.image_size;
576
577 cz_smu->driver_buffer[i].firmware_ID = firmware_list[i];
578 cz_smu->driver_buffer_length++;
579 }
580 }
581
582 return 0;
583}
584
585static int cz_smu_populate_single_scratch_entry(
586 struct pp_hwmgr *hwmgr,
587 enum cz_scratch_entry scratch_type,
588 uint32_t ulsize_byte,
589 struct cz_buffer_entry *entry)
590{
591 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
592 long long mc_addr =
593 ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
594 | cz_smu->smu_buffer.mc_addr_low;
595
596 uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
597
598 mc_addr += cz_smu->smu_buffer_used_bytes;
599
600 entry->data_size = ulsize_byte;
601 entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
602 cz_smu->smu_buffer_used_bytes;
603 entry->mc_addr_low = smu_lower_32_bits(mc_addr);
604 entry->mc_addr_high = smu_upper_32_bits(mc_addr);
605 entry->firmware_ID = scratch_type;
606
607 cz_smu->smu_buffer_used_bytes += ulsize_aligned;
608
609 return 0;
610}
611
612static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
613{
614 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
615 unsigned long i;
616
617 for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
618 if (cz_smu->scratch_buffer[i].firmware_ID
619 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
620 break;
621 }
622
623 *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
624
625 cz_send_msg_to_smc_with_parameter(hwmgr,
626 PPSMC_MSG_SetClkTableAddrHi,
627 cz_smu->scratch_buffer[i].mc_addr_high);
628
629 cz_send_msg_to_smc_with_parameter(hwmgr,
630 PPSMC_MSG_SetClkTableAddrLo,
631 cz_smu->scratch_buffer[i].mc_addr_low);
632
633 cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
634 cz_smu->toc_entry_clock_table);
635
636 cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
637
638 return 0;
639}
640
641static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
642{
643 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
644 unsigned long i;
645
646 for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
647 if (cz_smu->scratch_buffer[i].firmware_ID
648 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
649 break;
650 }
651
652 cz_send_msg_to_smc_with_parameter(hwmgr,
653 PPSMC_MSG_SetClkTableAddrHi,
654 cz_smu->scratch_buffer[i].mc_addr_high);
655
656 cz_send_msg_to_smc_with_parameter(hwmgr,
657 PPSMC_MSG_SetClkTableAddrLo,
658 cz_smu->scratch_buffer[i].mc_addr_low);
659
660 cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
661 cz_smu->toc_entry_clock_table);
662
663 cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
664
665 return 0;
666}
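/*
 * The two pptable helpers share one mechanism: locate the fusion clock
 * table scratch entry, tell the SMU where it lives (SetClkTableAddrHi/Lo),
 * run the clock-table job, and finally request a transfer either to DRAM
 * (download, so the driver can read the table) or back to the SMU (upload,
 * after the driver has modified it).
 */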
667
668static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
669{
670 struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend);
671 uint32_t smc_address;
672
673 if (!hwmgr->reload_fw) {
674 pr_info("skip reloading...\n");
675 return 0;
676 }
677
678 cz_smu_populate_firmware_entries(hwmgr);
679
680 cz_smu_construct_toc(hwmgr);
681
682 smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
683 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
684
685 cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
686
687 cz_send_msg_to_smc_with_parameter(hwmgr,
688 PPSMC_MSG_DriverDramAddrHi,
689 cz_smu->toc_buffer.mc_addr_high);
690
691 cz_send_msg_to_smc_with_parameter(hwmgr,
692 PPSMC_MSG_DriverDramAddrLo,
693 cz_smu->toc_buffer.mc_addr_low);
694
695 cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
696
697 cz_send_msg_to_smc_with_parameter(hwmgr,
698 PPSMC_MSG_ExecuteJob,
699 cz_smu->toc_entry_aram);
700 cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
701 cz_smu->toc_entry_power_profiling_index);
702
703 return cz_send_msg_to_smc_with_parameter(hwmgr,
704 PPSMC_MSG_ExecuteJob,
705 cz_smu->toc_entry_initialize_index);
706}
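/*
 * cz_request_smu_load_fw() drives the actual firmware load: it clears the
 * UcodeLoadStatus word in the firmware header, hands the SMU the GPU
 * address of the TOC (DriverDramAddrHi/Lo), issues InitJobs and then
 * executes the ARAM-save, power-profiling and initialize task chains built
 * by cz_smu_construct_toc().
 */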
707
708static int cz_start_smu(struct pp_hwmgr *hwmgr)
709{
710 int ret = 0;
711 uint32_t fw_to_check = 0;
712 struct cgs_firmware_info info = {0};
713 uint32_t index = SMN_MP1_SRAM_START_ADDR +
714 SMU8_FIRMWARE_HEADER_LOCATION +
715 offsetof(struct SMU8_Firmware_Header, Version);
716
717
718 if (hwmgr == NULL || hwmgr->device == NULL)
719 return -EINVAL;
720
721 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
722 hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
723 info.version = hwmgr->smu_version >> 8;
724 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
725
726 fw_to_check = UCODE_ID_RLC_G_MASK |
727 UCODE_ID_SDMA0_MASK |
728 UCODE_ID_SDMA1_MASK |
729 UCODE_ID_CP_CE_MASK |
730 UCODE_ID_CP_ME_MASK |
731 UCODE_ID_CP_PFP_MASK |
732 UCODE_ID_CP_MEC_JT1_MASK |
733 UCODE_ID_CP_MEC_JT2_MASK;
734
735 if (hwmgr->chip_id == CHIP_STONEY)
736 fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
737
738 ret = cz_request_smu_load_fw(hwmgr);
739 if (ret)
740 pr_err("SMU firmware load failed\n");
741
742 cz_check_fw_load_finish(hwmgr, fw_to_check);
743
744 ret = cz_load_mec_firmware(hwmgr);
745 if (ret)
746 pr_err("Mec Firmware load failed\n");
747
748 return ret;
749}
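/*
 * cz_start_smu() reads the firmware version out of the SMU8 header, builds
 * the mask of ucodes whose load must be confirmed (dropping SDMA1 and
 * MEC_JT2 on Stoney), kicks off the load and finally hands the MEC its
 * firmware.  Note that a failed load check only logs an error; the return
 * value reflects the MEC firmware load.
 */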
750
751static int cz_smu_init(struct pp_hwmgr *hwmgr)
752{
753 uint64_t mc_addr = 0;
754 int ret = 0;
755 struct cz_smumgr *cz_smu;
756
757 cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
758 if (cz_smu == NULL)
759 return -ENOMEM;
760
761 hwmgr->smu_backend = cz_smu;
762
763 cz_smu->toc_buffer.data_size = 4096;
764 cz_smu->smu_buffer.data_size =
765 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
766 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
767 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
768 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
769 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
770
771 ret = smu_allocate_memory(hwmgr->device,
772 cz_smu->toc_buffer.data_size,
773 CGS_GPU_MEM_TYPE__GART_CACHEABLE,
774 PAGE_SIZE,
775 &mc_addr,
776 &cz_smu->toc_buffer.kaddr,
777 &cz_smu->toc_buffer.handle);
778 if (ret != 0)
779 return -1;
780
781 cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
782 cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
783
784 ret = smu_allocate_memory(hwmgr->device,
785 cz_smu->smu_buffer.data_size,
786 CGS_GPU_MEM_TYPE__GART_CACHEABLE,
787 PAGE_SIZE,
788 &mc_addr,
789 &cz_smu->smu_buffer.kaddr,
790 &cz_smu->smu_buffer.handle);
791 if (ret != 0)
792 return -1;
793
794 cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
795 cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
796
797 if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
798 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
799 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
800 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
801 pr_err("Error when Populate Firmware Entry.\n");
802 return -1;
803 }
804
805 if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
806 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
807 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
808 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
809 pr_err("Error when Populate Firmware Entry.\n");
810 return -1;
811 }
812 if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
813 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
814 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
815 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
816 pr_err("Error when Populate Firmware Entry.\n");
817 return -1;
818 }
819
820 if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
821 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
822 sizeof(struct SMU8_MultimediaPowerLogData),
823 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
824 pr_err("Error when Populate Firmware Entry.\n");
825 return -1;
826 }
827
828 if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
829 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
830 sizeof(struct SMU8_Fusion_ClkTable),
831 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
832 pr_err("Error when Populate Firmware Entry.\n");
833 return -1;
834 }
835
836 return 0;
837}
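/*
 * cz_smu_init() allocates two GART-cacheable buffers: a 4 KiB TOC and a
 * scratch buffer sized (with 32-byte alignment per entry) for the RLC
 * scratch/ARAM/DRAM save areas, the multimedia power log and the fusion
 * clock table.  cz_smu_populate_single_scratch_entry() then carves the
 * scratch buffer into those sub-allocations and records their GPU and CPU
 * addresses for use by the TOC tasks above.
 */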
838
839static int cz_smu_fini(struct pp_hwmgr *hwmgr)
840{
841 struct cz_smumgr *cz_smu;
842
843 if (hwmgr == NULL || hwmgr->device == NULL)
844 return -EINVAL;
845
846 cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
847 if (cz_smu) {
848 cgs_free_gpu_mem(hwmgr->device,
849 cz_smu->toc_buffer.handle);
850 cgs_free_gpu_mem(hwmgr->device,
851 cz_smu->smu_buffer.handle);
852 kfree(cz_smu);
853 }
854
855 return 0;
856}
857
858const struct pp_smumgr_func cz_smu_funcs = {
859 .smu_init = cz_smu_init,
860 .smu_fini = cz_smu_fini,
861 .start_smu = cz_start_smu,
862 .check_fw_load_finish = cz_check_fw_load_finish,
863 .request_smu_load_fw = NULL,
864 .request_smu_load_specific_fw = NULL,
865 .get_argument = cz_smum_get_argument,
866 .send_msg_to_smc = cz_send_msg_to_smc,
867 .send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter,
868 .download_pptable_settings = cz_download_pptable_settings,
869 .upload_pptable_settings = cz_upload_pptable_settings,
870};
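/*
 * This function table is the CZ backend of the generic smumgr interface;
 * the powerplay core is expected to reach these handlers through the
 * smum_* wrappers rather than calling them directly (see the smumgr.h
 * include above).  request_smu_load_fw is left NULL in this table because
 * cz_start_smu() triggers the load itself via cz_request_smu_load_fw().
 */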
871
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
deleted file mode 100644
index 7c3a290c8957..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _CZ_SMUMGR_H_
24#define _CZ_SMUMGR_H_
25
26
27#define MAX_NUM_FIRMWARE 8
28#define MAX_NUM_SCRATCH 11
29#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024
30#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048
31#define CZ_SCRATCH_SIZE_SDMA_METADATA 1024
32#define CZ_SCRATCH_SIZE_IH ((2*256+1)*4)
33
34enum cz_scratch_entry {
35 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
36 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
37 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
38 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
39 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
40 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
41 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
42 CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
43 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
44 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
45 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
46 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
47 CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
48 CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
49 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
50 CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
51 CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
52 CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
53 CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
54 CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START,
55 CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
56 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
57};
58
59struct cz_buffer_entry {
60 uint32_t data_size;
61 uint32_t mc_addr_low;
62 uint32_t mc_addr_high;
63 void *kaddr;
64 enum cz_scratch_entry firmware_ID;
65	unsigned long handle; /* bo handle, used when releasing the bo */
66};
67
68struct cz_register_index_data_pair {
69 uint32_t offset;
70 uint32_t value;
71};
72
73struct cz_ih_meta_data {
74 uint32_t command;
75 struct cz_register_index_data_pair register_index_value_pair[1];
76};
77
78struct cz_smumgr {
79 uint8_t driver_buffer_length;
80 uint8_t scratch_buffer_length;
81 uint16_t toc_entry_used_count;
82 uint16_t toc_entry_initialize_index;
83 uint16_t toc_entry_power_profiling_index;
84 uint16_t toc_entry_aram;
85 uint16_t toc_entry_ih_register_restore_task_index;
86 uint16_t toc_entry_clock_table;
87 uint16_t ih_register_restore_task_size;
88 uint16_t smu_buffer_used_bytes;
89
90 struct cz_buffer_entry toc_buffer;
91 struct cz_buffer_entry smu_buffer;
92 struct cz_buffer_entry firmware_buffer;
93 struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
94 struct cz_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE];
95 struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
96};
97
98#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 085d81c8b332..95fcda37f890 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -205,9 +205,9 @@ static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
205 int result = 0; 205 int result = 0;
206 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); 206 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
207 207
208 if (0 != smu_data->avfs.avfs_btc_param) { 208 if (0 != smu_data->avfs_btc_param) {
209 if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, 209 if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
210 PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { 210 PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
211 pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed"); 211 pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
212 result = -EINVAL; 212 result = -EINVAL;
213 } 213 }
@@ -261,43 +261,21 @@ static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
261 return 0; 261 return 0;
262} 262}
263 263
264static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started) 264static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr)
265{ 265{
266 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); 266 PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
267 267 "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
268 switch (smu_data->avfs.avfs_btc_status) { 268 " table over to SMU",
269 case AVFS_BTC_COMPLETED_PREVIOUSLY: 269 return -EINVAL);
270 break; 270 PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
271 "[AVFS][fiji_avfs_event_mgr] Could not setup "
272 "Pwr Virus for AVFS ",
273 return -EINVAL);
274 PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
275 "[AVFS][fiji_avfs_event_mgr] Failure at "
276 "fiji_start_avfs_btc. AVFS Disabled",
277 return -EINVAL);
271 278
272 case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/
273 if (!smu_started)
274 break;
275 smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
276 PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
277 "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
278 " table over to SMU",
279 return -EINVAL;);
280 smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
281 PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
282 "[AVFS][fiji_avfs_event_mgr] Could not setup "
283 "Pwr Virus for AVFS ",
284 return -EINVAL;);
285 smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
286 PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
287 "[AVFS][fiji_avfs_event_mgr] Failure at "
288 "fiji_start_avfs_btc. AVFS Disabled",
289 return -EINVAL;);
290
291 smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
292 break;
293 case AVFS_BTC_DISABLED: /* Do nothing */
294 case AVFS_BTC_NOTSUPPORTED: /* Do nothing */
295 case AVFS_BTC_ENABLEAVFS:
296 break;
297 default:
298 pr_err("AVFS failed status is %x !\n", smu_data->avfs.avfs_btc_status);
299 break;
300 }
301 return 0; 279 return 0;
302} 280}
303 281
@@ -309,8 +287,6 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
309 /* Only start SMC if SMC RAM is not running */ 287 /* Only start SMC if SMC RAM is not running */
310 if (!(smu7_is_smc_ram_running(hwmgr) 288 if (!(smu7_is_smc_ram_running(hwmgr)
311 || cgs_is_virtualization_enabled(hwmgr->device))) { 289 || cgs_is_virtualization_enabled(hwmgr->device))) {
312 fiji_avfs_event_mgr(hwmgr, false);
313
314 /* Check if SMU is running in protected mode */ 290 /* Check if SMU is running in protected mode */
315 if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, 291 if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
316 CGS_IND_REG__SMC, 292 CGS_IND_REG__SMC,
@@ -323,7 +299,8 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
323 if (result) 299 if (result)
324 return result; 300 return result;
325 } 301 }
326 fiji_avfs_event_mgr(hwmgr, true); 302 if (fiji_avfs_event_mgr(hwmgr))
303 hwmgr->avfs_supported = false;
327 } 304 }
328 305
329 /* To initialize all clock gating before RLC loaded and running.*/ 306 /* To initialize all clock gating before RLC loaded and running.*/
@@ -368,7 +345,6 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
368 345
369static int fiji_smu_init(struct pp_hwmgr *hwmgr) 346static int fiji_smu_init(struct pp_hwmgr *hwmgr)
370{ 347{
371 int i;
372 struct fiji_smumgr *fiji_priv = NULL; 348 struct fiji_smumgr *fiji_priv = NULL;
373 349
374 fiji_priv = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL); 350 fiji_priv = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL);
@@ -378,11 +354,10 @@ static int fiji_smu_init(struct pp_hwmgr *hwmgr)
378 354
379 hwmgr->smu_backend = fiji_priv; 355 hwmgr->smu_backend = fiji_priv;
380 356
381 if (smu7_init(hwmgr)) 357 if (smu7_init(hwmgr)) {
358 kfree(fiji_priv);
382 return -EINVAL; 359 return -EINVAL;
383 360 }
384 for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
385 fiji_priv->activity_target[i] = 30;
386 361
387 return 0; 362 return 0;
388} 363}
@@ -972,8 +947,7 @@ static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
972} 947}
973 948
974static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, 949static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
975 uint32_t clock, uint16_t sclk_al_threshold, 950 uint32_t clock, struct SMU73_Discrete_GraphicsLevel *level)
976 struct SMU73_Discrete_GraphicsLevel *level)
977{ 951{
978 int result; 952 int result;
979 /* PP_Clocks minClocks; */ 953 /* PP_Clocks minClocks; */
@@ -981,12 +955,18 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
981 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 955 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
982 struct phm_ppt_v1_information *table_info = 956 struct phm_ppt_v1_information *table_info =
983 (struct phm_ppt_v1_information *)(hwmgr->pptable); 957 (struct phm_ppt_v1_information *)(hwmgr->pptable);
958 phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
984 959
985 result = fiji_calculate_sclk_params(hwmgr, clock, level); 960 result = fiji_calculate_sclk_params(hwmgr, clock, level);
986 961
962 if (hwmgr->od_enabled)
963 vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_sclk;
964 else
965 vdd_dep_table = table_info->vdd_dep_on_sclk;
966
987 /* populate graphics levels */ 967 /* populate graphics levels */
988 result = fiji_get_dependency_volt_by_clk(hwmgr, 968 result = fiji_get_dependency_volt_by_clk(hwmgr,
989 table_info->vdd_dep_on_sclk, clock, 969 vdd_dep_table, clock,
990 (uint32_t *)(&level->MinVoltage), &mvdd); 970 (uint32_t *)(&level->MinVoltage), &mvdd);
991 PP_ASSERT_WITH_CODE((0 == result), 971 PP_ASSERT_WITH_CODE((0 == result),
992 "can not find VDDC voltage value for " 972 "can not find VDDC voltage value for "
@@ -994,13 +974,13 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
994 return result); 974 return result);
995 975
996 level->SclkFrequency = clock; 976 level->SclkFrequency = clock;
997 level->ActivityLevel = sclk_al_threshold; 977 level->ActivityLevel = data->current_profile_setting.sclk_activity;
998 level->CcPwrDynRm = 0; 978 level->CcPwrDynRm = 0;
999 level->CcPwrDynRm1 = 0; 979 level->CcPwrDynRm1 = 0;
1000 level->EnabledForActivity = 0; 980 level->EnabledForActivity = 0;
1001 level->EnabledForThrottle = 1; 981 level->EnabledForThrottle = 1;
1002 level->UpHyst = 10; 982 level->UpHyst = data->current_profile_setting.sclk_up_hyst;
1003 level->DownHyst = 0; 983 level->DownHyst = data->current_profile_setting.sclk_down_hyst;
1004 level->VoltageDownHyst = 0; 984 level->VoltageDownHyst = 0;
1005 level->PowerThrottle = 0; 985 level->PowerThrottle = 0;
1006 986
@@ -1057,7 +1037,6 @@ static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1057 for (i = 0; i < dpm_table->sclk_table.count; i++) { 1037 for (i = 0; i < dpm_table->sclk_table.count; i++) {
1058 result = fiji_populate_single_graphic_level(hwmgr, 1038 result = fiji_populate_single_graphic_level(hwmgr,
1059 dpm_table->sclk_table.dpm_levels[i].value, 1039 dpm_table->sclk_table.dpm_levels[i].value,
1060 (uint16_t)smu_data->activity_target[i],
1061 &levels[i]); 1040 &levels[i]);
1062 if (result) 1041 if (result)
1063 return result; 1042 return result;
@@ -1202,10 +1181,16 @@ static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1202 (struct phm_ppt_v1_information *)(hwmgr->pptable); 1181 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1203 int result = 0; 1182 int result = 0;
1204 uint32_t mclk_stutter_mode_threshold = 60000; 1183 uint32_t mclk_stutter_mode_threshold = 60000;
1184 phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
1205 1185
1206 if (table_info->vdd_dep_on_mclk) { 1186 if (hwmgr->od_enabled)
1187 vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
1188 else
1189 vdd_dep_table = table_info->vdd_dep_on_mclk;
1190
1191 if (vdd_dep_table) {
1207 result = fiji_get_dependency_volt_by_clk(hwmgr, 1192 result = fiji_get_dependency_volt_by_clk(hwmgr,
1208 table_info->vdd_dep_on_mclk, clock, 1193 vdd_dep_table, clock,
1209 (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd); 1194 (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
1210 PP_ASSERT_WITH_CODE((0 == result), 1195 PP_ASSERT_WITH_CODE((0 == result),
1211 "can not find MinVddc voltage value from memory " 1196 "can not find MinVddc voltage value from memory "
@@ -1214,10 +1199,10 @@ static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1214 1199
1215 mem_level->EnabledForThrottle = 1; 1200 mem_level->EnabledForThrottle = 1;
1216 mem_level->EnabledForActivity = 0; 1201 mem_level->EnabledForActivity = 0;
1217 mem_level->UpHyst = 0; 1202 mem_level->UpHyst = data->current_profile_setting.mclk_up_hyst;
1218 mem_level->DownHyst = 100; 1203 mem_level->DownHyst = data->current_profile_setting.mclk_down_hyst;
1219 mem_level->VoltageDownHyst = 0; 1204 mem_level->VoltageDownHyst = 0;
1220 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; 1205 mem_level->ActivityLevel = data->current_profile_setting.mclk_activity;
1221 mem_level->StutterEnable = false; 1206 mem_level->StutterEnable = false;
1222 1207
1223 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 1208 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
@@ -1435,7 +1420,7 @@ static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1435 table->MemoryACPILevel.DownHyst = 100; 1420 table->MemoryACPILevel.DownHyst = 100;
1436 table->MemoryACPILevel.VoltageDownHyst = 0; 1421 table->MemoryACPILevel.VoltageDownHyst = 0;
1437 table->MemoryACPILevel.ActivityLevel = 1422 table->MemoryACPILevel.ActivityLevel =
1438 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); 1423 PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1439 1424
1440 table->MemoryACPILevel.StutterEnable = false; 1425 table->MemoryACPILevel.StutterEnable = false;
1441 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); 1426 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
@@ -1799,7 +1784,7 @@ static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1799 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1784 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1800 PHM_PlatformCaps_ClockStretcher); 1785 PHM_PlatformCaps_ClockStretcher);
1801 PP_ASSERT_WITH_CODE(false, 1786 PP_ASSERT_WITH_CODE(false,
1802 "Stretch Amount in PPTable not supported\n", 1787 "Stretch Amount in PPTable not supported",
1803 return -EINVAL); 1788 return -EINVAL);
1804 } 1789 }
1805 1790
@@ -1954,44 +1939,6 @@ static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
1954 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); 1939 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1955} 1940}
1956 1941
1957static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
1958{
1959 struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1960 struct SMU73_Discrete_GraphicsLevel *levels =
1961 data->smc_state_table.GraphicsLevel;
1962 unsigned min_level = 1;
1963
1964 hwmgr->default_gfx_power_profile.activity_threshold =
1965 be16_to_cpu(levels[0].ActivityLevel);
1966 hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
1967 hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
1968 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
1969
1970 hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
1971 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
1972
1973 /* Workaround compute SDMA instability: disable lowest SCLK
1974 * DPM level. Optimize compute power profile: Use only highest
1975 * 2 power levels (if more than 2 are available), Hysteresis:
1976 * 0ms up, 5ms down
1977 */
1978 if (data->smc_state_table.GraphicsDpmLevelCount > 2)
1979 min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
1980 else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
1981 min_level = 1;
1982 else
1983 min_level = 0;
1984 hwmgr->default_compute_power_profile.min_sclk =
1985 be32_to_cpu(levels[min_level].SclkFrequency);
1986 hwmgr->default_compute_power_profile.up_hyst = 0;
1987 hwmgr->default_compute_power_profile.down_hyst = 5;
1988
1989 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
1990 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
1991
1992 return 0;
1993}
1994
1995static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr) 1942static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
1996{ 1943{
1997 pp_atomctrl_voltage_table param_led_dpm; 1944 pp_atomctrl_voltage_table param_led_dpm;
@@ -2141,7 +2088,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
2141 result = fiji_populate_vr_config(hwmgr, table); 2088 result = fiji_populate_vr_config(hwmgr, table);
2142 PP_ASSERT_WITH_CODE(0 == result, 2089 PP_ASSERT_WITH_CODE(0 == result,
2143 "Failed to populate VRConfig setting!", return result); 2090 "Failed to populate VRConfig setting!", return result);
2144 2091 data->vr_config = table->VRConfig;
2145 table->ThermGpio = 17; 2092 table->ThermGpio = 17;
2146 table->SclkStepSize = 0x4000; 2093 table->SclkStepSize = 0x4000;
2147 2094
@@ -2232,8 +2179,6 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
2232 PP_ASSERT_WITH_CODE(0 == result, 2179 PP_ASSERT_WITH_CODE(0 == result,
2233 "Failed to setup dpm led config", return result); 2180 "Failed to setup dpm led config", return result);
2234 2181
2235 fiji_save_default_power_profile(hwmgr);
2236
2237 return 0; 2182 return 0;
2238} 2183}
2239 2184
@@ -2349,19 +2294,12 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2349 2294
2350static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) 2295static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
2351{ 2296{
2352 int ret; 2297 if (!hwmgr->avfs_supported)
2353 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
2354
2355 if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
2356 return 0; 2298 return 0;
2357 2299
2358 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); 2300 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
2359
2360 if (!ret)
2361 /* If this param is not changed, this function could fire unnecessarily */
2362 smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
2363 2301
2364 return ret; 2302 return 0;
2365} 2303}
2366 2304
2367static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) 2305static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
@@ -2688,29 +2626,100 @@ static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
2688 ? true : false; 2626 ? true : false;
2689} 2627}
2690 2628
2691static int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, 2629static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
2692 struct amd_pp_profile *request) 2630 void *profile_setting)
2693{ 2631{
2632 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2694 struct fiji_smumgr *smu_data = (struct fiji_smumgr *) 2633 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
2695 (hwmgr->smu_backend); 2634 (hwmgr->smu_backend);
2635 struct profile_mode_setting *setting;
2696 struct SMU73_Discrete_GraphicsLevel *levels = 2636 struct SMU73_Discrete_GraphicsLevel *levels =
2697 smu_data->smc_state_table.GraphicsLevel; 2637 smu_data->smc_state_table.GraphicsLevel;
2698 uint32_t array = smu_data->smu7_data.dpm_table_start + 2638 uint32_t array = smu_data->smu7_data.dpm_table_start +
2699 offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); 2639 offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
2700 uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) * 2640
2701 SMU73_MAX_LEVELS_GRAPHICS; 2641 uint32_t mclk_array = smu_data->smu7_data.dpm_table_start +
2642 offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
2643 struct SMU73_Discrete_MemoryLevel *mclk_levels =
2644 smu_data->smc_state_table.MemoryLevel;
2702 uint32_t i; 2645 uint32_t i;
2646 uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
2703 2647
2704 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { 2648 if (profile_setting == NULL)
2705 levels[i].ActivityLevel = 2649 return -EINVAL;
2706 cpu_to_be16(request->activity_threshold); 2650
2707 levels[i].EnabledForActivity = 1; 2651 setting = (struct profile_mode_setting *)profile_setting;
2708 levels[i].UpHyst = request->up_hyst; 2652
2709 levels[i].DownHyst = request->down_hyst; 2653 if (setting->bupdate_sclk) {
2654 if (!data->sclk_dpm_key_disabled)
2655 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
2656 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2657 if (levels[i].ActivityLevel !=
2658 cpu_to_be16(setting->sclk_activity)) {
2659 levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
2660
2661 clk_activity_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i)
2662 + offsetof(SMU73_Discrete_GraphicsLevel, ActivityLevel);
2663 offset = clk_activity_offset & ~0x3;
2664 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2665 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
2666 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2667
2668 }
2669 if (levels[i].UpHyst != setting->sclk_up_hyst ||
2670 levels[i].DownHyst != setting->sclk_down_hyst) {
2671 levels[i].UpHyst = setting->sclk_up_hyst;
2672 levels[i].DownHyst = setting->sclk_down_hyst;
2673 up_hyst_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i)
2674 + offsetof(SMU73_Discrete_GraphicsLevel, UpHyst);
2675 down_hyst_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i)
2676 + offsetof(SMU73_Discrete_GraphicsLevel, DownHyst);
2677 offset = up_hyst_offset & ~0x3;
2678 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2679 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpHyst, sizeof(uint8_t));
2680 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownHyst, sizeof(uint8_t));
2681 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2682 }
2683 }
2684 if (!data->sclk_dpm_key_disabled)
2685 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
2710 } 2686 }
2711 2687
2712 return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, 2688 if (setting->bupdate_mclk) {
2713 array_size, SMC_RAM_END); 2689 if (!data->mclk_dpm_key_disabled)
2690 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
2691 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
2692 if (mclk_levels[i].ActivityLevel !=
2693 cpu_to_be16(setting->mclk_activity)) {
2694 mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
2695
2696 clk_activity_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i)
2697 + offsetof(SMU73_Discrete_MemoryLevel, ActivityLevel);
2698 offset = clk_activity_offset & ~0x3;
2699 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2700 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
2701 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2702
2703 }
2704 if (mclk_levels[i].UpHyst != setting->mclk_up_hyst ||
2705 mclk_levels[i].DownHyst != setting->mclk_down_hyst) {
2706 mclk_levels[i].UpHyst = setting->mclk_up_hyst;
2707 mclk_levels[i].DownHyst = setting->mclk_down_hyst;
2708 up_hyst_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i)
2709 + offsetof(SMU73_Discrete_MemoryLevel, UpHyst);
2710 down_hyst_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i)
2711 + offsetof(SMU73_Discrete_MemoryLevel, DownHyst);
2712 offset = up_hyst_offset & ~0x3;
2713 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2714 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpHyst, sizeof(uint8_t));
2715 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownHyst, sizeof(uint8_t));
2716 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2717 }
2718 }
2719 if (!data->mclk_dpm_key_disabled)
2720 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
2721 }
2722 return 0;
2714} 2723}
2715 2724
2716const struct pp_smumgr_func fiji_smu_funcs = { 2725const struct pp_smumgr_func fiji_smu_funcs = {
@@ -2736,6 +2745,6 @@ const struct pp_smumgr_func fiji_smu_funcs = {
2736 .get_mac_definition = fiji_get_mac_definition, 2745 .get_mac_definition = fiji_get_mac_definition,
2737 .initialize_mc_reg_table = fiji_initialize_mc_reg_table, 2746 .initialize_mc_reg_table = fiji_initialize_mc_reg_table,
2738 .is_dpm_running = fiji_is_dpm_running, 2747 .is_dpm_running = fiji_is_dpm_running,
2739 .populate_requested_graphic_levels = fiji_populate_requested_graphic_levels,
2740 .is_hw_avfs_present = fiji_is_hw_avfs_present, 2748 .is_hw_avfs_present = fiji_is_hw_avfs_present,
2749 .update_dpm_settings = fiji_update_dpm_settings,
2741}; 2750};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index 279647772578..6d3746268ccf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -43,8 +43,6 @@ struct fiji_smumgr {
43 struct SMU73_Discrete_Ulv ulv_setting; 43 struct SMU73_Discrete_Ulv ulv_setting;
44 struct SMU73_Discrete_PmFuses power_tune_table; 44 struct SMU73_Discrete_PmFuses power_tune_table;
45 const struct fiji_pt_defaults *power_tune_defaults; 45 const struct fiji_pt_defaults *power_tune_defaults;
46 uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
47
48}; 46};
49 47
50#endif 48#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 125312691f75..4e2f62e659ef 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -262,7 +262,6 @@ static int iceland_start_smu(struct pp_hwmgr *hwmgr)
262 262
263static int iceland_smu_init(struct pp_hwmgr *hwmgr) 263static int iceland_smu_init(struct pp_hwmgr *hwmgr)
264{ 264{
265 int i;
266 struct iceland_smumgr *iceland_priv = NULL; 265 struct iceland_smumgr *iceland_priv = NULL;
267 266
268 iceland_priv = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL); 267 iceland_priv = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL);
@@ -272,11 +271,10 @@ static int iceland_smu_init(struct pp_hwmgr *hwmgr)
272 271
273 hwmgr->smu_backend = iceland_priv; 272 hwmgr->smu_backend = iceland_priv;
274 273
275 if (smu7_init(hwmgr)) 274 if (smu7_init(hwmgr)) {
275 kfree(iceland_priv);
276 return -EINVAL; 276 return -EINVAL;
277 277 }
278 for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
279 iceland_priv->activity_target[i] = 30;
280 278
281 return 0; 279 return 0;
282} 280}
@@ -285,13 +283,10 @@ static int iceland_smu_init(struct pp_hwmgr *hwmgr)
285static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) 283static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
286{ 284{
287 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); 285 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
288 struct cgs_system_info sys_info = {0}; 286 struct amdgpu_device *adev = hwmgr->adev;
289 uint32_t dev_id; 287 uint32_t dev_id;
290 288
291 sys_info.size = sizeof(struct cgs_system_info); 289 dev_id = adev->pdev->device;
292 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
293 cgs_query_system_info(hwmgr->device, &sys_info);
294 dev_id = (uint32_t)sys_info.value;
295 290
296 switch (dev_id) { 291 switch (dev_id) {
297 case DEVICE_ID_VI_ICELAND_M_6900: 292 case DEVICE_ID_VI_ICELAND_M_6900:
@@ -546,7 +541,7 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
546 541
547 /* SCLK/VDDC Dependency Table has to exist. */ 542 /* SCLK/VDDC Dependency Table has to exist. */
548 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk, 543 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
549 "The SCLK/VDDC Dependency Table does not exist.\n", 544 "The SCLK/VDDC Dependency Table does not exist.",
550 return -EINVAL); 545 return -EINVAL);
551 546
552 if (NULL == hwmgr->dyn_state.cac_leakage_table) { 547 if (NULL == hwmgr->dyn_state.cac_leakage_table) {
@@ -898,7 +893,6 @@ static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
898 893
899static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, 894static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
900 uint32_t engine_clock, 895 uint32_t engine_clock,
901 uint16_t sclk_activity_level_threshold,
902 SMU71_Discrete_GraphicsLevel *graphic_level) 896 SMU71_Discrete_GraphicsLevel *graphic_level)
903{ 897{
904 int result; 898 int result;
@@ -924,7 +918,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
924 &graphic_level->MinVddcPhases); 918 &graphic_level->MinVddcPhases);
925 919
926 /* Indicates maximum activity level for this performance level. 50% for now*/ 920 /* Indicates maximum activity level for this performance level. 50% for now*/
927 graphic_level->ActivityLevel = sclk_activity_level_threshold; 921 graphic_level->ActivityLevel = data->current_profile_setting.sclk_activity;
928 922
929 graphic_level->CcPwrDynRm = 0; 923 graphic_level->CcPwrDynRm = 0;
930 graphic_level->CcPwrDynRm1 = 0; 924 graphic_level->CcPwrDynRm1 = 0;
@@ -932,8 +926,8 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
932 graphic_level->EnabledForActivity = 0; 926 graphic_level->EnabledForActivity = 0;
933 /* this level can be used for throttling.*/ 927 /* this level can be used for throttling.*/
934 graphic_level->EnabledForThrottle = 1; 928 graphic_level->EnabledForThrottle = 1;
935 graphic_level->UpHyst = 0; 929 graphic_level->UpHyst = data->current_profile_setting.sclk_up_hyst;
936 graphic_level->DownHyst = 100; 930 graphic_level->DownHyst = data->current_profile_setting.sclk_down_hyst;
937 graphic_level->VoltageDownHyst = 0; 931 graphic_level->VoltageDownHyst = 0;
938 graphic_level->PowerThrottle = 0; 932 graphic_level->PowerThrottle = 0;
939 933
@@ -989,7 +983,6 @@ static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
989 for (i = 0; i < dpm_table->sclk_table.count; i++) { 983 for (i = 0; i < dpm_table->sclk_table.count; i++) {
990 result = iceland_populate_single_graphic_level(hwmgr, 984 result = iceland_populate_single_graphic_level(hwmgr,
991 dpm_table->sclk_table.dpm_levels[i].value, 985 dpm_table->sclk_table.dpm_levels[i].value,
992 (uint16_t)smu_data->activity_target[i],
993 &(smu_data->smc_state_table.GraphicsLevel[i])); 986 &(smu_data->smc_state_table.GraphicsLevel[i]));
994 if (result != 0) 987 if (result != 0)
995 return result; 988 return result;
@@ -1275,12 +1268,12 @@ static int iceland_populate_single_memory_level(
1275 1268
1276 memory_level->EnabledForThrottle = 1; 1269 memory_level->EnabledForThrottle = 1;
1277 memory_level->EnabledForActivity = 0; 1270 memory_level->EnabledForActivity = 0;
1278 memory_level->UpHyst = 0; 1271 memory_level->UpHyst = data->current_profile_setting.mclk_up_hyst;
1279 memory_level->DownHyst = 100; 1272 memory_level->DownHyst = data->current_profile_setting.mclk_down_hyst;
1280 memory_level->VoltageDownHyst = 0; 1273 memory_level->VoltageDownHyst = 0;
1281 1274
1282 /* Indicates maximum activity level for this performance level.*/ 1275 /* Indicates maximum activity level for this performance level.*/
1283 memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; 1276 memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
1284 memory_level->StutterEnable = 0; 1277 memory_level->StutterEnable = 0;
1285 memory_level->StrobeEnable = 0; 1278 memory_level->StrobeEnable = 0;
1286 memory_level->EdcReadEnable = 0; 1279 memory_level->EdcReadEnable = 0;
@@ -1561,7 +1554,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1561 table->MemoryACPILevel.DownHyst = 100; 1554 table->MemoryACPILevel.DownHyst = 100;
1562 table->MemoryACPILevel.VoltageDownHyst = 0; 1555 table->MemoryACPILevel.VoltageDownHyst = 0;
1563 /* Indicates maximum activity level for this performance level.*/ 1556 /* Indicates maximum activity level for this performance level.*/
1564 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); 1557 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1565 1558
1566 table->MemoryACPILevel.StutterEnable = 0; 1559 table->MemoryACPILevel.StutterEnable = 0;
1567 table->MemoryACPILevel.StrobeEnable = 0; 1560 table->MemoryACPILevel.StrobeEnable = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
index 802472530d34..f32c506779c9 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -65,7 +65,6 @@ struct iceland_smumgr {
65 const struct iceland_pt_defaults *power_tune_defaults; 65 const struct iceland_pt_defaults *power_tune_defaults;
66 SMU71_Discrete_MCRegisters mc_regs; 66 SMU71_Discrete_MCRegisters mc_regs;
67 struct iceland_mc_reg_table mc_reg_table; 67 struct iceland_mc_reg_table mc_reg_table;
68 uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS];
69}; 68};
70 69
71#endif 70#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index cdb47657b567..03ec1e59876b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -99,13 +99,13 @@ static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
99 int result = 0; 99 int result = 0;
100 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); 100 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
101 101
102 if (0 != smu_data->avfs.avfs_btc_param) { 102 if (0 != smu_data->avfs_btc_param) {
103 if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { 103 if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
104 pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); 104 pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
105 result = -1; 105 result = -1;
106 } 106 }
107 } 107 }
108 if (smu_data->avfs.avfs_btc_param > 1) { 108 if (smu_data->avfs_btc_param > 1) {
109 /* Soft-Reset to reset the engine before loading uCode */ 109 /* Soft-Reset to reset the engine before loading uCode */
110 /* halt */ 110 /* halt */
111 cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000); 111 cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
@@ -173,46 +173,25 @@ static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
173 173
174 174
175static int 175static int
176polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT) 176polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr)
177{ 177{
178 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); 178 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
179 179
180 switch (smu_data->avfs.avfs_btc_status) { 180 PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
181 case AVFS_BTC_COMPLETED_PREVIOUSLY: 181 "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
182 break; 182 return -EINVAL);
183
184 case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */
185
186 smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
187 PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
188 "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
189 return -EINVAL);
190
191 if (smu_data->avfs.avfs_btc_param > 1) {
192 pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
193 smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
194 PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
195 "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
196 return -EINVAL);
197 }
198
199 smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
200 PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
201 "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
202 return -EINVAL);
203 smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
204 break;
205 183
206 case AVFS_BTC_DISABLED: 184 if (smu_data->avfs_btc_param > 1) {
207 case AVFS_BTC_ENABLEAVFS: 185 pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
208 case AVFS_BTC_NOTSUPPORTED: 186 PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
209 break; 187 "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
210 188 return -EINVAL);
211 default:
212 pr_err("AVFS failed status is %x!\n", smu_data->avfs.avfs_btc_status);
213 break;
214 } 189 }
215 190
191 PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
192 "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
193 return -EINVAL);
194
216 return 0; 195 return 0;
217} 196}
218 197
@@ -312,11 +291,10 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
312{ 291{
313 int result = 0; 292 int result = 0;
314 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); 293 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
315 bool SMU_VFT_INTACT;
316 294
317 /* Only start SMC if SMC RAM is not running */ 295 /* Only start SMC if SMC RAM is not running */
318 if (!smu7_is_smc_ram_running(hwmgr)) { 296 if (!(smu7_is_smc_ram_running(hwmgr)
319 SMU_VFT_INTACT = false; 297 || cgs_is_virtualization_enabled(hwmgr->device))) {
320 smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); 298 smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
321 smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); 299 smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
322 300
@@ -337,11 +315,9 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
337 if (result != 0) 315 if (result != 0)
338 PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result); 316 PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
339 317
340 polaris10_avfs_event_mgr(hwmgr, true); 318 polaris10_avfs_event_mgr(hwmgr);
341 } else 319 }
342 SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
343 320
344 polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT);
345 /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */ 321 /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
346 smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), 322 smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
347 &(smu_data->smu7_data.soft_regs_start), 0x40000); 323 &(smu_data->smu7_data.soft_regs_start), 0x40000);
@@ -366,7 +342,6 @@ static bool polaris10_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
366static int polaris10_smu_init(struct pp_hwmgr *hwmgr) 342static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
367{ 343{
368 struct polaris10_smumgr *smu_data; 344 struct polaris10_smumgr *smu_data;
369 int i;
370 345
371 smu_data = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL); 346 smu_data = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
372 if (smu_data == NULL) 347 if (smu_data == NULL)
@@ -374,11 +349,10 @@ static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
374 349
375 hwmgr->smu_backend = smu_data; 350 hwmgr->smu_backend = smu_data;
376 351
377 if (smu7_init(hwmgr)) 352 if (smu7_init(hwmgr)) {
353 kfree(smu_data);
378 return -EINVAL; 354 return -EINVAL;
379 355 }
380 for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
381 smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
382 356
383 return 0; 357 return 0;
384} 358}
@@ -938,8 +912,7 @@ static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
938} 912}
939 913
940static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, 914static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
941 uint32_t clock, uint16_t sclk_al_threshold, 915 uint32_t clock, struct SMU74_Discrete_GraphicsLevel *level)
942 struct SMU74_Discrete_GraphicsLevel *level)
943{ 916{
944 int result; 917 int result;
945 /* PP_Clocks minClocks; */ 918 /* PP_Clocks minClocks; */
@@ -948,26 +921,32 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
948 struct phm_ppt_v1_information *table_info = 921 struct phm_ppt_v1_information *table_info =
949 (struct phm_ppt_v1_information *)(hwmgr->pptable); 922 (struct phm_ppt_v1_information *)(hwmgr->pptable);
950 SMU_SclkSetting curr_sclk_setting = { 0 }; 923 SMU_SclkSetting curr_sclk_setting = { 0 };
924 phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
951 925
952 result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting); 926 result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
953 927
928 if (hwmgr->od_enabled)
929 vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_sclk;
930 else
931 vdd_dep_table = table_info->vdd_dep_on_sclk;
932
954 /* populate graphics levels */ 933 /* populate graphics levels */
955 result = polaris10_get_dependency_volt_by_clk(hwmgr, 934 result = polaris10_get_dependency_volt_by_clk(hwmgr,
956 table_info->vdd_dep_on_sclk, clock, 935 vdd_dep_table, clock,
957 &level->MinVoltage, &mvdd); 936 &level->MinVoltage, &mvdd);
958 937
959 PP_ASSERT_WITH_CODE((0 == result), 938 PP_ASSERT_WITH_CODE((0 == result),
960 "can not find VDDC voltage value for " 939 "can not find VDDC voltage value for "
961 "VDDC engine clock dependency table", 940 "VDDC engine clock dependency table",
962 return result); 941 return result);
963 level->ActivityLevel = sclk_al_threshold; 942 level->ActivityLevel = data->current_profile_setting.sclk_activity;
964 943
965 level->CcPwrDynRm = 0; 944 level->CcPwrDynRm = 0;
966 level->CcPwrDynRm1 = 0; 945 level->CcPwrDynRm1 = 0;
967 level->EnabledForActivity = 0; 946 level->EnabledForActivity = 0;
968 level->EnabledForThrottle = 1; 947 level->EnabledForThrottle = 1;
969 level->UpHyst = 10; 948 level->UpHyst = data->current_profile_setting.sclk_up_hyst;
970 level->DownHyst = 0; 949 level->DownHyst = data->current_profile_setting.sclk_down_hyst;
971 level->VoltageDownHyst = 0; 950 level->VoltageDownHyst = 0;
972 level->PowerThrottle = 0; 951 level->PowerThrottle = 0;
973 data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; 952 data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
@@ -1031,7 +1010,6 @@ static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1031 1010
1032 result = polaris10_populate_single_graphic_level(hwmgr, 1011 result = polaris10_populate_single_graphic_level(hwmgr,
1033 dpm_table->sclk_table.dpm_levels[i].value, 1012 dpm_table->sclk_table.dpm_levels[i].value,
1034 (uint16_t)smu_data->activity_target[i],
1035 &(smu_data->smc_state_table.GraphicsLevel[i])); 1013 &(smu_data->smc_state_table.GraphicsLevel[i]));
1036 if (result) 1014 if (result)
1037 return result; 1015 return result;
@@ -1107,12 +1085,18 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1107 int result = 0; 1085 int result = 0;
1108 struct cgs_display_info info = {0, 0, NULL}; 1086 struct cgs_display_info info = {0, 0, NULL};
1109 uint32_t mclk_stutter_mode_threshold = 40000; 1087 uint32_t mclk_stutter_mode_threshold = 40000;
1088 phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
1110 1089
1111 cgs_get_active_displays_info(hwmgr->device, &info); 1090 cgs_get_active_displays_info(hwmgr->device, &info);
1112 1091
1113 if (table_info->vdd_dep_on_mclk) { 1092 if (hwmgr->od_enabled)
1093 vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
1094 else
1095 vdd_dep_table = table_info->vdd_dep_on_mclk;
1096
1097 if (vdd_dep_table) {
1114 result = polaris10_get_dependency_volt_by_clk(hwmgr, 1098 result = polaris10_get_dependency_volt_by_clk(hwmgr,
1115 table_info->vdd_dep_on_mclk, clock, 1099 vdd_dep_table, clock,
1116 &mem_level->MinVoltage, &mem_level->MinMvdd); 1100 &mem_level->MinVoltage, &mem_level->MinMvdd);
1117 PP_ASSERT_WITH_CODE((0 == result), 1101 PP_ASSERT_WITH_CODE((0 == result),
1118 "can not find MinVddc voltage value from memory " 1102 "can not find MinVddc voltage value from memory "
@@ -1122,10 +1106,10 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1122 mem_level->MclkFrequency = clock; 1106 mem_level->MclkFrequency = clock;
1123 mem_level->EnabledForThrottle = 1; 1107 mem_level->EnabledForThrottle = 1;
1124 mem_level->EnabledForActivity = 0; 1108 mem_level->EnabledForActivity = 0;
1125 mem_level->UpHyst = 0; 1109 mem_level->UpHyst = data->current_profile_setting.mclk_up_hyst;
1126 mem_level->DownHyst = 100; 1110 mem_level->DownHyst = data->current_profile_setting.mclk_down_hyst;
1127 mem_level->VoltageDownHyst = 0; 1111 mem_level->VoltageDownHyst = 0;
1128 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; 1112 mem_level->ActivityLevel = data->current_profile_setting.mclk_activity;
1129 mem_level->StutterEnable = false; 1113 mem_level->StutterEnable = false;
1130 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 1114 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1131 1115
@@ -1306,7 +1290,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1306 table->MemoryACPILevel.DownHyst = 100; 1290 table->MemoryACPILevel.DownHyst = 100;
1307 table->MemoryACPILevel.VoltageDownHyst = 0; 1291 table->MemoryACPILevel.VoltageDownHyst = 0;
1308 table->MemoryACPILevel.ActivityLevel = 1292 table->MemoryACPILevel.ActivityLevel =
1309 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); 1293 PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1310 1294
1311 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); 1295 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
1312 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); 1296 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
@@ -1652,7 +1636,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1652 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1636 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1653 PHM_PlatformCaps_ClockStretcher); 1637 PHM_PlatformCaps_ClockStretcher);
1654 PP_ASSERT_WITH_CODE(false, 1638 PP_ASSERT_WITH_CODE(false,
1655 "Stretch Amount in PPTable not supported\n", 1639 "Stretch Amount in PPTable not supported",
1656 return -EINVAL); 1640 return -EINVAL);
1657 } 1641 }
1658 1642
@@ -1726,8 +1710,8 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
1726 table_info->vdd_dep_on_sclk; 1710 table_info->vdd_dep_on_sclk;
1727 1711
1728 1712
1729 if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) 1713 if (!hwmgr->avfs_supported)
1730 return result; 1714 return 0;
1731 1715
1732 result = atomctrl_get_avfs_information(hwmgr, &avfs_params); 1716 result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
1733 1717
@@ -1834,42 +1818,6 @@ static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
1834 1818
1835} 1819}
1836 1820
1837static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
1838{
1839 struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1840 struct SMU74_Discrete_GraphicsLevel *levels =
1841 data->smc_state_table.GraphicsLevel;
1842 unsigned min_level = 1;
1843
1844 hwmgr->default_gfx_power_profile.activity_threshold =
1845 be16_to_cpu(levels[0].ActivityLevel);
1846 hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
1847 hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
1848 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
1849
1850 hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
1851 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
1852
1853 /* Workaround compute SDMA instability: disable lowest SCLK
1854 * DPM level. Optimize compute power profile: Use only highest
1855 * 2 power levels (if more than 2 are available), Hysteresis:
1856 * 0ms up, 5ms down
1857 */
1858 if (data->smc_state_table.GraphicsDpmLevelCount > 2)
1859 min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
1860 else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
1861 min_level = 1;
1862 else
1863 min_level = 0;
1864 hwmgr->default_compute_power_profile.min_sclk =
1865 be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
1866 hwmgr->default_compute_power_profile.up_hyst = 0;
1867 hwmgr->default_compute_power_profile.down_hyst = 5;
1868
1869 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
1870 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
1871}
1872
1873static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) 1821static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
1874{ 1822{
1875 int result; 1823 int result;
@@ -1991,7 +1939,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
1991 result = polaris10_populate_vr_config(hwmgr, table); 1939 result = polaris10_populate_vr_config(hwmgr, table);
1992 PP_ASSERT_WITH_CODE(0 == result, 1940 PP_ASSERT_WITH_CODE(0 == result,
1993 "Failed to populate VRConfig setting!", return result); 1941 "Failed to populate VRConfig setting!", return result);
1994 1942 hw_data->vr_config = table->VRConfig;
1995 table->ThermGpio = 17; 1943 table->ThermGpio = 17;
1996 table->SclkStepSize = 0x4000; 1944 table->SclkStepSize = 0x4000;
1997 1945
@@ -2084,8 +2032,6 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
2084 PP_ASSERT_WITH_CODE(0 == result, 2032 PP_ASSERT_WITH_CODE(0 == result,
2085 "Failed to populate PM fuses to SMC memory!", return result); 2033 "Failed to populate PM fuses to SMC memory!", return result);
2086 2034
2087 polaris10_save_default_power_profile(hwmgr);
2088
2089 return 0; 2035 return 0;
2090} 2036}
2091 2037
@@ -2102,24 +2048,17 @@ static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2102 2048
2103int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) 2049int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
2104{ 2050{
2105 int ret;
2106 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
2107 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2051 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2108 2052
2109 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) 2053 if (!hwmgr->avfs_supported)
2110 return 0; 2054 return 0;
2111 2055
2112 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2056 smum_send_msg_to_smc_with_parameter(hwmgr,
2113 PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); 2057 PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
2114 2058
2115 ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ? 2059 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
2116 0 : -1;
2117
2118 if (!ret)
2119 /* If this param is not changed, this function could fire unnecessarily */
2120 smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
2121 2060
2122 return ret; 2061 return 0;
2123} 2062}
2124 2063
2125static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) 2064static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
@@ -2544,29 +2483,100 @@ static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
2544 ? true : false; 2483 ? true : false;
2545} 2484}
2546 2485
2547static int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, 2486static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
2548 struct amd_pp_profile *request) 2487 void *profile_setting)
2549{ 2488{
2489 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2550 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *) 2490 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
2551 (hwmgr->smu_backend); 2491 (hwmgr->smu_backend);
2492 struct profile_mode_setting *setting;
2552 struct SMU74_Discrete_GraphicsLevel *levels = 2493 struct SMU74_Discrete_GraphicsLevel *levels =
2553 smu_data->smc_state_table.GraphicsLevel; 2494 smu_data->smc_state_table.GraphicsLevel;
2554 uint32_t array = smu_data->smu7_data.dpm_table_start + 2495 uint32_t array = smu_data->smu7_data.dpm_table_start +
2555 offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); 2496 offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
2556 uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) * 2497
2557 SMU74_MAX_LEVELS_GRAPHICS; 2498 uint32_t mclk_array = smu_data->smu7_data.dpm_table_start +
2499 offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
2500 struct SMU74_Discrete_MemoryLevel *mclk_levels =
2501 smu_data->smc_state_table.MemoryLevel;
2558 uint32_t i; 2502 uint32_t i;
2503 uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
2559 2504
2560 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { 2505 if (profile_setting == NULL)
2561 levels[i].ActivityLevel = 2506 return -EINVAL;
2562 cpu_to_be16(request->activity_threshold); 2507
2563 levels[i].EnabledForActivity = 1; 2508 setting = (struct profile_mode_setting *)profile_setting;
2564 levels[i].UpHyst = request->up_hyst; 2509
2565 levels[i].DownHyst = request->down_hyst; 2510 if (setting->bupdate_sclk) {
2511 if (!data->sclk_dpm_key_disabled)
2512 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
2513 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2514 if (levels[i].ActivityLevel !=
2515 cpu_to_be16(setting->sclk_activity)) {
2516 levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
2517
2518 clk_activity_offset = array + (sizeof(SMU74_Discrete_GraphicsLevel) * i)
2519 + offsetof(SMU74_Discrete_GraphicsLevel, ActivityLevel);
2520 offset = clk_activity_offset & ~0x3;
2521 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2522 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
2523 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2524
2525 }
2526 if (levels[i].UpHyst != setting->sclk_up_hyst ||
2527 levels[i].DownHyst != setting->sclk_down_hyst) {
2528 levels[i].UpHyst = setting->sclk_up_hyst;
2529 levels[i].DownHyst = setting->sclk_down_hyst;
2530 up_hyst_offset = array + (sizeof(SMU74_Discrete_GraphicsLevel) * i)
2531 + offsetof(SMU74_Discrete_GraphicsLevel, UpHyst);
2532 down_hyst_offset = array + (sizeof(SMU74_Discrete_GraphicsLevel) * i)
2533 + offsetof(SMU74_Discrete_GraphicsLevel, DownHyst);
2534 offset = up_hyst_offset & ~0x3;
2535 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2536 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpHyst, sizeof(uint8_t));
2537 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownHyst, sizeof(uint8_t));
2538 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2539 }
2540 }
2541 if (!data->sclk_dpm_key_disabled)
2542 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
2566 } 2543 }
2567 2544
2568 return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, 2545 if (setting->bupdate_mclk) {
2569 array_size, SMC_RAM_END); 2546 if (!data->mclk_dpm_key_disabled)
2547 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
2548 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
2549 if (mclk_levels[i].ActivityLevel !=
2550 cpu_to_be16(setting->mclk_activity)) {
2551 mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
2552
2553 clk_activity_offset = mclk_array + (sizeof(SMU74_Discrete_MemoryLevel) * i)
2554 + offsetof(SMU74_Discrete_MemoryLevel, ActivityLevel);
2555 offset = clk_activity_offset & ~0x3;
2556 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2557 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
2558 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2559
2560 }
2561 if (mclk_levels[i].UpHyst != setting->mclk_up_hyst ||
2562 mclk_levels[i].DownHyst != setting->mclk_down_hyst) {
2563 mclk_levels[i].UpHyst = setting->mclk_up_hyst;
2564 mclk_levels[i].DownHyst = setting->mclk_down_hyst;
2565 up_hyst_offset = mclk_array + (sizeof(SMU74_Discrete_MemoryLevel) * i)
2566 + offsetof(SMU74_Discrete_MemoryLevel, UpHyst);
2567 down_hyst_offset = mclk_array + (sizeof(SMU74_Discrete_MemoryLevel) * i)
2568 + offsetof(SMU74_Discrete_MemoryLevel, DownHyst);
2569 offset = up_hyst_offset & ~0x3;
2570 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2571 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpHyst, sizeof(uint8_t));
2572 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownHyst, sizeof(uint8_t));
2573 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2574 }
2575 }
2576 if (!data->mclk_dpm_key_disabled)
2577 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
2578 }
2579 return 0;
2570} 2580}
2571 2581
2572const struct pp_smumgr_func polaris10_smu_funcs = { 2582const struct pp_smumgr_func polaris10_smu_funcs = {
@@ -2591,6 +2601,6 @@ const struct pp_smumgr_func polaris10_smu_funcs = {
2591 .populate_all_memory_levels = polaris10_populate_all_memory_levels, 2601 .populate_all_memory_levels = polaris10_populate_all_memory_levels,
2592 .get_mac_definition = polaris10_get_mac_definition, 2602 .get_mac_definition = polaris10_get_mac_definition,
2593 .is_dpm_running = polaris10_is_dpm_running, 2603 .is_dpm_running = polaris10_is_dpm_running,
2594 .populate_requested_graphic_levels = polaris10_populate_requested_graphic_levels,
2595 .is_hw_avfs_present = polaris10_is_hw_avfs_present, 2604 .is_hw_avfs_present = polaris10_is_hw_avfs_present,
2605 .update_dpm_settings = polaris10_update_dpm_settings,
2596}; 2606};
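The new polaris10_update_dpm_settings() above patches individual GraphicsLevel/MemoryLevel fields in place instead of re-uploading the whole DPM table: for each level whose ActivityLevel, UpHyst or DownHyst differs from the requested profile, it dword-aligns the field's byte offset inside SMC RAM, reads that dword back through the indirect register interface, merges the new value in with phm_set_field_to_u32(), and writes the dword back while the corresponding DPM level is frozen. Below is a minimal standalone sketch of that merge step; smc_ram[] and patch_field() are stand-ins invented here for the SMC address space and the cgs_read_ind_register()/phm_set_field_to_u32()/cgs_write_ind_register() sequence, and the big-endian conversion (cpu_to_be16()/PP_HOST_TO_SMC_UL()) done by the real code is omitted.

/* Standalone sketch (not driver code): aligned read-modify-write of one
 * sub-dword field in a table already resident in SMC RAM.  Assumes the
 * field does not straddle a dword boundary; the driver's single-dword
 * write in the hunk above implies the same assumption.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t smc_ram[64];                      /* stand-in for SMC memory */

static void patch_field(uint32_t field_off, uint32_t value, size_t size)
{
        uint32_t aligned = field_off & ~0x3u;    /* dword-align, as in the diff */
        uint32_t dword;

        memcpy(&dword, &smc_ram[aligned], sizeof(dword));               /* read  */
        memcpy((uint8_t *)&dword + (field_off & 0x3u), &value, size);   /* merge */
        memcpy(&smc_ram[aligned], &dword, sizeof(dword));               /* write */
}

int main(void)
{
        patch_field(6, 0x1234, sizeof(uint16_t));  /* e.g. a new ActivityLevel */
        patch_field(9, 0x05,   sizeof(uint8_t));   /* e.g. a new UpHyst        */
        printf("%02x %02x %02x\n", smc_ram[6], smc_ram[7], smc_ram[9]);
        return 0;
}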
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
index 5e19c24b0561..1ec425df9eda 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -59,7 +59,6 @@ struct polaris10_smumgr {
59 struct SMU74_Discrete_PmFuses power_tune_table; 59 struct SMU74_Discrete_PmFuses power_tune_table;
60 struct polaris10_range_table range_table[NUM_SCLK_RANGE]; 60 struct polaris10_range_table range_table[NUM_SCLK_RANGE];
61 const struct polaris10_pt_defaults *power_tune_defaults; 61 const struct polaris10_pt_defaults *power_tune_defaults;
62 uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
63 uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK]; 62 uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
64}; 63};
65 64
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
deleted file mode 100644
index 2d662b44af54..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ /dev/null
@@ -1,406 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "smumgr.h"
25#include "rv_inc.h"
26#include "pp_soc15.h"
27#include "rv_smumgr.h"
28#include "ppatomctrl.h"
29#include "rv_ppsmc.h"
30#include "smu10_driver_if.h"
31#include "smu10.h"
32#include "ppatomctrl.h"
33#include "pp_debug.h"
34#include "smu_ucode_xfer_vi.h"
35#include "smu7_smumgr.h"
36
37#define VOLTAGE_SCALE 4
38
39#define BUFFER_SIZE 80000
40#define MAX_STRING_SIZE 15
41#define BUFFER_SIZETWO 131072
42
43#define MP0_Public 0x03800000
44#define MP0_SRAM 0x03900000
45#define MP1_Public 0x03b00000
46#define MP1_SRAM 0x03c00004
47
48#define smnMP1_FIRMWARE_FLAGS 0x3010028
49
50
51bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr)
52{
53 uint32_t mp1_fw_flags, reg;
54
55 reg = soc15_get_register_offset(NBIF_HWID, 0,
56 mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
57
58 cgs_write_register(hwmgr->device, reg,
59 (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
60
61 reg = soc15_get_register_offset(NBIF_HWID, 0,
62 mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
63
64 mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
65
66 if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
67 return true;
68
69 return false;
70}
71
72static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
73{
74 uint32_t reg;
75
76 if (!rv_is_smc_ram_running(hwmgr))
77 return -EINVAL;
78
79 reg = soc15_get_register_offset(MP1_HWID, 0,
80 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
81
82 phm_wait_for_register_unequal(hwmgr, reg,
83 0, MP1_C2PMSG_90__CONTENT_MASK);
84
85 return cgs_read_register(hwmgr->device, reg);
86}
87
88int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
89 uint16_t msg)
90{
91 uint32_t reg;
92
93 if (!rv_is_smc_ram_running(hwmgr))
94 return -EINVAL;
95
96 reg = soc15_get_register_offset(MP1_HWID, 0,
97 mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
98 cgs_write_register(hwmgr->device, reg, msg);
99
100 return 0;
101}
102
103int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
104{
105 uint32_t reg;
106
107 reg = soc15_get_register_offset(MP1_HWID, 0,
108 mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
109
110 *arg = cgs_read_register(hwmgr->device, reg);
111
112 return 0;
113}
114
115int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
116{
117 uint32_t reg;
118
119 rv_wait_for_response(hwmgr);
120
121 reg = soc15_get_register_offset(MP1_HWID, 0,
122 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
123 cgs_write_register(hwmgr->device, reg, 0);
124
125 rv_send_msg_to_smc_without_waiting(hwmgr, msg);
126
127 if (rv_wait_for_response(hwmgr) == 0)
128 printk("Failed to send Message %x.\n", msg);
129
130 return 0;
131}
132
133
134int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
135 uint16_t msg, uint32_t parameter)
136{
137 uint32_t reg;
138
139 rv_wait_for_response(hwmgr);
140
141 reg = soc15_get_register_offset(MP1_HWID, 0,
142 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
143 cgs_write_register(hwmgr->device, reg, 0);
144
145 reg = soc15_get_register_offset(MP1_HWID, 0,
146 mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
147 cgs_write_register(hwmgr->device, reg, parameter);
148
149 rv_send_msg_to_smc_without_waiting(hwmgr, msg);
150
151
152 if (rv_wait_for_response(hwmgr) == 0)
153 printk("Failed to send Message %x.\n", msg);
154
155 return 0;
156}
157
158int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
159 uint8_t *table, int16_t table_id)
160{
161 struct rv_smumgr *priv =
162 (struct rv_smumgr *)(hwmgr->smu_backend);
163
164 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
165 "Invalid SMU Table ID!", return -EINVAL;);
166 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
167 "Invalid SMU Table version!", return -EINVAL;);
168 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
169 "Invalid SMU Table Length!", return -EINVAL;);
170 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
171 PPSMC_MSG_SetDriverDramAddrHigh,
172 priv->smu_tables.entry[table_id].table_addr_high) == 0,
173 "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
174 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
175 PPSMC_MSG_SetDriverDramAddrLow,
176 priv->smu_tables.entry[table_id].table_addr_low) == 0,
177 "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
178 return -EINVAL;);
179 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
180 PPSMC_MSG_TransferTableSmu2Dram,
181 priv->smu_tables.entry[table_id].table_id) == 0,
182 "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
183 return -EINVAL;);
184
185 memcpy(table, priv->smu_tables.entry[table_id].table,
186 priv->smu_tables.entry[table_id].size);
187
188 return 0;
189}
190
191int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
192 uint8_t *table, int16_t table_id)
193{
194 struct rv_smumgr *priv =
195 (struct rv_smumgr *)(hwmgr->smu_backend);
196
197 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
198 "Invalid SMU Table ID!", return -EINVAL;);
199 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
200 "Invalid SMU Table version!", return -EINVAL;);
201 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
202 "Invalid SMU Table Length!", return -EINVAL;);
203
204 memcpy(priv->smu_tables.entry[table_id].table, table,
205 priv->smu_tables.entry[table_id].size);
206
207 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
208 PPSMC_MSG_SetDriverDramAddrHigh,
209 priv->smu_tables.entry[table_id].table_addr_high) == 0,
210 "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
211 return -EINVAL;);
212 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
213 PPSMC_MSG_SetDriverDramAddrLow,
214 priv->smu_tables.entry[table_id].table_addr_low) == 0,
215 "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
216 return -EINVAL;);
217 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
218 PPSMC_MSG_TransferTableDram2Smu,
219 priv->smu_tables.entry[table_id].table_id) == 0,
220 "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
221 return -EINVAL;);
222
223 return 0;
224}
225
226static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
227{
228 uint32_t smc_driver_if_version;
229
230 PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
231 PPSMC_MSG_GetDriverIfVersion),
232 "Attempt to get SMC IF Version Number Failed!",
233 return -EINVAL);
234 PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
235 &smc_driver_if_version),
236 "Attempt to read SMC IF Version Number Failed!",
237 return -EINVAL);
238
239 if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION)
240 return -EINVAL;
241
242 return 0;
243}
244
245/* sdma is disabled by default in vbios, need to re-enable in driver */
246static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr)
247{
248 PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
249 PPSMC_MSG_PowerUpSdma),
250 "Attempt to power up sdma Failed!",
251 return -EINVAL);
252
253 return 0;
254}
255
256static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr)
257{
258 PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
259 PPSMC_MSG_PowerDownSdma),
260 "Attempt to power down sdma Failed!",
261 return -EINVAL);
262
263 return 0;
264}
265
266/* vcn is disabled by default in vbios, need to re-enable in driver */
267static int rv_smc_enable_vcn(struct pp_hwmgr *hwmgr)
268{
269 PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
270 PPSMC_MSG_PowerUpVcn, 0),
271 "Attempt to power up vcn Failed!",
272 return -EINVAL);
273
274 return 0;
275}
276
277static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr)
278{
279 PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
280 PPSMC_MSG_PowerDownVcn, 0),
281 "Attempt to power down vcn Failed!",
282 return -EINVAL);
283
284 return 0;
285}
286
287static int rv_smu_fini(struct pp_hwmgr *hwmgr)
288{
289 struct rv_smumgr *priv =
290 (struct rv_smumgr *)(hwmgr->smu_backend);
291
292 if (priv) {
293 rv_smc_disable_sdma(hwmgr);
294 rv_smc_disable_vcn(hwmgr);
295 cgs_free_gpu_mem(hwmgr->device,
296 priv->smu_tables.entry[WMTABLE].handle);
297 cgs_free_gpu_mem(hwmgr->device,
298 priv->smu_tables.entry[CLOCKTABLE].handle);
299 kfree(hwmgr->smu_backend);
300 hwmgr->smu_backend = NULL;
301 }
302
303 return 0;
304}
305
306static int rv_start_smu(struct pp_hwmgr *hwmgr)
307{
308 struct cgs_firmware_info info = {0};
309
310 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
311 rv_read_arg_from_smc(hwmgr, &hwmgr->smu_version);
312 info.version = hwmgr->smu_version >> 8;
313
314 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
315
316 if (rv_verify_smc_interface(hwmgr))
317 return -EINVAL;
318 if (rv_smc_enable_sdma(hwmgr))
319 return -EINVAL;
320 if (rv_smc_enable_vcn(hwmgr))
321 return -EINVAL;
322
323 return 0;
324}
325
326static int rv_smu_init(struct pp_hwmgr *hwmgr)
327{
328 struct rv_smumgr *priv;
329 uint64_t mc_addr;
330 void *kaddr = NULL;
331 unsigned long handle;
332
333 priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL);
334
335 if (!priv)
336 return -ENOMEM;
337
338 hwmgr->smu_backend = priv;
339
340 /* allocate space for watermarks table */
341 smu_allocate_memory(hwmgr->device,
342 sizeof(Watermarks_t),
343 CGS_GPU_MEM_TYPE__GART_CACHEABLE,
344 PAGE_SIZE,
345 &mc_addr,
346 &kaddr,
347 &handle);
348
349 PP_ASSERT_WITH_CODE(kaddr,
350 "[rv_smu_init] Out of memory for wmtable.",
351 kfree(hwmgr->smu_backend);
352 hwmgr->smu_backend = NULL;
353 return -EINVAL);
354
355 priv->smu_tables.entry[WMTABLE].version = 0x01;
356 priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
357 priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
358 priv->smu_tables.entry[WMTABLE].table_addr_high =
359 smu_upper_32_bits(mc_addr);
360 priv->smu_tables.entry[WMTABLE].table_addr_low =
361 smu_lower_32_bits(mc_addr);
362 priv->smu_tables.entry[WMTABLE].table = kaddr;
363 priv->smu_tables.entry[WMTABLE].handle = handle;
364
365 /* allocate space for watermarks table */
366 smu_allocate_memory(hwmgr->device,
367 sizeof(DpmClocks_t),
368 CGS_GPU_MEM_TYPE__GART_CACHEABLE,
369 PAGE_SIZE,
370 &mc_addr,
371 &kaddr,
372 &handle);
373
374 PP_ASSERT_WITH_CODE(kaddr,
375 "[rv_smu_init] Out of memory for CLOCKTABLE.",
376 cgs_free_gpu_mem(hwmgr->device,
377 (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
378 kfree(hwmgr->smu_backend);
379 hwmgr->smu_backend = NULL;
380 return -EINVAL);
381
382 priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
383 priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t);
384 priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
385 priv->smu_tables.entry[CLOCKTABLE].table_addr_high =
386 smu_upper_32_bits(mc_addr);
387 priv->smu_tables.entry[CLOCKTABLE].table_addr_low =
388 smu_lower_32_bits(mc_addr);
389 priv->smu_tables.entry[CLOCKTABLE].table = kaddr;
390 priv->smu_tables.entry[CLOCKTABLE].handle = handle;
391
392 return 0;
393}
394
395const struct pp_smumgr_func rv_smu_funcs = {
396 .smu_init = &rv_smu_init,
397 .smu_fini = &rv_smu_fini,
398 .start_smu = &rv_start_smu,
399 .request_smu_load_specific_fw = NULL,
400 .send_msg_to_smc = &rv_send_msg_to_smc,
401 .send_msg_to_smc_with_parameter = &rv_send_msg_to_smc_with_parameter,
402 .download_pptable_settings = NULL,
403 .upload_pptable_settings = NULL,
404};
405
406
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
new file mode 100644
index 000000000000..bc53f2beda30
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -0,0 +1,344 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "smumgr.h"
25#include "smu10_inc.h"
26#include "pp_soc15.h"
27#include "smu10_smumgr.h"
28#include "ppatomctrl.h"
29#include "rv_ppsmc.h"
30#include "smu10_driver_if.h"
31#include "smu10.h"
32#include "ppatomctrl.h"
33#include "pp_debug.h"
34
35
36#define VOLTAGE_SCALE 4
37
38#define BUFFER_SIZE 80000
39#define MAX_STRING_SIZE 15
40#define BUFFER_SIZETWO 131072
41
42#define MP0_Public 0x03800000
43#define MP0_SRAM 0x03900000
44#define MP1_Public 0x03b00000
45#define MP1_SRAM 0x03c00004
46
47#define smnMP1_FIRMWARE_FLAGS 0x3010028
48
49
50static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
51{
52 uint32_t reg;
53
54 reg = soc15_get_register_offset(MP1_HWID, 0,
55 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
56
57 phm_wait_for_register_unequal(hwmgr, reg,
58 0, MP1_C2PMSG_90__CONTENT_MASK);
59
60 return cgs_read_register(hwmgr->device, reg);
61}
62
63static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
64 uint16_t msg)
65{
66 uint32_t reg;
67
68 reg = soc15_get_register_offset(MP1_HWID, 0,
69 mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
70 cgs_write_register(hwmgr->device, reg, msg);
71
72 return 0;
73}
74
75static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
76{
77 uint32_t reg;
78
79 reg = soc15_get_register_offset(MP1_HWID, 0,
80 mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
81
82 return cgs_read_register(hwmgr->device, reg);
83}
84
85static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
86{
87 uint32_t reg;
88
89 smu10_wait_for_response(hwmgr);
90
91 reg = soc15_get_register_offset(MP1_HWID, 0,
92 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
93 cgs_write_register(hwmgr->device, reg, 0);
94
95 smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
96
97 if (smu10_wait_for_response(hwmgr) == 0)
98 printk("Failed to send Message %x.\n", msg);
99
100 return 0;
101}
102
103
104static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
105 uint16_t msg, uint32_t parameter)
106{
107 uint32_t reg;
108
109 smu10_wait_for_response(hwmgr);
110
111 reg = soc15_get_register_offset(MP1_HWID, 0,
112 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
113 cgs_write_register(hwmgr->device, reg, 0);
114
115 reg = soc15_get_register_offset(MP1_HWID, 0,
116 mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
117 cgs_write_register(hwmgr->device, reg, parameter);
118
119 smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
120
121
122 if (smu10_wait_for_response(hwmgr) == 0)
123 printk("Failed to send Message %x.\n", msg);
124
125 return 0;
126}
127
128static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
129 uint8_t *table, int16_t table_id)
130{
131 struct smu10_smumgr *priv =
132 (struct smu10_smumgr *)(hwmgr->smu_backend);
133
134 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
135 "Invalid SMU Table ID!", return -EINVAL;);
136 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
137 "Invalid SMU Table version!", return -EINVAL;);
138 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
139 "Invalid SMU Table Length!", return -EINVAL;);
140 smu10_send_msg_to_smc_with_parameter(hwmgr,
141 PPSMC_MSG_SetDriverDramAddrHigh,
142 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
143 smu10_send_msg_to_smc_with_parameter(hwmgr,
144 PPSMC_MSG_SetDriverDramAddrLow,
145 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
146 smu10_send_msg_to_smc_with_parameter(hwmgr,
147 PPSMC_MSG_TransferTableSmu2Dram,
148 priv->smu_tables.entry[table_id].table_id);
149
150 memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
151 priv->smu_tables.entry[table_id].size);
152
153 return 0;
154}
155
156static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
157 uint8_t *table, int16_t table_id)
158{
159 struct smu10_smumgr *priv =
160 (struct smu10_smumgr *)(hwmgr->smu_backend);
161
162 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
163 "Invalid SMU Table ID!", return -EINVAL;);
164 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
165 "Invalid SMU Table version!", return -EINVAL;);
166 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
167 "Invalid SMU Table Length!", return -EINVAL;);
168
169 memcpy(priv->smu_tables.entry[table_id].table, table,
170 priv->smu_tables.entry[table_id].size);
171
172 smu10_send_msg_to_smc_with_parameter(hwmgr,
173 PPSMC_MSG_SetDriverDramAddrHigh,
174 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
175 smu10_send_msg_to_smc_with_parameter(hwmgr,
176 PPSMC_MSG_SetDriverDramAddrLow,
177 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
178 smu10_send_msg_to_smc_with_parameter(hwmgr,
179 PPSMC_MSG_TransferTableDram2Smu,
180 priv->smu_tables.entry[table_id].table_id);
181
182 return 0;
183}
184
185static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
186{
187 uint32_t smc_driver_if_version;
188
189 smu10_send_msg_to_smc(hwmgr,
190 PPSMC_MSG_GetDriverIfVersion);
191 smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
192
193 if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION) {
194 pr_err("Attempt to read SMC IF Version Number Failed!\n");
195 return -EINVAL;
196 }
197
198 return 0;
199}
200
201/* sdma is disabled by default in vbios, need to re-enable in driver */
202static void smu10_smc_enable_sdma(struct pp_hwmgr *hwmgr)
203{
204 smu10_send_msg_to_smc(hwmgr,
205 PPSMC_MSG_PowerUpSdma);
206}
207
208static void smu10_smc_disable_sdma(struct pp_hwmgr *hwmgr)
209{
210 smu10_send_msg_to_smc(hwmgr,
211 PPSMC_MSG_PowerDownSdma);
212}
213
214/* vcn is disabled by default in vbios, need to re-enable in driver */
215static void smu10_smc_enable_vcn(struct pp_hwmgr *hwmgr)
216{
217 smu10_send_msg_to_smc_with_parameter(hwmgr,
218 PPSMC_MSG_PowerUpVcn, 0);
219}
220
221static void smu10_smc_disable_vcn(struct pp_hwmgr *hwmgr)
222{
223 smu10_send_msg_to_smc_with_parameter(hwmgr,
224 PPSMC_MSG_PowerDownVcn, 0);
225}
226
227static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
228{
229 struct smu10_smumgr *priv =
230 (struct smu10_smumgr *)(hwmgr->smu_backend);
231
232 if (priv) {
233 smu10_smc_disable_sdma(hwmgr);
234 smu10_smc_disable_vcn(hwmgr);
235 amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
236 &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
237 &priv->smu_tables.entry[SMU10_WMTABLE].table);
238 amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
239 &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
240 &priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
241 kfree(hwmgr->smu_backend);
242 hwmgr->smu_backend = NULL;
243 }
244
245 return 0;
246}
247
248static int smu10_start_smu(struct pp_hwmgr *hwmgr)
249{
250 struct amdgpu_device *adev = hwmgr->adev;
251
252 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
253 hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
254 adev->pm.fw_version = hwmgr->smu_version >> 8;
255
256 if (smu10_verify_smc_interface(hwmgr))
257 return -EINVAL;
258 smu10_smc_enable_sdma(hwmgr);
259 smu10_smc_enable_vcn(hwmgr);
260 return 0;
261}
262
263static int smu10_smu_init(struct pp_hwmgr *hwmgr)
264{
265 struct smu10_smumgr *priv;
266 int r;
267
268 priv = kzalloc(sizeof(struct smu10_smumgr), GFP_KERNEL);
269
270 if (!priv)
271 return -ENOMEM;
272
273 hwmgr->smu_backend = priv;
274
275 /* allocate space for watermarks table */
276 r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
277 sizeof(Watermarks_t),
278 PAGE_SIZE,
279 AMDGPU_GEM_DOMAIN_VRAM,
280 &priv->smu_tables.entry[SMU10_WMTABLE].handle,
281 &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
282 &priv->smu_tables.entry[SMU10_WMTABLE].table);
283
284 if (r)
285 goto err0;
286
287 priv->smu_tables.entry[SMU10_WMTABLE].version = 0x01;
288 priv->smu_tables.entry[SMU10_WMTABLE].size = sizeof(Watermarks_t);
289 priv->smu_tables.entry[SMU10_WMTABLE].table_id = TABLE_WATERMARKS;
290
291 /* allocate space for watermarks table */
292 r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
293 sizeof(DpmClocks_t),
294 PAGE_SIZE,
295 AMDGPU_GEM_DOMAIN_VRAM,
296 &priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
297 &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
298 &priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
299
300 if (r)
301 goto err1;
302
303 priv->smu_tables.entry[SMU10_CLOCKTABLE].version = 0x01;
304 priv->smu_tables.entry[SMU10_CLOCKTABLE].size = sizeof(DpmClocks_t);
305 priv->smu_tables.entry[SMU10_CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
306
307 return 0;
308
309err1:
310 amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
311 &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
312 &priv->smu_tables.entry[SMU10_WMTABLE].table);
313err0:
314 kfree(priv);
315 return -EINVAL;
316}
317
318static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
319{
320 int ret;
321
322 if (rw)
323 ret = smu10_copy_table_from_smc(hwmgr, table, table_id);
324 else
325 ret = smu10_copy_table_to_smc(hwmgr, table, table_id);
326
327 return ret;
328}
329
330
331const struct pp_smumgr_func smu10_smu_funcs = {
332 .smu_init = &smu10_smu_init,
333 .smu_fini = &smu10_smu_fini,
334 .start_smu = &smu10_start_smu,
335 .request_smu_load_specific_fw = NULL,
336 .send_msg_to_smc = &smu10_send_msg_to_smc,
337 .send_msg_to_smc_with_parameter = &smu10_send_msg_to_smc_with_parameter,
338 .download_pptable_settings = NULL,
339 .upload_pptable_settings = NULL,
340 .get_argument = smu10_read_arg_from_smc,
341 .smc_table_manager = smu10_smc_table_manager,
342};
343
344
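The message path in the new smu10_smumgr.c is a three-register mailbox: wait until MP1_SMN_C2PMSG_90 is non-zero (the previous exchange has completed), clear it, stage the argument in C2PMSG_82, write the message ID to C2PMSG_66, then wait on C2PMSG_90 again and read any return value back out of C2PMSG_82. A minimal self-contained sketch of that handshake follows; struct mbox and the fake_smu_step() firmware model are invented here so the sketch can run without hardware.

/* Standalone sketch (not driver code) of the doorbell handshake implemented
 * by smu10_send_msg_to_smc_with_parameter() above.
 */
#include <stdint.h>
#include <stdio.h>

struct mbox { uint32_t msg_66, arg_82, resp_90; };

/* toy firmware model: ack every message, echo the argument plus one */
static void fake_smu_step(struct mbox *m)
{
        if (m->resp_90 == 0 && m->msg_66 != 0) {
                m->arg_82 += 1;
                m->msg_66  = 0;
                m->resp_90 = 1;                 /* non-zero = response ready */
        }
}

static uint32_t wait_for_response(struct mbox *m)
{
        while (m->resp_90 == 0)                 /* phm_wait_for_register_unequal() */
                fake_smu_step(m);
        return m->resp_90;
}

static uint32_t send_msg_with_parameter(struct mbox *m, uint16_t msg, uint32_t param)
{
        wait_for_response(m);                   /* previous exchange must be done */
        m->resp_90 = 0;                         /* clear the response register    */
        m->arg_82  = param;                     /* stage the argument             */
        m->msg_66  = msg;                       /* ring the doorbell              */
        if (wait_for_response(m) == 0)
                printf("Failed to send Message %x.\n", (unsigned)msg);
        return m->arg_82;                       /* smu10_read_arg_from_smc()      */
}

int main(void)
{
        struct mbox m = { .resp_90 = 1 };       /* SMU idle and ready */

        printf("reply: %u\n", (unsigned)send_msg_with_parameter(&m, 0x2, 41));
        return 0;
}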
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
index caebdbebdcd8..9c2be74a2b2f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
@@ -21,42 +21,30 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef PP_RAVEN_SMUMANAGER_H 24#ifndef PP_SMU10_SMUMANAGER_H
25#define PP_RAVEN_SMUMANAGER_H 25#define PP_SMU10_SMUMANAGER_H
26 26
27#include "rv_ppsmc.h" 27#include "rv_ppsmc.h"
28#include "smu10_driver_if.h" 28#include "smu10_driver_if.h"
29 29
30enum SMU_TABLE_ID { 30#define MAX_SMU_TABLE 2
31 WMTABLE = 0,
32 CLOCKTABLE,
33 MAX_SMU_TABLE,
34};
35 31
36struct smu_table_entry { 32struct smu_table_entry {
37 uint32_t version; 33 uint32_t version;
38 uint32_t size; 34 uint32_t size;
39 uint32_t table_id; 35 uint32_t table_id;
40 uint32_t table_addr_high; 36 uint64_t mc_addr;
41 uint32_t table_addr_low; 37 void *table;
42 uint8_t *table; 38 struct amdgpu_bo *handle;
43 unsigned long handle;
44}; 39};
45 40
46struct smu_table_array { 41struct smu_table_array {
47 struct smu_table_entry entry[MAX_SMU_TABLE]; 42 struct smu_table_entry entry[MAX_SMU_TABLE];
48}; 43};
49 44
50struct rv_smumgr { 45struct smu10_smumgr {
51 struct smu_table_array smu_tables; 46 struct smu_table_array smu_tables;
52}; 47};
53 48
54int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
55bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr);
56int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
57 uint8_t *table, int16_t table_id);
58int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
59 uint8_t *table, int16_t table_id);
60
61 49
62#endif 50#endif
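With this rename, smu_table_entry carries a single 64-bit MC address (plus the amdgpu_bo handle and CPU mapping) instead of separate high/low words; the split into the SetDriverDramAddrHigh/SetDriverDramAddrLow parameters now happens only at message time via upper_32_bits()/lower_32_bits(), as seen in smu10_copy_table_{to,from}_smc() above. A small sketch of that split, with a made-up address:

/* Standalone sketch (not driver code): splitting the 64-bit MC address only
 * when it is handed to the SMU.
 */
#include <stdint.h>
#include <stdio.h>

struct table_entry_sketch {
        uint64_t mc_addr;               /* was table_addr_high/table_addr_low */
        void    *table;                 /* CPU mapping of the same buffer     */
};

int main(void)
{
        struct table_entry_sketch e = { .mc_addr = 0x0000000f0012a000ULL };

        printf("SetDriverDramAddrHigh <- 0x%08x\n",
               (unsigned)(e.mc_addr >> 32));           /* upper_32_bits() */
        printf("SetDriverDramAddrLow  <- 0x%08x\n",
               (unsigned)(e.mc_addr & 0xffffffffu));   /* lower_32_bits() */
        return 0;
}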
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 311ff3718618..0399c10d2be0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -369,8 +369,8 @@ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
369 if (!result) { 369 if (!result) {
370 entry->version = info.fw_version; 370 entry->version = info.fw_version;
371 entry->id = (uint16_t)fw_type; 371 entry->id = (uint16_t)fw_type;
372 entry->image_addr_high = smu_upper_32_bits(info.mc_addr); 372 entry->image_addr_high = upper_32_bits(info.mc_addr);
373 entry->image_addr_low = smu_lower_32_bits(info.mc_addr); 373 entry->image_addr_low = lower_32_bits(info.mc_addr);
374 entry->meta_data_addr_high = 0; 374 entry->meta_data_addr_high = 0;
375 entry->meta_data_addr_low = 0; 375 entry->meta_data_addr_low = 0;
376 376
@@ -412,10 +412,10 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
412 if (!cgs_is_virtualization_enabled(hwmgr->device)) { 412 if (!cgs_is_virtualization_enabled(hwmgr->device)) {
413 smu7_send_msg_to_smc_with_parameter(hwmgr, 413 smu7_send_msg_to_smc_with_parameter(hwmgr,
414 PPSMC_MSG_SMU_DRAM_ADDR_HI, 414 PPSMC_MSG_SMU_DRAM_ADDR_HI,
415 smu_data->smu_buffer.mc_addr_high); 415 upper_32_bits(smu_data->smu_buffer.mc_addr));
416 smu7_send_msg_to_smc_with_parameter(hwmgr, 416 smu7_send_msg_to_smc_with_parameter(hwmgr,
417 PPSMC_MSG_SMU_DRAM_ADDR_LO, 417 PPSMC_MSG_SMU_DRAM_ADDR_LO,
418 smu_data->smu_buffer.mc_addr_low); 418 lower_32_bits(smu_data->smu_buffer.mc_addr));
419 } 419 }
420 fw_to_load = UCODE_ID_RLC_G_MASK 420 fw_to_load = UCODE_ID_RLC_G_MASK
421 + UCODE_ID_SDMA0_MASK 421 + UCODE_ID_SDMA0_MASK
@@ -472,8 +472,8 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
472 UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), 472 UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
473 "Failed to Get Firmware Entry.", return -EINVAL); 473 "Failed to Get Firmware Entry.", return -EINVAL);
474 474
475 smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); 475 smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
476 smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); 476 smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
477 477
478 if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load)) 478 if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
479 pr_err("Fail to Request SMU Load uCode"); 479 pr_err("Fail to Request SMU Load uCode");
@@ -585,9 +585,8 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
585int smu7_init(struct pp_hwmgr *hwmgr) 585int smu7_init(struct pp_hwmgr *hwmgr)
586{ 586{
587 struct smu7_smumgr *smu_data; 587 struct smu7_smumgr *smu_data;
588 uint8_t *internal_buf;
589 uint64_t mc_addr = 0; 588 uint64_t mc_addr = 0;
590 589 int r;
591 /* Allocate memory for backend private data */ 590 /* Allocate memory for backend private data */
592 smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); 591 smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
593 smu_data->header_buffer.data_size = 592 smu_data->header_buffer.data_size =
@@ -595,52 +594,42 @@ int smu7_init(struct pp_hwmgr *hwmgr)
595 594
596/* Allocate FW image data structure and header buffer and 595/* Allocate FW image data structure and header buffer and
597 * send the header buffer address to SMU */ 596 * send the header buffer address to SMU */
598 smu_allocate_memory(hwmgr->device, 597 r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
599 smu_data->header_buffer.data_size, 598 smu_data->header_buffer.data_size,
600 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
601 PAGE_SIZE, 599 PAGE_SIZE,
600 AMDGPU_GEM_DOMAIN_VRAM,
601 &smu_data->header_buffer.handle,
602 &mc_addr, 602 &mc_addr,
603 &smu_data->header_buffer.kaddr, 603 &smu_data->header_buffer.kaddr);
604 &smu_data->header_buffer.handle);
605 604
606 smu_data->header = smu_data->header_buffer.kaddr; 605 if (r)
607 smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); 606 return -EINVAL;
608 smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
609 607
610 PP_ASSERT_WITH_CODE((NULL != smu_data->header), 608 smu_data->header = smu_data->header_buffer.kaddr;
611 "Out of memory.", 609 smu_data->header_buffer.mc_addr = mc_addr;
612 kfree(hwmgr->smu_backend);
613 cgs_free_gpu_mem(hwmgr->device,
614 (cgs_handle_t)smu_data->header_buffer.handle);
615 return -EINVAL);
616 610
617 if (cgs_is_virtualization_enabled(hwmgr->device)) 611 if (cgs_is_virtualization_enabled(hwmgr->device))
618 return 0; 612 return 0;
619 613
620 smu_data->smu_buffer.data_size = 200*4096; 614 smu_data->smu_buffer.data_size = 200*4096;
621 smu_allocate_memory(hwmgr->device, 615 r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
622 smu_data->smu_buffer.data_size, 616 smu_data->smu_buffer.data_size,
623 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
624 PAGE_SIZE, 617 PAGE_SIZE,
618 AMDGPU_GEM_DOMAIN_VRAM,
619 &smu_data->smu_buffer.handle,
625 &mc_addr, 620 &mc_addr,
626 &smu_data->smu_buffer.kaddr, 621 &smu_data->smu_buffer.kaddr);
627 &smu_data->smu_buffer.handle);
628
629 internal_buf = smu_data->smu_buffer.kaddr;
630 smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
631 smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
632 622
633 PP_ASSERT_WITH_CODE((NULL != internal_buf), 623 if (r) {
634 "Out of memory.", 624 amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
635 kfree(hwmgr->smu_backend); 625 &smu_data->header_buffer.mc_addr,
636 cgs_free_gpu_mem(hwmgr->device, 626 &smu_data->header_buffer.kaddr);
637 (cgs_handle_t)smu_data->smu_buffer.handle); 627 return -EINVAL;
638 return -EINVAL); 628 }
629 smu_data->smu_buffer.mc_addr = mc_addr;
639 630
640 if (smum_is_hw_avfs_present(hwmgr)) 631 if (smum_is_hw_avfs_present(hwmgr))
641 smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; 632 hwmgr->avfs_supported = true;
642 else
643 smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
644 633
645 return 0; 634 return 0;
646} 635}
@@ -650,9 +639,14 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
650{ 639{
651 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); 640 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
652 641
653 smu_free_memory(hwmgr->device, (void *) smu_data->header_buffer.handle); 642 amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
643 &smu_data->header_buffer.mc_addr,
644 &smu_data->header_buffer.kaddr);
645
654 if (!cgs_is_virtualization_enabled(hwmgr->device)) 646 if (!cgs_is_virtualization_enabled(hwmgr->device))
655 smu_free_memory(hwmgr->device, (void *) smu_data->smu_buffer.handle); 647 amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
648 &smu_data->smu_buffer.mc_addr,
649 &smu_data->smu_buffer.kaddr);
656 650
657 kfree(hwmgr->smu_backend); 651 kfree(hwmgr->smu_backend);
658 hwmgr->smu_backend = NULL; 652 hwmgr->smu_backend = NULL;
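
The hunks above replace the CGS-backed smu_allocate_memory()/smu_free_memory() helpers with direct amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() calls, and the buffer entry now carries a single 64-bit mc_addr instead of a high/low pair. A minimal sketch of the resulting allocate/free pairing, using only the calls and smu7_buffer_entry fields visible in this diff (example_alloc_entry() and example_free_entry() are hypothetical names):

static int example_alloc_entry(struct pp_hwmgr *hwmgr,
			       struct smu7_buffer_entry *entry,
			       uint32_t size)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)hwmgr->adev;
	uint64_t mc_addr;
	int r;

	/* One call returns the BO handle, its GPU (MC) address and a
	 * persistent CPU mapping. */
	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &entry->handle,
				    &mc_addr,
				    &entry->kaddr);
	if (r)
		return r;

	entry->data_size = size;
	entry->mc_addr = mc_addr;
	return 0;
}

static void example_free_entry(struct smu7_buffer_entry *entry)
{
	/* Unpins, unmaps and releases the BO in one call. */
	amdgpu_bo_free_kernel(&entry->handle, &entry->mc_addr, &entry->kaddr);
}
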
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index c87263bc0caa..126d300259ba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -31,15 +31,9 @@
31 31
32struct smu7_buffer_entry { 32struct smu7_buffer_entry {
33 uint32_t data_size; 33 uint32_t data_size;
34 uint32_t mc_addr_low; 34 uint64_t mc_addr;
35 uint32_t mc_addr_high;
36 void *kaddr; 35 void *kaddr;
37 unsigned long handle; 36 struct amdgpu_bo *handle;
38};
39
40struct smu7_avfs {
41 enum AVFS_BTC_STATUS avfs_btc_status;
42 uint32_t avfs_btc_param;
43}; 37};
44 38
45struct smu7_smumgr { 39struct smu7_smumgr {
@@ -56,7 +50,7 @@ struct smu7_smumgr {
56 uint32_t ulv_setting_starts; 50 uint32_t ulv_setting_starts;
57 uint8_t security_hard_key; 51 uint8_t security_hard_key;
58 uint32_t acpi_optimization; 52 uint32_t acpi_optimization;
59 struct smu7_avfs avfs; 53 uint32_t avfs_btc_param;
60}; 54};
61 55
62 56
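
With mc_addr_low/mc_addr_high collapsed into one 64-bit mc_addr, the two halves are derived only where a 32-bit SMU message argument is needed, as the smu7_smumgr.c hunk above does. A short sketch of that pattern; send_header_addr() is a hypothetical wrapper, while the message IDs and smu7_send_msg_to_smc_with_parameter() are taken from the hunk above:

static void send_header_addr(struct pp_hwmgr *hwmgr,
			     struct smu7_buffer_entry *header)
{
	/* Split the 64-bit MC address at the point of use. */
	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI,
					    upper_32_bits(header->mc_addr));
	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO,
					    lower_32_bits(header->mc_addr));
}
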
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
new file mode 100644
index 000000000000..8c49704b81af
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -0,0 +1,891 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/gfp.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29
30#include "cgs_common.h"
31#include "smu/smu_8_0_d.h"
32#include "smu/smu_8_0_sh_mask.h"
33#include "smu8.h"
34#include "smu8_fusion.h"
35#include "smu8_smumgr.h"
36#include "cz_ppsmc.h"
37#include "smu_ucode_xfer_cz.h"
38#include "gca/gfx_8_0_d.h"
39#include "gca/gfx_8_0_sh_mask.h"
40#include "smumgr.h"
41
42#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
43
44static const enum smu8_scratch_entry firmware_list[] = {
45 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0,
46 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
47 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
48 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
49 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
50 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
51 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
52 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
53};
54
55static int smu8_smum_get_argument(struct pp_hwmgr *hwmgr)
56{
57 if (hwmgr == NULL || hwmgr->device == NULL)
58 return -EINVAL;
59
60 return cgs_read_register(hwmgr->device,
61 mmSMU_MP1_SRBM2P_ARG_0);
62}
63
64static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
65{
66 int result = 0;
67
68 if (hwmgr == NULL || hwmgr->device == NULL)
69 return -EINVAL;
70
71 result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
72 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
73 if (result != 0) {
74 pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
75 return result;
76 }
77
78 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
79 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
80
81 return 0;
82}
83
84/* Send a message to the SMC, and wait for its response.*/
85static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
86{
87 int result = 0;
88
89 result = smu8_send_msg_to_smc_async(hwmgr, msg);
90 if (result != 0)
91 return result;
92
93 return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
94 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
95}
96
97static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
98 uint32_t smc_address, uint32_t limit)
99{
100 if (hwmgr == NULL || hwmgr->device == NULL)
101 return -EINVAL;
102
103 if (0 != (3 & smc_address)) {
104 pr_err("SMC address must be 4 byte aligned\n");
105 return -EINVAL;
106 }
107
108 if (limit <= (smc_address + 3)) {
109 pr_err("SMC address beyond the SMC RAM area\n");
110 return -EINVAL;
111 }
112
113 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
114 SMN_MP1_SRAM_START_ADDR + smc_address);
115
116 return 0;
117}
118
119static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
120 uint32_t smc_address, uint32_t value, uint32_t limit)
121{
122 int result;
123
124 if (hwmgr == NULL || hwmgr->device == NULL)
125 return -EINVAL;
126
127 result = smu8_set_smc_sram_address(hwmgr, smc_address, limit);
128 if (!result)
129 cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
130
131 return result;
132}
133
134static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
135 uint16_t msg, uint32_t parameter)
136{
137 if (hwmgr == NULL || hwmgr->device == NULL)
138 return -EINVAL;
139
140 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
141
142 return smu8_send_msg_to_smc(hwmgr, msg);
143}
144
145static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
146 uint32_t firmware)
147{
148 int i;
149 uint32_t index = SMN_MP1_SRAM_START_ADDR +
150 SMU8_FIRMWARE_HEADER_LOCATION +
151 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
152
153 if (hwmgr == NULL || hwmgr->device == NULL)
154 return -EINVAL;
155
156 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
157
158 for (i = 0; i < hwmgr->usec_timeout; i++) {
159 if (firmware ==
160 (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
161 break;
162 udelay(1);
163 }
164
165 if (i >= hwmgr->usec_timeout) {
166		pr_err("SMU firmware load check timed out.\n");
167 return -EINVAL;
168 }
169
170 return 0;
171}
172
173static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
174{
175 uint32_t reg_data;
176 uint32_t tmp;
177 int ret = 0;
178 struct cgs_firmware_info info = {0};
179 struct smu8_smumgr *smu8_smu;
180
181 if (hwmgr == NULL || hwmgr->device == NULL)
182 return -EINVAL;
183
184 smu8_smu = hwmgr->smu_backend;
185 ret = cgs_get_firmware_info(hwmgr->device,
186 CGS_UCODE_ID_CP_MEC, &info);
187
188 if (ret)
189 return -EINVAL;
190
191 /* Disable MEC parsing/prefetching */
192 tmp = cgs_read_register(hwmgr->device,
193 mmCP_MEC_CNTL);
194 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
195 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
196 cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
197
198 tmp = cgs_read_register(hwmgr->device,
199 mmCP_CPC_IC_BASE_CNTL);
200
201 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
202 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
203 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
204 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
205 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
206
207 reg_data = lower_32_bits(info.mc_addr) &
208 PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
209 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
210
211 reg_data = upper_32_bits(info.mc_addr) &
212 PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
213 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
214
215 return 0;
216}
217
218static uint8_t smu8_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
219 enum smu8_scratch_entry firmware_enum)
220{
221 uint8_t ret = 0;
222
223 switch (firmware_enum) {
224 case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0:
225 ret = UCODE_ID_SDMA0;
226 break;
227 case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1:
228 if (hwmgr->chip_id == CHIP_STONEY)
229 ret = UCODE_ID_SDMA0;
230 else
231 ret = UCODE_ID_SDMA1;
232 break;
233 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE:
234 ret = UCODE_ID_CP_CE;
235 break;
236 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
237 ret = UCODE_ID_CP_PFP;
238 break;
239 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME:
240 ret = UCODE_ID_CP_ME;
241 break;
242 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
243 ret = UCODE_ID_CP_MEC_JT1;
244 break;
245 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
246 if (hwmgr->chip_id == CHIP_STONEY)
247 ret = UCODE_ID_CP_MEC_JT1;
248 else
249 ret = UCODE_ID_CP_MEC_JT2;
250 break;
251 case SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
252 ret = UCODE_ID_GMCON_RENG;
253 break;
254 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G:
255 ret = UCODE_ID_RLC_G;
256 break;
257 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
258 ret = UCODE_ID_RLC_SCRATCH;
259 break;
260 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
261 ret = UCODE_ID_RLC_SRM_ARAM;
262 break;
263 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
264 ret = UCODE_ID_RLC_SRM_DRAM;
265 break;
266 case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
267 ret = UCODE_ID_DMCU_ERAM;
268 break;
269 case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
270 ret = UCODE_ID_DMCU_IRAM;
271 break;
272 case SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
273 ret = TASK_ARG_INIT_MM_PWR_LOG;
274 break;
275 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
276 case SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
277 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
278 case SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
279 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START:
280 case SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
281 ret = TASK_ARG_REG_MMIO;
282 break;
283 case SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
284 ret = TASK_ARG_INIT_CLK_TABLE;
285 break;
286 }
287
288 return ret;
289}
290
291static enum cgs_ucode_id smu8_convert_fw_type_to_cgs(uint32_t fw_type)
292{
293 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
294
295 switch (fw_type) {
296 case UCODE_ID_SDMA0:
297 result = CGS_UCODE_ID_SDMA0;
298 break;
299 case UCODE_ID_SDMA1:
300 result = CGS_UCODE_ID_SDMA1;
301 break;
302 case UCODE_ID_CP_CE:
303 result = CGS_UCODE_ID_CP_CE;
304 break;
305 case UCODE_ID_CP_PFP:
306 result = CGS_UCODE_ID_CP_PFP;
307 break;
308 case UCODE_ID_CP_ME:
309 result = CGS_UCODE_ID_CP_ME;
310 break;
311 case UCODE_ID_CP_MEC_JT1:
312 result = CGS_UCODE_ID_CP_MEC_JT1;
313 break;
314 case UCODE_ID_CP_MEC_JT2:
315 result = CGS_UCODE_ID_CP_MEC_JT2;
316 break;
317 case UCODE_ID_RLC_G:
318 result = CGS_UCODE_ID_RLC_G;
319 break;
320 default:
321 break;
322 }
323
324 return result;
325}
326
327static int smu8_smu_populate_single_scratch_task(
328 struct pp_hwmgr *hwmgr,
329 enum smu8_scratch_entry fw_enum,
330 uint8_t type, bool is_last)
331{
332 uint8_t i;
333 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
334 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
335 struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
336
337 task->type = type;
338 task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
339 task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
340
341 for (i = 0; i < smu8_smu->scratch_buffer_length; i++)
342 if (smu8_smu->scratch_buffer[i].firmware_ID == fw_enum)
343 break;
344
345 if (i >= smu8_smu->scratch_buffer_length) {
346 pr_err("Invalid Firmware Type\n");
347 return -EINVAL;
348 }
349
350 task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
351 task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
352 task->size_bytes = smu8_smu->scratch_buffer[i].data_size;
353
354 if (SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
355 struct smu8_ih_meta_data *pIHReg_restore =
356 (struct smu8_ih_meta_data *)smu8_smu->scratch_buffer[i].kaddr;
357 pIHReg_restore->command =
358 METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
359 }
360
361 return 0;
362}
363
364static int smu8_smu_populate_single_ucode_load_task(
365 struct pp_hwmgr *hwmgr,
366 enum smu8_scratch_entry fw_enum,
367 bool is_last)
368{
369 uint8_t i;
370 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
371 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
372 struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
373
374 task->type = TASK_TYPE_UCODE_LOAD;
375 task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
376 task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
377
378 for (i = 0; i < smu8_smu->driver_buffer_length; i++)
379 if (smu8_smu->driver_buffer[i].firmware_ID == fw_enum)
380 break;
381
382 if (i >= smu8_smu->driver_buffer_length) {
383 pr_err("Invalid Firmware Type\n");
384 return -EINVAL;
385 }
386
387 task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
388 task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
389 task->size_bytes = smu8_smu->driver_buffer[i].data_size;
390
391 return 0;
392}
393
394static int smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
395{
396 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
397
398 smu8_smu->toc_entry_aram = smu8_smu->toc_entry_used_count;
399 smu8_smu_populate_single_scratch_task(hwmgr,
400 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
401 TASK_TYPE_UCODE_SAVE, true);
402
403 return 0;
404}
405
406static int smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
407{
408 int i;
409 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
410 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
411
412 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
413 toc->JobList[i] = (uint8_t)IGNORE_JOB;
414
415 return 0;
416}
417
418static int smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
419{
420 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
421 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
422
423 toc->JobList[JOB_GFX_SAVE] = (uint8_t)smu8_smu->toc_entry_used_count;
424 smu8_smu_populate_single_scratch_task(hwmgr,
425 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
426 TASK_TYPE_UCODE_SAVE, false);
427
428 smu8_smu_populate_single_scratch_task(hwmgr,
429 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
430 TASK_TYPE_UCODE_SAVE, true);
431
432 return 0;
433}
434
435
436static int smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
437{
438 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
439 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
440
441 toc->JobList[JOB_GFX_RESTORE] = (uint8_t)smu8_smu->toc_entry_used_count;
442
443 smu8_smu_populate_single_ucode_load_task(hwmgr,
444 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
445 smu8_smu_populate_single_ucode_load_task(hwmgr,
446 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
447 smu8_smu_populate_single_ucode_load_task(hwmgr,
448 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
449 smu8_smu_populate_single_ucode_load_task(hwmgr,
450 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
451
452 if (hwmgr->chip_id == CHIP_STONEY)
453 smu8_smu_populate_single_ucode_load_task(hwmgr,
454 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
455 else
456 smu8_smu_populate_single_ucode_load_task(hwmgr,
457 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
458
459 smu8_smu_populate_single_ucode_load_task(hwmgr,
460 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
461
462 /* populate scratch */
463 smu8_smu_populate_single_scratch_task(hwmgr,
464 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
465 TASK_TYPE_UCODE_LOAD, false);
466
467 smu8_smu_populate_single_scratch_task(hwmgr,
468 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
469 TASK_TYPE_UCODE_LOAD, false);
470
471 smu8_smu_populate_single_scratch_task(hwmgr,
472 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
473 TASK_TYPE_UCODE_LOAD, true);
474
475 return 0;
476}
477
478static int smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
479{
480 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
481
482 smu8_smu->toc_entry_power_profiling_index = smu8_smu->toc_entry_used_count;
483
484 smu8_smu_populate_single_scratch_task(hwmgr,
485 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
486 TASK_TYPE_INITIALIZE, true);
487 return 0;
488}
489
490static int smu8_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
491{
492 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
493
494 smu8_smu->toc_entry_initialize_index = smu8_smu->toc_entry_used_count;
495
496 smu8_smu_populate_single_ucode_load_task(hwmgr,
497 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
498 if (hwmgr->chip_id != CHIP_STONEY)
499 smu8_smu_populate_single_ucode_load_task(hwmgr,
500 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
501 smu8_smu_populate_single_ucode_load_task(hwmgr,
502 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
503 smu8_smu_populate_single_ucode_load_task(hwmgr,
504 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
505 smu8_smu_populate_single_ucode_load_task(hwmgr,
506 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
507 smu8_smu_populate_single_ucode_load_task(hwmgr,
508 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
509 if (hwmgr->chip_id != CHIP_STONEY)
510 smu8_smu_populate_single_ucode_load_task(hwmgr,
511 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
512 smu8_smu_populate_single_ucode_load_task(hwmgr,
513 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
514
515 return 0;
516}
517
518static int smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
519{
520 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
521
522 smu8_smu->toc_entry_clock_table = smu8_smu->toc_entry_used_count;
523
524 smu8_smu_populate_single_scratch_task(hwmgr,
525 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
526 TASK_TYPE_INITIALIZE, true);
527
528 return 0;
529}
530
531static int smu8_smu_construct_toc(struct pp_hwmgr *hwmgr)
532{
533 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
534
535 smu8_smu->toc_entry_used_count = 0;
536 smu8_smu_initialize_toc_empty_job_list(hwmgr);
537 smu8_smu_construct_toc_for_rlc_aram_save(hwmgr);
538 smu8_smu_construct_toc_for_vddgfx_enter(hwmgr);
539 smu8_smu_construct_toc_for_vddgfx_exit(hwmgr);
540 smu8_smu_construct_toc_for_power_profiling(hwmgr);
541 smu8_smu_construct_toc_for_bootup(hwmgr);
542 smu8_smu_construct_toc_for_clock_table(hwmgr);
543
544 return 0;
545}
546
547static int smu8_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
548{
549 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
550 uint32_t firmware_type;
551 uint32_t i;
552 int ret;
553 enum cgs_ucode_id ucode_id;
554 struct cgs_firmware_info info = {0};
555
556 smu8_smu->driver_buffer_length = 0;
557
558 for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
559
560 firmware_type = smu8_translate_firmware_enum_to_arg(hwmgr,
561 firmware_list[i]);
562
563 ucode_id = smu8_convert_fw_type_to_cgs(firmware_type);
564
565 ret = cgs_get_firmware_info(hwmgr->device,
566 ucode_id, &info);
567
568 if (ret == 0) {
569 smu8_smu->driver_buffer[i].mc_addr = info.mc_addr;
570
571 smu8_smu->driver_buffer[i].data_size = info.image_size;
572
573 smu8_smu->driver_buffer[i].firmware_ID = firmware_list[i];
574 smu8_smu->driver_buffer_length++;
575 }
576 }
577
578 return 0;
579}
580
581static int smu8_smu_populate_single_scratch_entry(
582 struct pp_hwmgr *hwmgr,
583 enum smu8_scratch_entry scratch_type,
584 uint32_t ulsize_byte,
585 struct smu8_buffer_entry *entry)
586{
587 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
588 uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
589
590 entry->data_size = ulsize_byte;
591 entry->kaddr = (char *) smu8_smu->smu_buffer.kaddr +
592 smu8_smu->smu_buffer_used_bytes;
593 entry->mc_addr = smu8_smu->smu_buffer.mc_addr + smu8_smu->smu_buffer_used_bytes;
594 entry->firmware_ID = scratch_type;
595
596 smu8_smu->smu_buffer_used_bytes += ulsize_aligned;
597
598 return 0;
599}
600
601static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
602{
603 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
604 unsigned long i;
605
606 for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
607 if (smu8_smu->scratch_buffer[i].firmware_ID
608 == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
609 break;
610 }
611
612 *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
613
614 smu8_send_msg_to_smc_with_parameter(hwmgr,
615 PPSMC_MSG_SetClkTableAddrHi,
616 upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
617
618 smu8_send_msg_to_smc_with_parameter(hwmgr,
619 PPSMC_MSG_SetClkTableAddrLo,
620 lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
621
622 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
623 smu8_smu->toc_entry_clock_table);
624
625 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
626
627 return 0;
628}
629
630static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
631{
632 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
633 unsigned long i;
634
635 for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
636 if (smu8_smu->scratch_buffer[i].firmware_ID
637 == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
638 break;
639 }
640
641 smu8_send_msg_to_smc_with_parameter(hwmgr,
642 PPSMC_MSG_SetClkTableAddrHi,
643 upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
644
645 smu8_send_msg_to_smc_with_parameter(hwmgr,
646 PPSMC_MSG_SetClkTableAddrLo,
647 lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
648
649 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
650 smu8_smu->toc_entry_clock_table);
651
652 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
653
654 return 0;
655}
656
657static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
658{
659 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
660 uint32_t smc_address;
661
662 if (!hwmgr->reload_fw) {
663 pr_info("skip reloading...\n");
664 return 0;
665 }
666
667 smu8_smu_populate_firmware_entries(hwmgr);
668
669 smu8_smu_construct_toc(hwmgr);
670
671 smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
672 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
673
674 smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
675
676 smu8_send_msg_to_smc_with_parameter(hwmgr,
677 PPSMC_MSG_DriverDramAddrHi,
678 upper_32_bits(smu8_smu->toc_buffer.mc_addr));
679
680 smu8_send_msg_to_smc_with_parameter(hwmgr,
681 PPSMC_MSG_DriverDramAddrLo,
682 lower_32_bits(smu8_smu->toc_buffer.mc_addr));
683
684 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
685
686 smu8_send_msg_to_smc_with_parameter(hwmgr,
687 PPSMC_MSG_ExecuteJob,
688 smu8_smu->toc_entry_aram);
689 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
690 smu8_smu->toc_entry_power_profiling_index);
691
692 return smu8_send_msg_to_smc_with_parameter(hwmgr,
693 PPSMC_MSG_ExecuteJob,
694 smu8_smu->toc_entry_initialize_index);
695}
696
697static int smu8_start_smu(struct pp_hwmgr *hwmgr)
698{
699 int ret = 0;
700 uint32_t fw_to_check = 0;
701 struct amdgpu_device *adev = hwmgr->adev;
702
703 uint32_t index = SMN_MP1_SRAM_START_ADDR +
704 SMU8_FIRMWARE_HEADER_LOCATION +
705 offsetof(struct SMU8_Firmware_Header, Version);
706
707
708 if (hwmgr == NULL || hwmgr->device == NULL)
709 return -EINVAL;
710
711 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
712 hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
713 adev->pm.fw_version = hwmgr->smu_version >> 8;
714
715 fw_to_check = UCODE_ID_RLC_G_MASK |
716 UCODE_ID_SDMA0_MASK |
717 UCODE_ID_SDMA1_MASK |
718 UCODE_ID_CP_CE_MASK |
719 UCODE_ID_CP_ME_MASK |
720 UCODE_ID_CP_PFP_MASK |
721 UCODE_ID_CP_MEC_JT1_MASK |
722 UCODE_ID_CP_MEC_JT2_MASK;
723
724 if (hwmgr->chip_id == CHIP_STONEY)
725 fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
726
727 ret = smu8_request_smu_load_fw(hwmgr);
728 if (ret)
729 pr_err("SMU firmware load failed\n");
730
731 smu8_check_fw_load_finish(hwmgr, fw_to_check);
732
733 ret = smu8_load_mec_firmware(hwmgr);
734 if (ret)
735		pr_err("MEC firmware load failed\n");
736
737 return ret;
738}
739
740static int smu8_smu_init(struct pp_hwmgr *hwmgr)
741{
742 int ret = 0;
743 struct smu8_smumgr *smu8_smu;
744
745 smu8_smu = kzalloc(sizeof(struct smu8_smumgr), GFP_KERNEL);
746 if (smu8_smu == NULL)
747 return -ENOMEM;
748
749 hwmgr->smu_backend = smu8_smu;
750
751 smu8_smu->toc_buffer.data_size = 4096;
752 smu8_smu->smu_buffer.data_size =
753 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
754 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
755 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
756 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
757 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
758
759 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
760 smu8_smu->toc_buffer.data_size,
761 PAGE_SIZE,
762 AMDGPU_GEM_DOMAIN_VRAM,
763 &smu8_smu->toc_buffer.handle,
764 &smu8_smu->toc_buffer.mc_addr,
765 &smu8_smu->toc_buffer.kaddr);
766 if (ret)
767 goto err2;
768
769 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
770 smu8_smu->smu_buffer.data_size,
771 PAGE_SIZE,
772 AMDGPU_GEM_DOMAIN_VRAM,
773 &smu8_smu->smu_buffer.handle,
774 &smu8_smu->smu_buffer.mc_addr,
775 &smu8_smu->smu_buffer.kaddr);
776 if (ret)
777 goto err1;
778
779 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
780 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
781 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
782 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
783		pr_err("Failed to populate firmware entry.\n");
784 goto err0;
785 }
786
787 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
788 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
789 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
790 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
791		pr_err("Failed to populate firmware entry.\n");
792 goto err0;
793 }
794 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
795 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
796 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
797 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
798		pr_err("Failed to populate firmware entry.\n");
799 goto err0;
800 }
801
802 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
803 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
804 sizeof(struct SMU8_MultimediaPowerLogData),
805 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
806		pr_err("Failed to populate firmware entry.\n");
807 goto err0;
808 }
809
810 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
811 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
812 sizeof(struct SMU8_Fusion_ClkTable),
813 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
814		pr_err("Failed to populate firmware entry.\n");
815 goto err0;
816 }
817
818 return 0;
819
820err0:
821 amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
822 &smu8_smu->smu_buffer.mc_addr,
823 &smu8_smu->smu_buffer.kaddr);
824err1:
825 amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
826 &smu8_smu->toc_buffer.mc_addr,
827 &smu8_smu->toc_buffer.kaddr);
828err2:
829 kfree(smu8_smu);
830 return -EINVAL;
831}
832
833static int smu8_smu_fini(struct pp_hwmgr *hwmgr)
834{
835 struct smu8_smumgr *smu8_smu;
836
837 if (hwmgr == NULL || hwmgr->device == NULL)
838 return -EINVAL;
839
840 smu8_smu = hwmgr->smu_backend;
841 if (smu8_smu) {
842 amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
843 &smu8_smu->toc_buffer.mc_addr,
844 &smu8_smu->toc_buffer.kaddr);
845 amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
846 &smu8_smu->smu_buffer.mc_addr,
847 &smu8_smu->smu_buffer.kaddr);
848 kfree(smu8_smu);
849 }
850
851 return 0;
852}
853
854static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
855 unsigned long check_feature)
856{
857 int result;
858 unsigned long features;
859
860 result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
861 if (result == 0) {
862 features = smum_get_argument(hwmgr);
863 if (features & check_feature)
864 return true;
865 }
866
867 return false;
868}
869
870static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr)
871{
872 if (smu8_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
873 return true;
874 return false;
875}
876
877const struct pp_smumgr_func smu8_smu_funcs = {
878 .smu_init = smu8_smu_init,
879 .smu_fini = smu8_smu_fini,
880 .start_smu = smu8_start_smu,
881 .check_fw_load_finish = smu8_check_fw_load_finish,
882 .request_smu_load_fw = NULL,
883 .request_smu_load_specific_fw = NULL,
884 .get_argument = smu8_smum_get_argument,
885 .send_msg_to_smc = smu8_send_msg_to_smc,
886 .send_msg_to_smc_with_parameter = smu8_send_msg_to_smc_with_parameter,
887 .download_pptable_settings = smu8_download_pptable_settings,
888 .upload_pptable_settings = smu8_upload_pptable_settings,
889 .is_dpm_running = smu8_is_dpm_running,
890};
891
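
The firmware load path in this new file is TOC-driven: smu8_smu_construct_toc() fills a table of ucode-load and scratch tasks in a VRAM buffer, and smu8_request_smu_load_fw() hands that buffer's address to the SMU and kicks individual jobs by TOC index. A condensed sketch of the hand-off, using only the wrappers and PPSMC messages defined above (run_toc_job() is a hypothetical name):

static int run_toc_job(struct pp_hwmgr *hwmgr, uint64_t toc_mc_addr,
		       uint32_t job_index)
{
	/* Tell the SMU where the driver-built TOC lives. */
	smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DriverDramAddrHi,
					    upper_32_bits(toc_mc_addr));
	smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DriverDramAddrLo,
					    lower_32_bits(toc_mc_addr));

	/* Let the SMU parse its job lists, then execute one TOC entry. */
	smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
	return smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
						   job_index);
}
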
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
new file mode 100644
index 000000000000..c7b61222d258
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _SMU8_SMUMGR_H_
24#define _SMU8_SMUMGR_H_
25
26
27#define MAX_NUM_FIRMWARE 8
28#define MAX_NUM_SCRATCH 11
29#define SMU8_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024
30#define SMU8_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048
31#define SMU8_SCRATCH_SIZE_SDMA_METADATA 1024
32#define SMU8_SCRATCH_SIZE_IH ((2*256+1)*4)
33
34#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000
35
36enum smu8_scratch_entry {
37 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
38 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
39 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
40 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
41 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
42 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
43 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
44 SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
45 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
46 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
47 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
48 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
49 SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
50 SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
51 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
52 SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
53 SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
54 SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
55 SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
56 SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START,
57 SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
58 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
59};
60
61struct smu8_buffer_entry {
62 uint32_t data_size;
63 uint64_t mc_addr;
64 void *kaddr;
65 enum smu8_scratch_entry firmware_ID;
66	struct amdgpu_bo *handle; /* BO handle, used when releasing the BO */
67};
68
69struct smu8_register_index_data_pair {
70 uint32_t offset;
71 uint32_t value;
72};
73
74struct smu8_ih_meta_data {
75 uint32_t command;
76 struct smu8_register_index_data_pair register_index_value_pair[1];
77};
78
79struct smu8_smumgr {
80 uint8_t driver_buffer_length;
81 uint8_t scratch_buffer_length;
82 uint16_t toc_entry_used_count;
83 uint16_t toc_entry_initialize_index;
84 uint16_t toc_entry_power_profiling_index;
85 uint16_t toc_entry_aram;
86 uint16_t toc_entry_ih_register_restore_task_index;
87 uint16_t toc_entry_clock_table;
88 uint16_t ih_register_restore_task_size;
89 uint16_t smu_buffer_used_bytes;
90
91 struct smu8_buffer_entry toc_buffer;
92 struct smu8_buffer_entry smu_buffer;
93 struct smu8_buffer_entry firmware_buffer;
94 struct smu8_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
95 struct smu8_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE];
96 struct smu8_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
97};
98
99#endif
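
The scratch entries described above are not separate allocations; smu8_smumgr.c carves them out of the single smu_buffer BO at 32-byte-aligned offsets (see smu8_smu_populate_single_scratch_entry()). A sketch of that sub-allocation, assuming only the smu8_buffer_entry fields above; carve_scratch() is a hypothetical name and ALIGN() is the standard kernel macro:

static void carve_scratch(struct smu8_smumgr *smu8_smu,
			  struct smu8_buffer_entry *entry,
			  enum smu8_scratch_entry id, uint32_t size)
{
	uint32_t used = smu8_smu->smu_buffer_used_bytes;

	entry->firmware_ID = id;
	entry->data_size = size;
	/* CPU and GPU addresses are both offsets into the shared buffer. */
	entry->kaddr = (uint8_t *)smu8_smu->smu_buffer.kaddr + used;
	entry->mc_addr = smu8_smu->smu_buffer.mc_addr + used;

	smu8_smu->smu_buffer_used_bytes += ALIGN(size, 32);
}
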
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 867388456530..04c45c236a73 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -28,7 +28,6 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <drm/amdgpu_drm.h> 29#include <drm/amdgpu_drm.h>
30#include "smumgr.h" 30#include "smumgr.h"
31#include "cgs_common.h"
32 31
33MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); 32MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
34MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin"); 33MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
@@ -144,57 +143,6 @@ int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
144 hwmgr, msg, parameter); 143 hwmgr, msg, parameter);
145} 144}
146 145
147int smu_allocate_memory(void *device, uint32_t size,
148 enum cgs_gpu_mem_type type,
149 uint32_t byte_align, uint64_t *mc_addr,
150 void **kptr, void *handle)
151{
152 int ret = 0;
153 cgs_handle_t cgs_handle;
154
155 if (device == NULL || handle == NULL ||
156 mc_addr == NULL || kptr == NULL)
157 return -EINVAL;
158
159 ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
160 (cgs_handle_t *)handle);
161 if (ret)
162 return -ENOMEM;
163
164 cgs_handle = *(cgs_handle_t *)handle;
165
166 ret = cgs_gmap_gpu_mem(device, cgs_handle, mc_addr);
167 if (ret)
168 goto error_gmap;
169
170 ret = cgs_kmap_gpu_mem(device, cgs_handle, kptr);
171 if (ret)
172 goto error_kmap;
173
174 return 0;
175
176error_kmap:
177 cgs_gunmap_gpu_mem(device, cgs_handle);
178
179error_gmap:
180 cgs_free_gpu_mem(device, cgs_handle);
181 return ret;
182}
183
184int smu_free_memory(void *device, void *handle)
185{
186 cgs_handle_t cgs_handle = (cgs_handle_t)handle;
187
188 if (device == NULL || handle == NULL)
189 return -EINVAL;
190
191 cgs_kunmap_gpu_mem(device, cgs_handle);
192 cgs_gunmap_gpu_mem(device, cgs_handle);
193 cgs_free_gpu_mem(device, cgs_handle);
194
195 return 0;
196}
197
198int smum_init_smc_table(struct pp_hwmgr *hwmgr) 146int smum_init_smc_table(struct pp_hwmgr *hwmgr)
199{ 147{
200 if (NULL != hwmgr->smumgr_funcs->init_smc_table) 148 if (NULL != hwmgr->smumgr_funcs->init_smc_table)
@@ -236,16 +184,6 @@ bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
236 return true; 184 return true;
237} 185}
238 186
239int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
240 struct amd_pp_profile *request)
241{
242 if (hwmgr->smumgr_funcs->populate_requested_graphic_levels)
243 return hwmgr->smumgr_funcs->populate_requested_graphic_levels(
244 hwmgr, request);
245
246 return 0;
247}
248
249bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr) 187bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
250{ 188{
251 if (hwmgr->smumgr_funcs->is_hw_avfs_present) 189 if (hwmgr->smumgr_funcs->is_hw_avfs_present)
@@ -253,3 +191,19 @@ bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
253 191
254 return false; 192 return false;
255} 193}
194
195int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting)
196{
197 if (hwmgr->smumgr_funcs->update_dpm_settings)
198 return hwmgr->smumgr_funcs->update_dpm_settings(hwmgr, profile_setting);
199
200 return -EINVAL;
201}
202
203int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
204{
205 if (hwmgr->smumgr_funcs->smc_table_manager)
206 return hwmgr->smumgr_funcs->smc_table_manager(hwmgr, table, table_id, rw);
207
208 return -EINVAL;
209}
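
The two wrappers added above dispatch through the backend function table and return -EINVAL when a backend does not implement the hook. A hypothetical caller-side sketch for smum_update_dpm_settings(); the profile_mode_setting field names follow their use in tonga_update_dpm_settings() below, and the values are placeholders:

static int example_apply_profile(struct pp_hwmgr *hwmgr)
{
	struct profile_mode_setting setting = {
		.bupdate_sclk   = 1,
		.sclk_activity  = 30,	/* activity threshold */
		.sclk_up_hyst   = 0,
		.sclk_down_hyst = 5,
		.bupdate_mclk   = 0,
	};

	/* Lands in tonga_update_dpm_settings() (or another backend's
	 * ->update_dpm_settings hook) when one is registered. */
	return smum_update_dpm_settings(hwmgr, &setting);
}
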
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 79e5c05571bc..26cca8cce8f1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -222,7 +222,6 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
222static int tonga_smu_init(struct pp_hwmgr *hwmgr) 222static int tonga_smu_init(struct pp_hwmgr *hwmgr)
223{ 223{
224 struct tonga_smumgr *tonga_priv = NULL; 224 struct tonga_smumgr *tonga_priv = NULL;
225 int i;
226 225
227 tonga_priv = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL); 226 tonga_priv = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL);
228 if (tonga_priv == NULL) 227 if (tonga_priv == NULL)
@@ -230,11 +229,10 @@ static int tonga_smu_init(struct pp_hwmgr *hwmgr)
230 229
231 hwmgr->smu_backend = tonga_priv; 230 hwmgr->smu_backend = tonga_priv;
232 231
233 if (smu7_init(hwmgr)) 232 if (smu7_init(hwmgr)) {
233 kfree(tonga_priv);
234 return -EINVAL; 234 return -EINVAL;
235 235 }
236 for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
237 tonga_priv->activity_target[i] = 30;
238 236
239 return 0; 237 return 0;
240} 238}
@@ -416,7 +414,7 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
416 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); 414 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
417 } 415 }
418 416
419 if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) { 417 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
420 /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */ 418 /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
421 for (count = 0; count < vddgfx_level_count; count++) { 419 for (count = 0; count < vddgfx_level_count; count++) {
422 index = phm_get_voltage_index(vddgfx_lookup_table, 420 index = phm_get_voltage_index(vddgfx_lookup_table,
@@ -612,7 +610,6 @@ static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
612 610
613static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, 611static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
614 uint32_t engine_clock, 612 uint32_t engine_clock,
615 uint16_t sclk_activity_level_threshold,
616 SMU72_Discrete_GraphicsLevel *graphic_level) 613 SMU72_Discrete_GraphicsLevel *graphic_level)
617{ 614{
618 int result; 615 int result;
@@ -620,12 +617,18 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
620 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 617 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
621 struct phm_ppt_v1_information *pptable_info = 618 struct phm_ppt_v1_information *pptable_info =
622 (struct phm_ppt_v1_information *)(hwmgr->pptable); 619 (struct phm_ppt_v1_information *)(hwmgr->pptable);
620 phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
623 621
624 result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level); 622 result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
625 623
624 if (hwmgr->od_enabled)
625 vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_sclk;
626 else
627 vdd_dep_table = pptable_info->vdd_dep_on_sclk;
628
626 /* populate graphics levels*/ 629 /* populate graphics levels*/
627 result = tonga_get_dependency_volt_by_clk(hwmgr, 630 result = tonga_get_dependency_volt_by_clk(hwmgr,
628 pptable_info->vdd_dep_on_sclk, engine_clock, 631 vdd_dep_table, engine_clock,
629 &graphic_level->MinVoltage, &mvdd); 632 &graphic_level->MinVoltage, &mvdd);
630 PP_ASSERT_WITH_CODE((!result), 633 PP_ASSERT_WITH_CODE((!result),
631 "can not find VDDC voltage value for VDDC " 634 "can not find VDDC voltage value for VDDC "
@@ -634,7 +637,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
634 /* SCLK frequency in units of 10KHz*/ 637 /* SCLK frequency in units of 10KHz*/
635 graphic_level->SclkFrequency = engine_clock; 638 graphic_level->SclkFrequency = engine_clock;
636 /* Indicates maximum activity level for this performance level. 50% for now*/ 639 /* Indicates maximum activity level for this performance level. 50% for now*/
637 graphic_level->ActivityLevel = sclk_activity_level_threshold; 640 graphic_level->ActivityLevel = data->current_profile_setting.sclk_activity;
638 641
639 graphic_level->CcPwrDynRm = 0; 642 graphic_level->CcPwrDynRm = 0;
640 graphic_level->CcPwrDynRm1 = 0; 643 graphic_level->CcPwrDynRm1 = 0;
@@ -642,8 +645,8 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
642 graphic_level->EnabledForActivity = 0; 645 graphic_level->EnabledForActivity = 0;
643 /* this level can be used for throttling.*/ 646 /* this level can be used for throttling.*/
644 graphic_level->EnabledForThrottle = 1; 647 graphic_level->EnabledForThrottle = 1;
645 graphic_level->UpHyst = 0; 648 graphic_level->UpHyst = data->current_profile_setting.sclk_up_hyst;
646 graphic_level->DownHyst = 0; 649 graphic_level->DownHyst = data->current_profile_setting.sclk_down_hyst;
647 graphic_level->VoltageDownHyst = 0; 650 graphic_level->VoltageDownHyst = 0;
648 graphic_level->PowerThrottle = 0; 651 graphic_level->PowerThrottle = 0;
649 652
@@ -702,7 +705,6 @@ static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
702 for (i = 0; i < dpm_table->sclk_table.count; i++) { 705 for (i = 0; i < dpm_table->sclk_table.count; i++) {
703 result = tonga_populate_single_graphic_level(hwmgr, 706 result = tonga_populate_single_graphic_level(hwmgr,
704 dpm_table->sclk_table.dpm_levels[i].value, 707 dpm_table->sclk_table.dpm_levels[i].value,
705 (uint16_t)smu_data->activity_target[i],
706 &(smu_data->smc_state_table.GraphicsLevel[i])); 708 &(smu_data->smc_state_table.GraphicsLevel[i]));
707 if (result != 0) 709 if (result != 0)
708 return result; 710 return result;
@@ -966,10 +968,16 @@ static int tonga_populate_single_memory_level(
966 uint32_t mclk_stutter_mode_threshold = 30000; 968 uint32_t mclk_stutter_mode_threshold = 30000;
967 uint32_t mclk_edc_enable_threshold = 40000; 969 uint32_t mclk_edc_enable_threshold = 40000;
968 uint32_t mclk_strobe_mode_threshold = 40000; 970 uint32_t mclk_strobe_mode_threshold = 40000;
971 phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
969 972
970 if (NULL != pptable_info->vdd_dep_on_mclk) { 973 if (hwmgr->od_enabled)
974 vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
975 else
976 vdd_dep_table = pptable_info->vdd_dep_on_mclk;
977
978 if (NULL != vdd_dep_table) {
971 result = tonga_get_dependency_volt_by_clk(hwmgr, 979 result = tonga_get_dependency_volt_by_clk(hwmgr,
972 pptable_info->vdd_dep_on_mclk, 980 vdd_dep_table,
973 memory_clock, 981 memory_clock,
974 &memory_level->MinVoltage, &mvdd); 982 &memory_level->MinVoltage, &mvdd);
975 PP_ASSERT_WITH_CODE( 983 PP_ASSERT_WITH_CODE(
@@ -986,12 +994,12 @@ static int tonga_populate_single_memory_level(
986 994
987 memory_level->EnabledForThrottle = 1; 995 memory_level->EnabledForThrottle = 1;
988 memory_level->EnabledForActivity = 0; 996 memory_level->EnabledForActivity = 0;
989 memory_level->UpHyst = 0; 997 memory_level->UpHyst = data->current_profile_setting.mclk_up_hyst;
990 memory_level->DownHyst = 100; 998 memory_level->DownHyst = data->current_profile_setting.mclk_down_hyst;
991 memory_level->VoltageDownHyst = 0; 999 memory_level->VoltageDownHyst = 0;
992 1000
993 /* Indicates maximum activity level for this performance level.*/ 1001 /* Indicates maximum activity level for this performance level.*/
994 memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; 1002 memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
995 memory_level->StutterEnable = 0; 1003 memory_level->StutterEnable = 0;
996 memory_level->StrobeEnable = 0; 1004 memory_level->StrobeEnable = 0;
997 memory_level->EdcReadEnable = 0; 1005 memory_level->EdcReadEnable = 0;
@@ -1281,7 +1289,7 @@ static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1281 table->MemoryACPILevel.VoltageDownHyst = 0; 1289 table->MemoryACPILevel.VoltageDownHyst = 0;
1282 /* Indicates maximum activity level for this performance level.*/ 1290 /* Indicates maximum activity level for this performance level.*/
1283 table->MemoryACPILevel.ActivityLevel = 1291 table->MemoryACPILevel.ActivityLevel =
1284 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); 1292 PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1285 1293
1286 table->MemoryACPILevel.StutterEnable = 0; 1294 table->MemoryACPILevel.StutterEnable = 0;
1287 table->MemoryACPILevel.StrobeEnable = 0; 1295 table->MemoryACPILevel.StrobeEnable = 0;
@@ -1617,19 +1625,12 @@ static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1617 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = 1625 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1618 table_info->vdd_dep_on_sclk; 1626 table_info->vdd_dep_on_sclk;
1619 uint32_t hw_revision, dev_id; 1627 uint32_t hw_revision, dev_id;
1620 struct cgs_system_info sys_info = {0}; 1628 struct amdgpu_device *adev = hwmgr->adev;
1621 1629
1622 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; 1630 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1623 1631
1624 sys_info.size = sizeof(struct cgs_system_info); 1632 hw_revision = adev->pdev->revision;
1625 1633 dev_id = adev->pdev->device;
1626 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
1627 cgs_query_system_info(hwmgr->device, &sys_info);
1628 hw_revision = (uint32_t)sys_info.value;
1629
1630 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
1631 cgs_query_system_info(hwmgr->device, &sys_info);
1632 dev_id = (uint32_t)sys_info.value;
1633 1634
1634 /* Read SMU_Eefuse to read and calculate RO and determine 1635 /* Read SMU_Eefuse to read and calculate RO and determine
1635 * if the part is SS or FF. if RO >= 1660MHz, part is FF. 1636 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
@@ -1699,7 +1700,7 @@ static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1699 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1700 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1700 PHM_PlatformCaps_ClockStretcher); 1701 PHM_PlatformCaps_ClockStretcher);
1701 PP_ASSERT_WITH_CODE(false, 1702 PP_ASSERT_WITH_CODE(false,
1702 "Stretch Amount in PPTable not supported\n", 1703 "Stretch Amount in PPTable not supported",
1703 return -EINVAL); 1704 return -EINVAL);
1704 } 1705 }
1705 1706
@@ -2257,42 +2258,6 @@ static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
2257 smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0]; 2258 smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
2258} 2259}
2259 2260
2260static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
2261{
2262 struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2263 struct SMU72_Discrete_GraphicsLevel *levels =
2264 data->smc_state_table.GraphicsLevel;
2265 unsigned min_level = 1;
2266
2267 hwmgr->default_gfx_power_profile.activity_threshold =
2268 be16_to_cpu(levels[0].ActivityLevel);
2269 hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
2270 hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
2271 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2272
2273 hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
2274 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2275
2276 /* Workaround compute SDMA instability: disable lowest SCLK
2277 * DPM level. Optimize compute power profile: Use only highest
2278 * 2 power levels (if more than 2 are available), Hysteresis:
2279 * 0ms up, 5ms down
2280 */
2281 if (data->smc_state_table.GraphicsDpmLevelCount > 2)
2282 min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
2283 else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
2284 min_level = 1;
2285 else
2286 min_level = 0;
2287 hwmgr->default_compute_power_profile.min_sclk =
2288 be32_to_cpu(levels[min_level].SclkFrequency);
2289 hwmgr->default_compute_power_profile.up_hyst = 0;
2290 hwmgr->default_compute_power_profile.down_hyst = 5;
2291
2292 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2293 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2294}
2295
2296static int tonga_init_smc_table(struct pp_hwmgr *hwmgr) 2261static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2297{ 2262{
2298 int result; 2263 int result;
@@ -2434,7 +2399,7 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2434 result = tonga_populate_vr_config(hwmgr, table); 2399 result = tonga_populate_vr_config(hwmgr, table);
2435 PP_ASSERT_WITH_CODE(!result, 2400 PP_ASSERT_WITH_CODE(!result,
2436 "Failed to populate VRConfig setting !", return result); 2401 "Failed to populate VRConfig setting !", return result);
2437 2402 data->vr_config = table->VRConfig;
2438 table->ThermGpio = 17; 2403 table->ThermGpio = 17;
2439 table->SclkStepSize = 0x4000; 2404 table->SclkStepSize = 0x4000;
2440 2405
@@ -2501,7 +2466,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2501 2466
2502 for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) 2467 for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
2503 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); 2468 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
2504
2505 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); 2469 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2506 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); 2470 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2507 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); 2471 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
@@ -2535,8 +2499,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2535 PP_ASSERT_WITH_CODE((!result), 2499 PP_ASSERT_WITH_CODE((!result),
2536 "Failed to populate initialize MC Reg table !", return result); 2500 "Failed to populate initialize MC Reg table !", return result);
2537 2501
2538 tonga_save_default_power_profile(hwmgr);
2539
2540 return 0; 2502 return 0;
2541} 2503}
2542 2504
@@ -3254,29 +3216,100 @@ static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
3254 ? true : false; 3216 ? true : false;
3255} 3217}
3256 3218
3257static int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, 3219static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
3258 struct amd_pp_profile *request) 3220 void *profile_setting)
3259{ 3221{
3222 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3260 struct tonga_smumgr *smu_data = (struct tonga_smumgr *) 3223 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
3261 (hwmgr->smu_backend); 3224 (hwmgr->smu_backend);
3225 struct profile_mode_setting *setting;
3262 struct SMU72_Discrete_GraphicsLevel *levels = 3226 struct SMU72_Discrete_GraphicsLevel *levels =
3263 smu_data->smc_state_table.GraphicsLevel; 3227 smu_data->smc_state_table.GraphicsLevel;
3264 uint32_t array = smu_data->smu7_data.dpm_table_start + 3228 uint32_t array = smu_data->smu7_data.dpm_table_start +
3265 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel); 3229 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
3266 uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) * 3230
3267 SMU72_MAX_LEVELS_GRAPHICS; 3231 uint32_t mclk_array = smu_data->smu7_data.dpm_table_start +
3232 offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
3233 struct SMU72_Discrete_MemoryLevel *mclk_levels =
3234 smu_data->smc_state_table.MemoryLevel;
3268 uint32_t i; 3235 uint32_t i;
3236 uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
3237
3238 if (profile_setting == NULL)
3239 return -EINVAL;
3240
3241 setting = (struct profile_mode_setting *)profile_setting;
3269 3242
3270 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { 3243 if (setting->bupdate_sclk) {
3271 levels[i].ActivityLevel = 3244 if (!data->sclk_dpm_key_disabled)
3272 cpu_to_be16(request->activity_threshold); 3245 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
3273 levels[i].EnabledForActivity = 1; 3246 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
3274 levels[i].UpHyst = request->up_hyst; 3247 if (levels[i].ActivityLevel !=
3275 levels[i].DownHyst = request->down_hyst; 3248 cpu_to_be16(setting->sclk_activity)) {
3249 levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
3250
3251 clk_activity_offset = array + (sizeof(SMU72_Discrete_GraphicsLevel) * i)
3252 + offsetof(SMU72_Discrete_GraphicsLevel, ActivityLevel);
3253 offset = clk_activity_offset & ~0x3;
3254 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
3255 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
3256 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
3257
3258 }
3259 if (levels[i].UpHyst != setting->sclk_up_hyst ||
3260 levels[i].DownHyst != setting->sclk_down_hyst) {
3261 levels[i].UpHyst = setting->sclk_up_hyst;
3262 levels[i].DownHyst = setting->sclk_down_hyst;
3263 up_hyst_offset = array + (sizeof(SMU72_Discrete_GraphicsLevel) * i)
3264 + offsetof(SMU72_Discrete_GraphicsLevel, UpHyst);
3265 down_hyst_offset = array + (sizeof(SMU72_Discrete_GraphicsLevel) * i)
3266 + offsetof(SMU72_Discrete_GraphicsLevel, DownHyst);
3267 offset = up_hyst_offset & ~0x3;
3268 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
3269 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpHyst, sizeof(uint8_t));
3270 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownHyst, sizeof(uint8_t));
3271 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
3272 }
3273 }
3274 if (!data->sclk_dpm_key_disabled)
3275 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
3276 } 3276 }
3277 3277
3278 return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, 3278 if (setting->bupdate_mclk) {
3279 array_size, SMC_RAM_END); 3279 if (!data->mclk_dpm_key_disabled)
3280 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
3281 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
3282 if (mclk_levels[i].ActivityLevel !=
3283 cpu_to_be16(setting->mclk_activity)) {
3284 mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
3285
3286 clk_activity_offset = mclk_array + (sizeof(SMU72_Discrete_MemoryLevel) * i)
3287 + offsetof(SMU72_Discrete_MemoryLevel, ActivityLevel);
3288 offset = clk_activity_offset & ~0x3;
3289 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
3290 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
3291 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
3292
3293 }
3294 if (mclk_levels[i].UpHyst != setting->mclk_up_hyst ||
3295 mclk_levels[i].DownHyst != setting->mclk_down_hyst) {
3296 mclk_levels[i].UpHyst = setting->mclk_up_hyst;
3297 mclk_levels[i].DownHyst = setting->mclk_down_hyst;
3298 up_hyst_offset = mclk_array + (sizeof(SMU72_Discrete_MemoryLevel) * i)
3299 + offsetof(SMU72_Discrete_MemoryLevel, UpHyst);
3300 down_hyst_offset = mclk_array + (sizeof(SMU72_Discrete_MemoryLevel) * i)
3301 + offsetof(SMU72_Discrete_MemoryLevel, DownHyst);
3302 offset = up_hyst_offset & ~0x3;
3303 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
3304 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpHyst, sizeof(uint8_t));
3305 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownHyst, sizeof(uint8_t));
3306 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
3307 }
3308 }
3309 if (!data->mclk_dpm_key_disabled)
3310 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
3311 }
3312 return 0;
3280} 3313}
3281 3314
3282const struct pp_smumgr_func tonga_smu_funcs = { 3315const struct pp_smumgr_func tonga_smu_funcs = {
@@ -3301,5 +3334,5 @@ const struct pp_smumgr_func tonga_smu_funcs = {
3301 .get_mac_definition = tonga_get_mac_definition, 3334 .get_mac_definition = tonga_get_mac_definition,
3302 .initialize_mc_reg_table = tonga_initialize_mc_reg_table, 3335 .initialize_mc_reg_table = tonga_initialize_mc_reg_table,
3303 .is_dpm_running = tonga_is_dpm_running, 3336 .is_dpm_running = tonga_is_dpm_running,
3304 .populate_requested_graphic_levels = tonga_populate_requested_graphic_levels, 3337 .update_dpm_settings = tonga_update_dpm_settings,
3305}; 3338};
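
Editor's note: the new tonga_update_dpm_settings() avoids a full table upload by patching individual DPM-level fields directly in SMC RAM. Because the SMC indirect-register interface is dword based, each sub-word field is updated read-modify-write: round the field's byte offset down to the containing dword (offset & ~0x3), read that dword, merge the field with phm_set_field_to_u32(), and write it back. A minimal sketch of that pattern, factored into a hypothetical helper (smc_patch_field() is not part of the driver; every call it makes is one used in the diff above):

static void smc_patch_field(struct pp_hwmgr *hwmgr, uint32_t field_offset,
			    uint32_t value, uint32_t field_size)
{
	/* containing dword of the (possibly unaligned) field */
	uint32_t offset = field_offset & ~0x3;
	uint32_t tmp;

	tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device,
						      CGS_IND_REG__SMC, offset));
	tmp = phm_set_field_to_u32(field_offset, tmp, value, field_size);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset,
			       PP_HOST_TO_SMC_UL(tmp));
}

With such a helper, the ActivityLevel update above would reduce to smc_patch_field(hwmgr, clk_activity_offset, levels[i].ActivityLevel, sizeof(uint16_t)).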
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
index 5d70a00348e2..d664fedd3d85 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -69,9 +69,6 @@ struct tonga_smumgr {
69 const struct tonga_pt_defaults *power_tune_defaults; 69 const struct tonga_pt_defaults *power_tune_defaults;
70 SMU72_Discrete_MCRegisters mc_regs; 70 SMU72_Discrete_MCRegisters mc_regs;
71 struct tonga_mc_reg_table mc_reg_table; 71 struct tonga_mc_reg_table mc_reg_table;
72
73 uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS];
74
75}; 72};
76 73
77#endif 74#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index f6f39d01d227..e08a6116ac05 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -27,11 +27,9 @@
27#include "vega10_smumgr.h" 27#include "vega10_smumgr.h"
28#include "vega10_ppsmc.h" 28#include "vega10_ppsmc.h"
29#include "smu9_driver_if.h" 29#include "smu9_driver_if.h"
30
31#include "ppatomctrl.h" 30#include "ppatomctrl.h"
32#include "pp_debug.h" 31#include "pp_debug.h"
33#include "smu_ucode_xfer_vi.h" 32
34#include "smu7_smumgr.h"
35 33
36#define AVFS_EN_MSB 1568 34#define AVFS_EN_MSB 1568
37#define AVFS_EN_LSB 1568 35#define AVFS_EN_LSB 1568
@@ -83,16 +81,17 @@ static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
83static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr) 81static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
84{ 82{
85 uint32_t reg; 83 uint32_t reg;
86 84 uint32_t ret;
87 if (!vega10_is_smc_ram_running(hwmgr))
88 return -EINVAL;
89 85
90 reg = soc15_get_register_offset(MP1_HWID, 0, 86 reg = soc15_get_register_offset(MP1_HWID, 0,
91 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); 87 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
92 88
93 phm_wait_for_register_unequal(hwmgr, reg, 89 ret = phm_wait_for_register_unequal(hwmgr, reg,
94 0, MP1_C2PMSG_90__CONTENT_MASK); 90 0, MP1_C2PMSG_90__CONTENT_MASK);
95 91
92 if (ret)
93 pr_err("No response from smu\n");
94
96 return cgs_read_register(hwmgr->device, reg); 95 return cgs_read_register(hwmgr->device, reg);
97} 96}
98 97
@@ -107,9 +106,6 @@ int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
107{ 106{
108 uint32_t reg; 107 uint32_t reg;
109 108
110 if (!vega10_is_smc_ram_running(hwmgr))
111 return -EINVAL;
112
113 reg = soc15_get_register_offset(MP1_HWID, 0, 109 reg = soc15_get_register_offset(MP1_HWID, 0,
114 mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); 110 mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
115 cgs_write_register(hwmgr->device, reg, msg); 111 cgs_write_register(hwmgr->device, reg, msg);
@@ -126,9 +122,7 @@ int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
126int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) 122int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
127{ 123{
128 uint32_t reg; 124 uint32_t reg;
129 125 uint32_t ret;
130 if (!vega10_is_smc_ram_running(hwmgr))
131 return -EINVAL;
132 126
133 vega10_wait_for_response(hwmgr); 127 vega10_wait_for_response(hwmgr);
134 128
@@ -138,8 +132,9 @@ int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
138 132
139 vega10_send_msg_to_smc_without_waiting(hwmgr, msg); 133 vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
140 134
141 if (vega10_wait_for_response(hwmgr) != 1) 135 ret = vega10_wait_for_response(hwmgr);
142 pr_err("Failed to send message: 0x%x\n", msg); 136 if (ret != 1)
137 pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
143 138
144 return 0; 139 return 0;
145} 140}
@@ -155,9 +150,7 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
155 uint16_t msg, uint32_t parameter) 150 uint16_t msg, uint32_t parameter)
156{ 151{
157 uint32_t reg; 152 uint32_t reg;
158 153 uint32_t ret;
159 if (!vega10_is_smc_ram_running(hwmgr))
160 return -EINVAL;
161 154
162 vega10_wait_for_response(hwmgr); 155 vega10_wait_for_response(hwmgr);
163 156
@@ -171,8 +164,9 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
171 164
172 vega10_send_msg_to_smc_without_waiting(hwmgr, msg); 165 vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
173 166
174 if (vega10_wait_for_response(hwmgr) != 1) 167 ret = vega10_wait_for_response(hwmgr);
175 pr_err("Failed to send message: 0x%x\n", msg); 168 if (ret != 1)
169 pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
176 170
177 return 0; 171 return 0;
178} 172}
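
Editor's note: with the vega10_is_smc_ram_running() guards moved out of the message path (they now run once at start-up, see vega10_start_smu() below), the mailbox helpers simply log when the SMC does not acknowledge and still return 0. A hedged usage sketch of the resulting query pattern; vega10_query_smc() is a hypothetical wrapper, while the two routines it calls are existing vega10_smumgr helpers:

static int vega10_query_smc(struct pp_hwmgr *hwmgr, uint16_t msg,
			    uint32_t *value)
{
	/* send the request; failures are reported via pr_err() above */
	vega10_send_msg_to_smc(hwmgr, msg);
	/* fetch the 32-bit reply from the SMC argument register */
	vega10_read_arg_from_smc(hwmgr, value);
	return 0;
}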
@@ -232,20 +226,15 @@ int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
232 "Invalid SMU Table version!", return -EINVAL); 226 "Invalid SMU Table version!", return -EINVAL);
233 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, 227 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
234 "Invalid SMU Table Length!", return -EINVAL); 228 "Invalid SMU Table Length!", return -EINVAL);
235 PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, 229 vega10_send_msg_to_smc_with_parameter(hwmgr,
236 PPSMC_MSG_SetDriverDramAddrHigh, 230 PPSMC_MSG_SetDriverDramAddrHigh,
237 priv->smu_tables.entry[table_id].table_addr_high) == 0, 231 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
238 "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL); 232 vega10_send_msg_to_smc_with_parameter(hwmgr,
239 PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
240 PPSMC_MSG_SetDriverDramAddrLow, 233 PPSMC_MSG_SetDriverDramAddrLow,
241 priv->smu_tables.entry[table_id].table_addr_low) == 0, 234 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
242 "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", 235 vega10_send_msg_to_smc_with_parameter(hwmgr,
243 return -EINVAL);
244 PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
245 PPSMC_MSG_TransferTableSmu2Dram, 236 PPSMC_MSG_TransferTableSmu2Dram,
246 priv->smu_tables.entry[table_id].table_id) == 0, 237 priv->smu_tables.entry[table_id].table_id);
247 "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
248 return -EINVAL);
249 238
250 memcpy(table, priv->smu_tables.entry[table_id].table, 239 memcpy(table, priv->smu_tables.entry[table_id].table,
251 priv->smu_tables.entry[table_id].size); 240 priv->smu_tables.entry[table_id].size);
@@ -274,21 +263,15 @@ int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
274 memcpy(priv->smu_tables.entry[table_id].table, table, 263 memcpy(priv->smu_tables.entry[table_id].table, table,
275 priv->smu_tables.entry[table_id].size); 264 priv->smu_tables.entry[table_id].size);
276 265
277 PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, 266 vega10_send_msg_to_smc_with_parameter(hwmgr,
278 PPSMC_MSG_SetDriverDramAddrHigh, 267 PPSMC_MSG_SetDriverDramAddrHigh,
279 priv->smu_tables.entry[table_id].table_addr_high) == 0, 268 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
280 "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", 269 vega10_send_msg_to_smc_with_parameter(hwmgr,
281 return -EINVAL;);
282 PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
283 PPSMC_MSG_SetDriverDramAddrLow, 270 PPSMC_MSG_SetDriverDramAddrLow,
284 priv->smu_tables.entry[table_id].table_addr_low) == 0, 271 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
285 "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", 272 vega10_send_msg_to_smc_with_parameter(hwmgr,
286 return -EINVAL);
287 PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
288 PPSMC_MSG_TransferTableDram2Smu, 273 PPSMC_MSG_TransferTableDram2Smu,
289 priv->smu_tables.entry[table_id].table_id) == 0, 274 priv->smu_tables.entry[table_id].table_id);
290 "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
291 return -EINVAL);
292 275
293 return 0; 276 return 0;
294} 277}
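
Editor's note: the table-copy paths now carry a single 64-bit mc_addr and split it on the fly instead of storing separate table_addr_high/low fields. upper_32_bits() and lower_32_bits() are the standard kernel helpers; the wrapper below is a hypothetical sketch of that split, not driver code:

static void vega10_set_table_dram_addr(struct pp_hwmgr *hwmgr, uint64_t mc_addr)
{
	/* the SMC takes the DRAM address as two 32-bit message parameters */
	vega10_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(mc_addr));
	vega10_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(mc_addr));
}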
@@ -327,13 +310,21 @@ int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
327 if (features_enabled == NULL) 310 if (features_enabled == NULL)
328 return -EINVAL; 311 return -EINVAL;
329 312
330 if (!vega10_send_msg_to_smc(hwmgr, 313 vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
331 PPSMC_MSG_GetEnabledSmuFeatures)) { 314 vega10_read_arg_from_smc(hwmgr, features_enabled);
332 vega10_read_arg_from_smc(hwmgr, features_enabled); 315 return 0;
333 return 0; 316}
334 }
335 317
336 return -EINVAL; 318static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
319{
320 uint32_t features_enabled = 0;
321
322 vega10_get_smc_features(hwmgr, &features_enabled);
323
324 if (features_enabled & SMC_DPM_FEATURES)
325 return true;
326 else
327 return false;
337} 328}
338 329
339int vega10_set_tools_address(struct pp_hwmgr *hwmgr) 330int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
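
Editor's note, a stylistic aside only (a sketch, not the committed code): the new vega10_is_dpm_running() above could be written as a single expression:

static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
{
	uint32_t features_enabled = 0;

	vega10_get_smc_features(hwmgr, &features_enabled);
	/* equivalent to the if/else form above */
	return (features_enabled & SMC_DPM_FEATURES) != 0;
}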
@@ -341,14 +332,13 @@ int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
341 struct vega10_smumgr *priv = 332 struct vega10_smumgr *priv =
342 (struct vega10_smumgr *)(hwmgr->smu_backend); 333 (struct vega10_smumgr *)(hwmgr->smu_backend);
343 334
344 if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high || 335 if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
345 priv->smu_tables.entry[TOOLSTABLE].table_addr_low) { 336 vega10_send_msg_to_smc_with_parameter(hwmgr,
346 if (!vega10_send_msg_to_smc_with_parameter(hwmgr,
347 PPSMC_MSG_SetToolsDramAddrHigh, 337 PPSMC_MSG_SetToolsDramAddrHigh,
348 priv->smu_tables.entry[TOOLSTABLE].table_addr_high)) 338 upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
349 vega10_send_msg_to_smc_with_parameter(hwmgr, 339 vega10_send_msg_to_smc_with_parameter(hwmgr,
350 PPSMC_MSG_SetToolsDramAddrLow, 340 PPSMC_MSG_SetToolsDramAddrLow,
351 priv->smu_tables.entry[TOOLSTABLE].table_addr_low); 341 lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
352 } 342 }
353 return 0; 343 return 0;
354} 344}
@@ -356,7 +346,7 @@ int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
356static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr) 346static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
357{ 347{
358 uint32_t smc_driver_if_version; 348 uint32_t smc_driver_if_version;
359 struct cgs_system_info sys_info = {0}; 349 struct amdgpu_device *adev = hwmgr->adev;
360 uint32_t dev_id; 350 uint32_t dev_id;
361 uint32_t rev_id; 351 uint32_t rev_id;
362 352
@@ -366,15 +356,8 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
366 return -EINVAL); 356 return -EINVAL);
367 vega10_read_arg_from_smc(hwmgr, &smc_driver_if_version); 357 vega10_read_arg_from_smc(hwmgr, &smc_driver_if_version);
368 358
369 sys_info.size = sizeof(struct cgs_system_info); 359 dev_id = adev->pdev->device;
370 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; 360 rev_id = adev->pdev->revision;
371 cgs_query_system_info(hwmgr->device, &sys_info);
372 dev_id = (uint32_t)sys_info.value;
373
374 sys_info.size = sizeof(struct cgs_system_info);
375 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
376 cgs_query_system_info(hwmgr->device, &sys_info);
377 rev_id = (uint32_t)sys_info.value;
378 361
379 if (!((dev_id == 0x687f) && 362 if (!((dev_id == 0x687f) &&
380 ((rev_id == 0xc0) || 363 ((rev_id == 0xc0) ||
@@ -393,14 +376,12 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
393static int vega10_smu_init(struct pp_hwmgr *hwmgr) 376static int vega10_smu_init(struct pp_hwmgr *hwmgr)
394{ 377{
395 struct vega10_smumgr *priv; 378 struct vega10_smumgr *priv;
396 uint64_t mc_addr; 379 unsigned long tools_size;
397 void *kaddr = NULL;
398 unsigned long handle, tools_size;
399 int ret; 380 int ret;
400 struct cgs_firmware_info info = {0}; 381 struct cgs_firmware_info info = {0};
401 382
402 ret = cgs_get_firmware_info(hwmgr->device, 383 ret = cgs_get_firmware_info(hwmgr->device,
403 smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), 384 CGS_UCODE_ID_SMU,
404 &info); 385 &info);
405 if (ret || !info.kptr) 386 if (ret || !info.kptr)
406 return -EINVAL; 387 return -EINVAL;
@@ -413,147 +394,107 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
413 hwmgr->smu_backend = priv; 394 hwmgr->smu_backend = priv;
414 395
415 /* allocate space for pptable */ 396 /* allocate space for pptable */
416 smu_allocate_memory(hwmgr->device, 397 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
417 sizeof(PPTable_t), 398 sizeof(PPTable_t),
418 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
419 PAGE_SIZE, 399 PAGE_SIZE,
420 &mc_addr, 400 AMDGPU_GEM_DOMAIN_VRAM,
421 &kaddr, 401 &priv->smu_tables.entry[PPTABLE].handle,
422 &handle); 402 &priv->smu_tables.entry[PPTABLE].mc_addr,
423 403 &priv->smu_tables.entry[PPTABLE].table);
424 PP_ASSERT_WITH_CODE(kaddr, 404 if (ret)
425 "[vega10_smu_init] Out of memory for pptable.", 405 goto free_backend;
426 kfree(hwmgr->smu_backend);
427 cgs_free_gpu_mem(hwmgr->device,
428 (cgs_handle_t)handle);
429 return -EINVAL);
430 406
431 priv->smu_tables.entry[PPTABLE].version = 0x01; 407 priv->smu_tables.entry[PPTABLE].version = 0x01;
432 priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t); 408 priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t);
433 priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE; 409 priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE;
434 priv->smu_tables.entry[PPTABLE].table_addr_high =
435 smu_upper_32_bits(mc_addr);
436 priv->smu_tables.entry[PPTABLE].table_addr_low =
437 smu_lower_32_bits(mc_addr);
438 priv->smu_tables.entry[PPTABLE].table = kaddr;
439 priv->smu_tables.entry[PPTABLE].handle = handle;
440 410
441 /* allocate space for watermarks table */ 411 /* allocate space for watermarks table */
442 smu_allocate_memory(hwmgr->device, 412 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
443 sizeof(Watermarks_t), 413 sizeof(Watermarks_t),
444 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
445 PAGE_SIZE, 414 PAGE_SIZE,
446 &mc_addr, 415 AMDGPU_GEM_DOMAIN_VRAM,
447 &kaddr, 416 &priv->smu_tables.entry[WMTABLE].handle,
448 &handle); 417 &priv->smu_tables.entry[WMTABLE].mc_addr,
449 418 &priv->smu_tables.entry[WMTABLE].table);
450 PP_ASSERT_WITH_CODE(kaddr, 419
451 "[vega10_smu_init] Out of memory for wmtable.", 420 if (ret)
452 kfree(hwmgr->smu_backend); 421 goto err0;
453 cgs_free_gpu_mem(hwmgr->device,
454 (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
455 cgs_free_gpu_mem(hwmgr->device,
456 (cgs_handle_t)handle);
457 return -EINVAL);
458 422
459 priv->smu_tables.entry[WMTABLE].version = 0x01; 423 priv->smu_tables.entry[WMTABLE].version = 0x01;
460 priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t); 424 priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
461 priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS; 425 priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
462 priv->smu_tables.entry[WMTABLE].table_addr_high =
463 smu_upper_32_bits(mc_addr);
464 priv->smu_tables.entry[WMTABLE].table_addr_low =
465 smu_lower_32_bits(mc_addr);
466 priv->smu_tables.entry[WMTABLE].table = kaddr;
467 priv->smu_tables.entry[WMTABLE].handle = handle;
468 426
469 /* allocate space for AVFS table */ 427 /* allocate space for AVFS table */
470 smu_allocate_memory(hwmgr->device, 428 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
471 sizeof(AvfsTable_t), 429 sizeof(AvfsTable_t),
472 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
473 PAGE_SIZE, 430 PAGE_SIZE,
474 &mc_addr, 431 AMDGPU_GEM_DOMAIN_VRAM,
475 &kaddr, 432 &priv->smu_tables.entry[AVFSTABLE].handle,
476 &handle); 433 &priv->smu_tables.entry[AVFSTABLE].mc_addr,
477 434 &priv->smu_tables.entry[AVFSTABLE].table);
478 PP_ASSERT_WITH_CODE(kaddr, 435
479 "[vega10_smu_init] Out of memory for avfs table.", 436 if (ret)
480 kfree(hwmgr->smu_backend); 437 goto err1;
481 cgs_free_gpu_mem(hwmgr->device,
482 (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
483 cgs_free_gpu_mem(hwmgr->device,
484 (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
485 cgs_free_gpu_mem(hwmgr->device,
486 (cgs_handle_t)handle);
487 return -EINVAL);
488 438
489 priv->smu_tables.entry[AVFSTABLE].version = 0x01; 439 priv->smu_tables.entry[AVFSTABLE].version = 0x01;
490 priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t); 440 priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t);
491 priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS; 441 priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS;
492 priv->smu_tables.entry[AVFSTABLE].table_addr_high =
493 smu_upper_32_bits(mc_addr);
494 priv->smu_tables.entry[AVFSTABLE].table_addr_low =
495 smu_lower_32_bits(mc_addr);
496 priv->smu_tables.entry[AVFSTABLE].table = kaddr;
497 priv->smu_tables.entry[AVFSTABLE].handle = handle;
498 442
499 tools_size = 0x19000; 443 tools_size = 0x19000;
500 if (tools_size) { 444 if (tools_size) {
501 smu_allocate_memory(hwmgr->device, 445 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
502 tools_size, 446 tools_size,
503 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
504 PAGE_SIZE, 447 PAGE_SIZE,
505 &mc_addr, 448 AMDGPU_GEM_DOMAIN_VRAM,
506 &kaddr, 449 &priv->smu_tables.entry[TOOLSTABLE].handle,
507 &handle); 450 &priv->smu_tables.entry[TOOLSTABLE].mc_addr,
508 451 &priv->smu_tables.entry[TOOLSTABLE].table);
509 if (kaddr) { 452 if (ret)
510 priv->smu_tables.entry[TOOLSTABLE].version = 0x01; 453 goto err2;
511 priv->smu_tables.entry[TOOLSTABLE].size = tools_size; 454 priv->smu_tables.entry[TOOLSTABLE].version = 0x01;
512 priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG; 455 priv->smu_tables.entry[TOOLSTABLE].size = tools_size;
513 priv->smu_tables.entry[TOOLSTABLE].table_addr_high = 456 priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG;
514 smu_upper_32_bits(mc_addr);
515 priv->smu_tables.entry[TOOLSTABLE].table_addr_low =
516 smu_lower_32_bits(mc_addr);
517 priv->smu_tables.entry[TOOLSTABLE].table = kaddr;
518 priv->smu_tables.entry[TOOLSTABLE].handle = handle;
519 }
520 } 457 }
521 458
522 /* allocate space for AVFS Fuse table */ 459 /* allocate space for AVFS Fuse table */
523 smu_allocate_memory(hwmgr->device, 460 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
524 sizeof(AvfsFuseOverride_t), 461 sizeof(AvfsFuseOverride_t),
525 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
526 PAGE_SIZE, 462 PAGE_SIZE,
527 &mc_addr, 463 AMDGPU_GEM_DOMAIN_VRAM,
528 &kaddr, 464 &priv->smu_tables.entry[AVFSFUSETABLE].handle,
529 &handle); 465 &priv->smu_tables.entry[AVFSFUSETABLE].mc_addr,
530 466 &priv->smu_tables.entry[AVFSFUSETABLE].table);
531 PP_ASSERT_WITH_CODE(kaddr, 467 if (ret)
532 "[vega10_smu_init] Out of memory for avfs fuse table.", 468 goto err3;
533 kfree(hwmgr->smu_backend);
534 cgs_free_gpu_mem(hwmgr->device,
535 (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
536 cgs_free_gpu_mem(hwmgr->device,
537 (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
538 cgs_free_gpu_mem(hwmgr->device,
539 (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
540 cgs_free_gpu_mem(hwmgr->device,
541 (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
542 cgs_free_gpu_mem(hwmgr->device,
543 (cgs_handle_t)handle);
544 return -EINVAL);
545 469
546 priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01; 470 priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01;
547 priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t); 471 priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t);
548 priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE; 472 priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE;
549 priv->smu_tables.entry[AVFSFUSETABLE].table_addr_high = 473
550 smu_upper_32_bits(mc_addr);
551 priv->smu_tables.entry[AVFSFUSETABLE].table_addr_low =
552 smu_lower_32_bits(mc_addr);
553 priv->smu_tables.entry[AVFSFUSETABLE].table = kaddr;
554 priv->smu_tables.entry[AVFSFUSETABLE].handle = handle;
555 474
556 return 0; 475 return 0;
476
477err3:
478 if (priv->smu_tables.entry[TOOLSTABLE].table)
479 amdgpu_bo_free_kernel(&priv->smu_tables.entry[TOOLSTABLE].handle,
480 &priv->smu_tables.entry[TOOLSTABLE].mc_addr,
481 &priv->smu_tables.entry[TOOLSTABLE].table);
482err2:
483 amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSTABLE].handle,
484 &priv->smu_tables.entry[AVFSTABLE].mc_addr,
485 &priv->smu_tables.entry[AVFSTABLE].table);
486err1:
487 amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
488 &priv->smu_tables.entry[WMTABLE].mc_addr,
489 &priv->smu_tables.entry[WMTABLE].table);
490err0:
491 amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
492 &priv->smu_tables.entry[PPTABLE].mc_addr,
493 &priv->smu_tables.entry[PPTABLE].table);
494free_backend:
495 kfree(hwmgr->smu_backend);
496
497 return -EINVAL;
557} 498}
558 499
559static int vega10_smu_fini(struct pp_hwmgr *hwmgr) 500static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
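
Editor's note: vega10_smu_init() now allocates each SMU table with amdgpu_bo_create_kernel() and unwinds through a chain of goto labels instead of the old nested PP_ASSERT_WITH_CODE cleanup. A self-contained sketch of that allocate-or-unwind pattern, assuming two illustrative buffers (the function and buffer names are hypothetical; the amdgpu_bo_* helpers are the ones the code switches to):

static int alloc_two_vram_buffers(struct amdgpu_device *adev,
				  struct amdgpu_bo **bo_a, u64 *gpu_a, void **cpu_a,
				  struct amdgpu_bo **bo_b, u64 *gpu_b, void **cpu_b)
{
	int ret;

	ret = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_a, gpu_a, cpu_a);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_b, gpu_b, cpu_b);
	if (ret)
		goto err_free_a;	/* undo everything allocated so far */

	return 0;

err_free_a:
	amdgpu_bo_free_kernel(bo_a, gpu_a, cpu_a);
	return ret;
}

Each later allocation in the real function adds one more label, so a failure at step N frees steps N-1 .. 1 in reverse order before kfree()ing the backend.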
@@ -562,17 +503,22 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
562 (struct vega10_smumgr *)(hwmgr->smu_backend); 503 (struct vega10_smumgr *)(hwmgr->smu_backend);
563 504
564 if (priv) { 505 if (priv) {
565 cgs_free_gpu_mem(hwmgr->device, 506 amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
566 (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); 507 &priv->smu_tables.entry[PPTABLE].mc_addr,
567 cgs_free_gpu_mem(hwmgr->device, 508 &priv->smu_tables.entry[PPTABLE].table);
568 (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); 509 amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
569 cgs_free_gpu_mem(hwmgr->device, 510 &priv->smu_tables.entry[WMTABLE].mc_addr,
570 (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); 511 &priv->smu_tables.entry[WMTABLE].table);
512 amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSTABLE].handle,
513 &priv->smu_tables.entry[AVFSTABLE].mc_addr,
514 &priv->smu_tables.entry[AVFSTABLE].table);
571 if (priv->smu_tables.entry[TOOLSTABLE].table) 515 if (priv->smu_tables.entry[TOOLSTABLE].table)
572 cgs_free_gpu_mem(hwmgr->device, 516 amdgpu_bo_free_kernel(&priv->smu_tables.entry[TOOLSTABLE].handle,
573 (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); 517 &priv->smu_tables.entry[TOOLSTABLE].mc_addr,
574 cgs_free_gpu_mem(hwmgr->device, 518 &priv->smu_tables.entry[TOOLSTABLE].table);
575 (cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle); 519 amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSFUSETABLE].handle,
520 &priv->smu_tables.entry[AVFSFUSETABLE].mc_addr,
521 &priv->smu_tables.entry[AVFSFUSETABLE].table);
576 kfree(hwmgr->smu_backend); 522 kfree(hwmgr->smu_backend);
577 hwmgr->smu_backend = NULL; 523 hwmgr->smu_backend = NULL;
578 } 524 }
@@ -581,6 +527,9 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
581 527
582static int vega10_start_smu(struct pp_hwmgr *hwmgr) 528static int vega10_start_smu(struct pp_hwmgr *hwmgr)
583{ 529{
530 if (!vega10_is_smc_ram_running(hwmgr))
531 return -EINVAL;
532
584 PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr), 533 PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
585 "Failed to verify SMC interface!", 534 "Failed to verify SMC interface!",
586 return -EINVAL); 535 return -EINVAL);
@@ -599,4 +548,5 @@ const struct pp_smumgr_func vega10_smu_funcs = {
599 .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter, 548 .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter,
600 .download_pptable_settings = NULL, 549 .download_pptable_settings = NULL,
601 .upload_pptable_settings = NULL, 550 .upload_pptable_settings = NULL,
551 .is_dpm_running = vega10_is_dpm_running,
602}; 552};
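
Editor's note: the new .is_dpm_running hook is consumed through the smumgr function table rather than called directly. A hedged sketch of that dispatch; the wrapper name and the smumgr_funcs field are assumptions based on the usual powerplay pattern and are not taken from this diff:

static bool smu_dpm_is_running(struct pp_hwmgr *hwmgr)
{
	/* dispatch through the per-ASIC pp_smumgr_func table */
	if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->is_dpm_running)
		return hwmgr->smumgr_funcs->is_dpm_running(hwmgr);
	return false;
}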
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
index 0695455b21b2..736f8cfdbbdc 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
@@ -38,10 +38,9 @@ struct smu_table_entry {
38 uint32_t version; 38 uint32_t version;
39 uint32_t size; 39 uint32_t size;
40 uint32_t table_id; 40 uint32_t table_id;
41 uint32_t table_addr_high; 41 uint64_t mc_addr;
42 uint32_t table_addr_low; 42 void *table;
43 uint8_t *table; 43 struct amdgpu_bo *handle;
44 unsigned long handle;
45}; 44};
46 45
47struct smu_table_array { 46struct smu_table_array {