-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 74
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 100
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 126
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 118
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/amd/display/TODO | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 694
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 94
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/log_helpers.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/logger.c | 406
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 86
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 125
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 168
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 68
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c | 119
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/include/dpcd_defs.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_interface.h | 138
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h | 59
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/luts_1d.h | 51
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h | 55
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h | 98
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h | 50
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h | 50
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h | 32
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h | 33
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h | 34
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h | 34
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h | 34
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h | 37
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 91
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 80
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c | 150
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h | 32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 168
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 220
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 8
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler.c | 31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 17
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 18
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 7
-rw-r--r--  include/drm/gpu_scheduler.h | 8
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 25
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 1
189 files changed, 3650 insertions(+), 2272 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8a440b9fa0fd..44f62fda4022 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -73,6 +73,7 @@
 #include "amdgpu_virt.h"
 #include "amdgpu_gart.h"
 #include "amdgpu_debugfs.h"
+#include "amdgpu_job.h"
 
 /*
  * Modules parameters.
@@ -105,11 +106,8 @@ extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
 extern int amdgpu_vm_update_mode;
 extern int amdgpu_dc;
-extern int amdgpu_dc_log;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
-extern int amdgpu_no_evict;
-extern int amdgpu_direct_gma_size;
 extern uint amdgpu_pcie_gen_cap;
 extern uint amdgpu_pcie_lane_cap;
 extern uint amdgpu_cg_mask;
@@ -600,17 +598,6 @@ struct amdgpu_ib {
 
 extern const struct drm_sched_backend_ops amdgpu_sched_ops;
 
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-		     struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-			     struct amdgpu_job **job);
-
-void amdgpu_job_free_resources(struct amdgpu_job *job);
-void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-		      struct drm_sched_entity *entity, void *owner,
-		      struct dma_fence **f);
-
 /*
  * Queue manager
  */
@@ -732,6 +719,14 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 			     struct list_head *validated);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
 void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+				      struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+				 struct drm_file *filp,
+				 struct drm_amdgpu_bo_list_entry *info,
+				 unsigned num_entries,
+				 struct amdgpu_bo_list **list);
 
 /*
  * GFX stuff
@@ -1029,6 +1024,7 @@ struct amdgpu_cs_parser {
 
 	/* scheduler job object */
 	struct amdgpu_job	*job;
+	struct amdgpu_ring	*ring;
 
 	/* buffer objects */
 	struct ww_acquire_ctx	ticket;
@@ -1050,40 +1046,6 @@ struct amdgpu_cs_parser {
 	struct drm_syncobj **post_dep_syncobjs;
 };
 
-#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
-#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
-#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occured */
-
-struct amdgpu_job {
-	struct drm_sched_job    base;
-	struct amdgpu_device	*adev;
-	struct amdgpu_vm	*vm;
-	struct amdgpu_ring	*ring;
-	struct amdgpu_sync	sync;
-	struct amdgpu_sync	sched_sync;
-	struct amdgpu_ib	*ibs;
-	struct dma_fence	*fence; /* the hw fence */
-	uint32_t		preamble_status;
-	uint32_t		num_ibs;
-	void			*owner;
-	uint64_t		fence_ctx; /* the fence_context this job uses */
-	bool			vm_needs_flush;
-	uint64_t		vm_pd_addr;
-	unsigned		vmid;
-	unsigned		pasid;
-	uint32_t		gds_base, gds_size;
-	uint32_t		gws_base, gws_size;
-	uint32_t		oa_base, oa_size;
-	uint32_t		vram_lost_counter;
-
-	/* user fence handling */
-	uint64_t		uf_addr;
-	uint64_t		uf_sequence;
-
-};
-#define to_amdgpu_job(sched_job) \
-		container_of((sched_job), struct amdgpu_job, base)
-
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
 				      uint32_t ib_idx, int idx)
 {
@@ -1398,6 +1360,7 @@ enum amd_hw_ip_block_type {
 	PWR_HWIP,
 	NBIF_HWIP,
 	THM_HWIP,
+	CLK_HWIP,
 	MAX_HWIP
 };
 
@@ -1588,9 +1551,9 @@ struct amdgpu_device {
 	DECLARE_HASHTABLE(mn_hash, 7);
 
 	/* tracking pinned memory */
-	u64 vram_pin_size;
-	u64 invisible_pin_size;
-	u64 gart_pin_size;
+	atomic64_t vram_pin_size;
+	atomic64_t visible_pin_size;
+	atomic64_t gart_pin_size;
 
 	/* amdkfd interface */
 	struct kfd_dev		*kfd;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 305143fcc1ce..e3ed08dca7b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -251,7 +251,6 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	struct amdgpu_bo *bo = NULL;
 	struct amdgpu_bo_param bp;
 	int r;
-	uint64_t gpu_addr_tmp = 0;
 	void *cpu_ptr_tmp = NULL;
 
 	memset(&bp, 0, sizeof(bp));
@@ -275,13 +274,18 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 		goto allocate_mem_reserve_bo_failed;
 	}
 
-	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
-			  &gpu_addr_tmp);
+	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
 		goto allocate_mem_pin_bo_failed;
 	}
 
+	r = amdgpu_ttm_alloc_gart(&bo->tbo);
+	if (r) {
+		dev_err(adev->dev, "%p bind failed\n", bo);
+		goto allocate_mem_kmap_bo_failed;
+	}
+
 	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
 	if (r) {
 		dev_err(adev->dev,
@@ -290,7 +294,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	}
 
 	*mem_obj = bo;
-	*gpu_addr = gpu_addr_tmp;
+	*gpu_addr = amdgpu_bo_gpu_offset(bo);
 	*cpu_ptr = cpu_ptr_tmp;
 
 	amdgpu_bo_unreserve(bo);
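
Note on the pinning changes above: amdgpu_bo_pin() no longer returns the GPU address through an out parameter. A caller that needs a GART address now binds the BO explicitly with amdgpu_ttm_alloc_gart() and then queries amdgpu_bo_gpu_offset(), as the hunks above do. A minimal sketch of the resulting pattern (error handling trimmed; assumes "bo" was already created):

	r = amdgpu_bo_reserve(bo, false);		/* take the BO reservation */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);	/* pin, no address out-param */
	r = amdgpu_ttm_alloc_gart(&bo->tbo);		/* explicit GART bind */
	gpu_addr = amdgpu_bo_gpu_offset(bo);		/* offset is only valid now */
	amdgpu_bo_unreserve(bo);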
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ff8fd75f7ca5..079af8ac2636 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1587,7 +1587,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
 		goto bo_reserve_failed;
 	}
 
-	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 	if (ret) {
 		pr_err("Failed to pin bo. ret %d\n", ret);
 		goto pin_failed;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 19cfff31f2e1..3079ea8523c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -95,11 +95,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	r = amdgpu_bo_reserve(sobj, false);
 	if (unlikely(r != 0))
 		goto out_cleanup;
-	r = amdgpu_bo_pin(sobj, sdomain, &saddr);
+	r = amdgpu_bo_pin(sobj, sdomain);
+	if (r) {
+		amdgpu_bo_unreserve(sobj);
+		goto out_cleanup;
+	}
+	r = amdgpu_ttm_alloc_gart(&sobj->tbo);
 	amdgpu_bo_unreserve(sobj);
 	if (r) {
 		goto out_cleanup;
 	}
+	saddr = amdgpu_bo_gpu_offset(sobj);
 	bp.domain = ddomain;
 	r = amdgpu_bo_create(adev, &bp, &dobj);
 	if (r) {
@@ -108,11 +114,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	r = amdgpu_bo_reserve(dobj, false);
 	if (unlikely(r != 0))
 		goto out_cleanup;
-	r = amdgpu_bo_pin(dobj, ddomain, &daddr);
+	r = amdgpu_bo_pin(dobj, ddomain);
+	if (r) {
+		amdgpu_bo_unreserve(sobj);
+		goto out_cleanup;
+	}
+	r = amdgpu_ttm_alloc_gart(&dobj->tbo);
 	amdgpu_bo_unreserve(dobj);
 	if (r) {
 		goto out_cleanup;
 	}
+	daddr = amdgpu_bo_gpu_offset(dobj);
 
 	if (adev->mman.buffer_funcs) {
 		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 92be7f6de197..7679c068c89a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -55,15 +55,15 @@ static void amdgpu_bo_list_release_rcu(struct kref *ref)
 	kfree_rcu(list, rhead);
 }
 
-static int amdgpu_bo_list_create(struct amdgpu_device *adev,
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
 				 struct drm_file *filp,
 				 struct drm_amdgpu_bo_list_entry *info,
 				 unsigned num_entries,
-				 int *id)
+				 struct amdgpu_bo_list **list_out)
 {
-	int r;
-	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo_list *list;
+	int r;
+
 
 	list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
 	if (!list)
@@ -78,16 +78,7 @@ static int amdgpu_bo_list_create(struct amdgpu_device *adev,
 		return r;
 	}
 
-	/* idr alloc should be called only after initialization of bo list. */
-	mutex_lock(&fpriv->bo_list_lock);
-	r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
-	mutex_unlock(&fpriv->bo_list_lock);
-	if (r < 0) {
-		amdgpu_bo_list_free(list);
-		return r;
-	}
-	*id = r;
-
+	*list_out = list;
 	return 0;
 }
 
@@ -263,55 +254,79 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
 	kfree(list);
 }
 
-int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *filp)
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+				      struct drm_amdgpu_bo_list_entry **info_param)
 {
+	const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
 	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
-
-	struct amdgpu_device *adev = dev->dev_private;
-	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	union drm_amdgpu_bo_list *args = data;
-	uint32_t handle = args->in.list_handle;
-	const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
-
 	struct drm_amdgpu_bo_list_entry *info;
-	struct amdgpu_bo_list *list;
-
 	int r;
 
-	info = kvmalloc_array(args->in.bo_number,
-			      sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
+	info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
 	/* copy the handle array from userspace to a kernel buffer */
 	r = -EFAULT;
-	if (likely(info_size == args->in.bo_info_size)) {
-		unsigned long bytes = args->in.bo_number *
-				      args->in.bo_info_size;
+	if (likely(info_size == in->bo_info_size)) {
+		unsigned long bytes = in->bo_number *
+				      in->bo_info_size;
 
 		if (copy_from_user(info, uptr, bytes))
 			goto error_free;
 
 	} else {
-		unsigned long bytes = min(args->in.bo_info_size, info_size);
+		unsigned long bytes = min(in->bo_info_size, info_size);
 		unsigned i;
 
-		memset(info, 0, args->in.bo_number * info_size);
-		for (i = 0; i < args->in.bo_number; ++i) {
+		memset(info, 0, in->bo_number * info_size);
+		for (i = 0; i < in->bo_number; ++i) {
 			if (copy_from_user(&info[i], uptr, bytes))
 				goto error_free;
 
-			uptr += args->in.bo_info_size;
+			uptr += in->bo_info_size;
 		}
 	}
 
+	*info_param = info;
+	return 0;
+
+error_free:
+	kvfree(info);
+	return r;
+}
+
+int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	union drm_amdgpu_bo_list *args = data;
+	uint32_t handle = args->in.list_handle;
+	struct drm_amdgpu_bo_list_entry *info = NULL;
+	struct amdgpu_bo_list *list;
+	int r;
+
+	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
+	if (r)
+		goto error_free;
+
 	switch (args->in.operation) {
 	case AMDGPU_BO_LIST_OP_CREATE:
 		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
-					  &handle);
+					  &list);
 		if (r)
 			goto error_free;
+
+		mutex_lock(&fpriv->bo_list_lock);
+		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
+		mutex_unlock(&fpriv->bo_list_lock);
+		if (r < 0) {
+			amdgpu_bo_list_free(list);
+			return r;
+		}
+
+		handle = r;
 		break;
 
 	case AMDGPU_BO_LIST_OP_DESTROY:
@@ -345,6 +360,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 	return 0;
 
 error_free:
-	kvfree(info);
+	if (info)
+		kvfree(info);
 	return r;
 }
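
Note on the bo_list refactoring above: parsing the userspace entry array (amdgpu_bo_create_list_entry_array) and creating the list (amdgpu_bo_list_create) are now separate, exported helpers, while publishing the idr handle stays in the ioctl. A hedged sketch of another in-kernel caller reusing the two halves ("in" is a hypothetical, already-filled struct drm_amdgpu_bo_list_in):

	struct drm_amdgpu_bo_list_entry *info = NULL;
	struct amdgpu_bo_list *list;
	int r;

	r = amdgpu_bo_create_list_entry_array(in, &info);
	if (r)
		return r;
	r = amdgpu_bo_list_create(adev, filp, info, in->bo_number, &list);
	kvfree(info);	/* the entry array is only needed during creation */
	return r;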
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7a625f3989a0..7c5cc33d0cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -66,11 +66,35 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	return 0;
 }
 
-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
+				      struct drm_amdgpu_bo_list_in *data)
+{
+	int r;
+	struct drm_amdgpu_bo_list_entry *info = NULL;
+
+	r = amdgpu_bo_create_list_entry_array(data, &info);
+	if (r)
+		return r;
+
+	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
+				  &p->bo_list);
+	if (r)
+		goto error_free;
+
+	kvfree(info);
+	return 0;
+
+error_free:
+	if (info)
+		kvfree(info);
+
+	return r;
+}
+
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array;
 	unsigned size, num_ibs = 0;
@@ -164,6 +188,19 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 			break;
 
+		case AMDGPU_CHUNK_ID_BO_HANDLES:
+			size = sizeof(struct drm_amdgpu_bo_list_in);
+			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
+				ret = -EINVAL;
+				goto free_partial_kdata;
+			}
+
+			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+			if (ret)
+				goto free_partial_kdata;
+
+			break;
+
 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
@@ -187,6 +224,10 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	if (p->uf_entry.robj)
 		p->job->uf_addr = uf_offset;
 	kfree(chunk_array);
+
+	/* Use this opportunity to fill in task info for the vm */
+	amdgpu_vm_set_task_info(vm);
+
 	return 0;
 
 free_all_kdata:
@@ -258,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 		return;
 	}
 
-	total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
+	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
 	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 
@@ -530,7 +571,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	INIT_LIST_HEAD(&p->validated);
 
-	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
+	if (!p->bo_list)
+		p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+	else
+		mutex_lock(&p->bo_list->lock);
+
 	if (p->bo_list) {
 		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 		if (p->bo_list->first_userptr != p->bo_list->num_entries)
@@ -866,11 +912,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_ring *ring = p->job->ring;
+	struct amdgpu_ring *ring = p->ring;
 	int r;
 
 	/* Only for UVD/VCE VM emulation */
-	if (p->job->ring->funcs->parse_cs) {
+	if (p->ring->funcs->parse_cs) {
 		unsigned i, j;
 
 		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -928,6 +974,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 		r = amdgpu_bo_vm_update_pte(p);
 		if (r)
 			return r;
+
+		r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+		if (r)
+			return r;
 	}
 
 	return amdgpu_cs_sync_rings(p);
@@ -980,10 +1030,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			}
 		}
 
-		if (parser->job->ring && parser->job->ring != ring)
+		if (parser->ring && parser->ring != ring)
 			return -EINVAL;
 
-		parser->job->ring = ring;
+		parser->ring = ring;
 
 		r = amdgpu_ib_get(adev, vm,
 				  ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
@@ -1002,11 +1052,11 @@
 
 	/* UVD & VCE fw doesn't support user fences */
 	if (parser->job->uf_addr && (
-	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
-	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+	    parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+	    parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 		return -EINVAL;
 
-	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
+	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
 }
 
 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1157,8 +1207,9 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 			    union drm_amdgpu_cs *cs)
 {
-	struct amdgpu_ring *ring = p->job->ring;
+	struct amdgpu_ring *ring = p->ring;
 	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+	enum drm_sched_priority priority;
 	struct amdgpu_job *job;
 	unsigned i;
 	uint64_t seq;
@@ -1189,7 +1240,6 @@
 	}
 
 	job->owner = p->filp;
-	job->fence_ctx = entity->fence_context;
 	p->fence = dma_fence_get(&job->base.s_fence->finished);
 
 	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
@@ -1207,11 +1257,14 @@
 	job->uf_sequence = seq;
 
 	amdgpu_job_free_resources(job);
-	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
 
 	trace_amdgpu_cs_ioctl(job);
+	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
+	ring = to_amdgpu_ring(entity->sched);
+	amdgpu_ring_priority_get(ring, priority);
+
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
 	amdgpu_mn_unlock(p->mn);
 
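
Note on the new AMDGPU_CHUNK_ID_BO_HANDLES chunk: with KMS 3.27 userspace can pass the BO list inline with the submission instead of creating it beforehand via the BO_LIST ioctl. A hedged userspace-side sketch (the chunk wiring follows the existing drm_amdgpu_cs_chunk conventions; "bo_entries" and "num_bos" are placeholders filled by the caller):

	struct drm_amdgpu_bo_list_in list_in = {
		.bo_number    = num_bos,
		.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry),
		.bo_info_ptr  = (uintptr_t)bo_entries,	/* array of list entries */
	};
	struct drm_amdgpu_cs_chunk chunk = {
		.chunk_id   = AMDGPU_CHUNK_ID_BO_HANDLES,
		.length_dw  = sizeof(list_in) / 4,
		.chunk_data = (uintptr_t)&list_in,
	};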
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0120b24fae1b..83e3b320a793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 		if (ring == &adev->gfx.kiq.ring)
 			continue;
 
-		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, &ctx->guilty);
+		r = drm_sched_entity_init(&ctx->rings[i].entity,
+					  &rq, 1, &ctx->guilty);
 		if (r)
 			goto failed;
 	}
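
Note on the drm_sched_entity_init() change: the entity is no longer tied to a scheduler plus a single run queue at init time; it now takes an array of candidate run queues and a count, which is why the call above passes &rq and 1. A hedged sketch of the single-run-queue case (assumes "rq" is picked from a ring's scheduler run queues):

	struct drm_sched_rq *rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	r = drm_sched_entity_init(&entity, &rq, 1, &guilty);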
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9883fa9bb41b..386a7b34d2f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2200,7 +2200,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 	case CHIP_RAVEN:
 #endif
 		return amdgpu_dc != 0;
@@ -2758,11 +2758,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 			r = amdgpu_bo_reserve(aobj, true);
 			if (r == 0) {
-				r = amdgpu_bo_pin(aobj,
-						  AMDGPU_GEM_DOMAIN_VRAM,
-						  &amdgpu_crtc->cursor_addr);
+				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 				if (r != 0)
 					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 				amdgpu_bo_unreserve(aobj);
 			}
 		}
@@ -3254,7 +3253,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 		kthread_park(ring->sched.thread);
 
-		if (job && job->ring->idx != i)
+		if (job && job->base.sched == &ring->sched)
 			continue;
 
 		drm_sched_hw_job_reset(&ring->sched, &job->base);
@@ -3278,7 +3277,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		 * or all rings (in the case @job is NULL)
 		 * after above amdgpu_reset accomplished
 		 */
-		if ((!job || job->ring->idx == i) && !r)
+		if ((!job || job->base.sched == &ring->sched) && !r)
 			drm_sched_job_recovery(&ring->sched);
 
 		kthread_unpark(ring->sched.thread);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 76ee8e04ff11..6748cd7fc129 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -157,7 +157,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	struct amdgpu_bo *new_abo;
 	unsigned long flags;
 	u64 tiling_flags;
-	u64 base;
 	int i, r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -189,12 +188,18 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
+	r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
 	}
 
+	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("%p bind failed\n", new_abo);
+		goto unpin;
+	}
+
 	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
@@ -206,7 +211,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
 	amdgpu_bo_unreserve(new_abo);
 
-	work->base = base;
+	work->base = amdgpu_bo_gpu_offset(new_abo);
 	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
 		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 06aede194bf8..8843a06360fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -69,9 +69,10 @@
  * - 3.24.0 - Add high priority compute support for gfx9
  * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
  * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
+ * - 3.27.0 - Add new chunk to to AMDGPU_CS to enable BO_LIST creation.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	26
+#define KMS_DRIVER_MINOR	27
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -103,11 +104,8 @@ int amdgpu_vram_page_split = 512;
 int amdgpu_vm_update_mode = -1;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_dc = -1;
-int amdgpu_dc_log = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_no_evict = 0;
-int amdgpu_direct_gma_size = 0;
 uint amdgpu_pcie_gen_cap = 0;
 uint amdgpu_pcie_lane_cap = 0;
 uint amdgpu_cg_mask = 0xffffffff;
@@ -340,9 +338,6 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(dc, amdgpu_dc, int, 0444);
 
-MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty");
-module_param_named(dc_log, amdgpu_dc_log, int, 0444);
-
 /**
  * DOC: sched_jobs (int)
  * Override the max number of jobs supported in the sw queue. The default is 32.
@@ -365,12 +360,6 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
 module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
 
-MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
-module_param_named(no_evict, amdgpu_no_evict, int, 0444);
-
-MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
-module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
-
 /**
  * DOC: pcie_gen_cap (uint)
  * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 811c62927c38..d44b76455e89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -168,11 +168,19 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 	}
 
 
-	ret = amdgpu_bo_pin(abo, domain, NULL);
+	ret = amdgpu_bo_pin(abo, domain);
 	if (ret) {
 		amdgpu_bo_unreserve(abo);
 		goto out_unref;
 	}
+
+	ret = amdgpu_ttm_alloc_gart(&abo->tbo);
+	if (ret) {
+		amdgpu_bo_unreserve(abo);
+		dev_err(adev->dev, "%p bind failed\n", abo);
+		goto out_unref;
+	}
+
 	ret = amdgpu_bo_kmap(abo, NULL);
 	amdgpu_bo_unreserve(abo);
 	if (ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 36113cb60ca2..a54d5655a191 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -143,14 +143,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
  */
 int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
 {
-	uint64_t gpu_addr;
 	int r;
 
 	r = amdgpu_bo_reserve(adev->gart.robj, false);
 	if (unlikely(r != 0))
 		return r;
-	r = amdgpu_bo_pin(adev->gart.robj,
-				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+	r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
 	if (r) {
 		amdgpu_bo_unreserve(adev->gart.robj);
 		return r;
@@ -159,7 +157,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
 	if (r)
 		amdgpu_bo_unpin(adev->gart.robj);
 	amdgpu_bo_unreserve(adev->gart.robj);
-	adev->gart.table_addr = gpu_addr;
+	adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ce7739832d29..5518e623fed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -139,7 +139,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	/* ring tests don't use a job */
 	if (job) {
 		vm = job->vm;
-		fence_ctx = job->fence_ctx;
+		fence_ctx = job->base.s_fence->scheduled.context;
 	} else {
 		vm = NULL;
 		fence_ctx = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 2bd56760c744..5a2c26a85984 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -30,14 +30,14 @@
 
 static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
-	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+	struct amdgpu_job *job = to_amdgpu_job(s_job);
 
-	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-		  job->base.sched->name,
-		  atomic_read(&job->ring->fence_drv.last_seq),
-		  job->ring->fence_drv.sync_seq);
+	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+		  ring->fence_drv.sync_seq);
 
-	amdgpu_device_gpu_recover(job->adev, job, false);
+	amdgpu_device_gpu_recover(ring->adev, job, false);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	if (!*job)
 		return -ENOMEM;
 
-	(*job)->adev = adev;
+	/*
+	 * Initialize the scheduler to at least some ring so that we always
+	 * have a pointer to adev.
+	 */
+	(*job)->base.sched = &adev->rings[0]->sched;
 	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
 	struct dma_fence *f;
 	unsigned i;
 
@@ -93,14 +98,15 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
 	for (i = 0; i < job->num_ibs; ++i)
-		amdgpu_ib_free(job->adev, &job->ibs[i], f);
+		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
-	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+	struct amdgpu_job *job = to_amdgpu_job(s_job);
 
-	amdgpu_ring_priority_put(job->ring, s_job->s_priority);
+	amdgpu_ring_priority_put(ring, s_job->s_priority);
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
@@ -117,50 +123,68 @@ void amdgpu_job_free(struct amdgpu_job *job)
 	kfree(job);
 }
 
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-		      struct drm_sched_entity *entity, void *owner,
-		      struct dma_fence **f)
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+		      void *owner, struct dma_fence **f)
 {
+	enum drm_sched_priority priority;
+	struct amdgpu_ring *ring;
 	int r;
-	job->ring = ring;
 
 	if (!f)
 		return -EINVAL;
 
-	r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
+	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
 	if (r)
 		return r;
 
 	job->owner = owner;
-	job->fence_ctx = entity->fence_context;
 	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
-	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
+	ring = to_amdgpu_ring(entity->sched);
+	amdgpu_ring_priority_get(ring, priority);
+
+	return 0;
+}
+
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+			     struct dma_fence **fence)
+{
+	int r;
+
+	job->base.sched = &ring->sched;
+	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
+	job->fence = dma_fence_get(*fence);
+	if (r)
+		return r;
+
+	amdgpu_job_free(job);
 	return 0;
 }
 
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 					       struct drm_sched_entity *s_entity)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
+	struct dma_fence *fence;
 	bool explicit = false;
 	int r;
-	struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
 
+	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
 	if (fence && explicit) {
 		if (drm_sched_dependency_optimized(fence, s_entity)) {
-			r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+					      fence, false);
 			if (r)
-				DRM_ERROR("Error adding fence to sync (%d)\n", r);
+				DRM_ERROR("Error adding fence (%d)\n", r);
 		}
 	}
 
 	while (fence == NULL && vm && !job->vmid) {
-		struct amdgpu_ring *ring = job->ring;
-
 		r = amdgpu_vmid_grab(vm, ring, &job->sync,
 				     &job->base.s_fence->finished,
 				     job);
@@ -175,30 +199,25 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 
 static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 	struct dma_fence *fence = NULL, *finished;
-	struct amdgpu_device *adev;
 	struct amdgpu_job *job;
 	int r;
 
-	if (!sched_job) {
-		DRM_ERROR("job is null\n");
-		return NULL;
-	}
 	job = to_amdgpu_job(sched_job);
 	finished = &job->base.s_fence->finished;
-	adev = job->adev;
 
 	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
 
-	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
 		dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
 
 	if (finished->error < 0) {
 		DRM_INFO("Skip scheduling IBs!\n");
 	} else {
-		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
 				       &fence);
 		if (r)
 			DRM_ERROR("Error scheduling IBs (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
new file mode 100644
index 000000000000..57cfe78a262b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -0,0 +1,74 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __AMDGPU_JOB_H__
24#define __AMDGPU_JOB_H__
25
26/* bit set means command submit involves a preamble IB */
27#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0)
28/* bit set means preamble IB is first presented in belonging context */
29#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1)
30/* bit set means context switch occured */
31#define AMDGPU_HAVE_CTX_SWITCH (1 << 2)
32
33#define to_amdgpu_job(sched_job) \
34 container_of((sched_job), struct amdgpu_job, base)
35
36struct amdgpu_fence;
37
38struct amdgpu_job {
39 struct drm_sched_job base;
40 struct amdgpu_vm *vm;
41 struct amdgpu_sync sync;
42 struct amdgpu_sync sched_sync;
43 struct amdgpu_ib *ibs;
44 struct dma_fence *fence; /* the hw fence */
45 uint32_t preamble_status;
46 uint32_t num_ibs;
47 void *owner;
48 bool vm_needs_flush;
49 uint64_t vm_pd_addr;
50 unsigned vmid;
51 unsigned pasid;
52 uint32_t gds_base, gds_size;
53 uint32_t gws_base, gws_size;
54 uint32_t oa_base, oa_size;
55 uint32_t vram_lost_counter;
56
57 /* user fence handling */
58 uint64_t uf_addr;
59 uint64_t uf_sequence;
60
61};
62
63int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
64 struct amdgpu_job **job, struct amdgpu_vm *vm);
65int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
66 struct amdgpu_job **job);
67
68void amdgpu_job_free_resources(struct amdgpu_job *job);
69void amdgpu_job_free(struct amdgpu_job *job);
70int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
71 void *owner, struct dma_fence **f);
72int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
73 struct dma_fence **fence);
74#endif
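
The header above relies on the container_of() pattern: struct amdgpu_job embeds struct drm_sched_job as its first member, and to_amdgpu_job() recovers the outer object from the pointer the scheduler hands back. Combined with the to_amdgpu_ring() macro added to amdgpu_ring.h further down, this is what lets the series drop the old job->ring pointer. A minimal, self-contained sketch of the pattern (toy types, not driver code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_job { int id; };           /* stand-in for drm_sched_job */

struct my_job {
        struct sched_job base;          /* embedded, like in amdgpu_job */
        int payload;
};

#define to_my_job(sched_job) container_of((sched_job), struct my_job, base)

int main(void)
{
        struct my_job job = { .base = { .id = 1 }, .payload = 42 };
        struct sched_job *handle = &job.base;   /* all the scheduler sees */

        /* recover the containing object, as to_amdgpu_job() does */
        printf("payload = %d\n", to_my_job(handle)->payload);
        return 0;
}
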
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 2060f208e60b..207f238649b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -501,13 +501,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
501 case AMDGPU_INFO_VRAM_GTT: { 501 case AMDGPU_INFO_VRAM_GTT: {
502 struct drm_amdgpu_info_vram_gtt vram_gtt; 502 struct drm_amdgpu_info_vram_gtt vram_gtt;
503 503
504 vram_gtt.vram_size = adev->gmc.real_vram_size; 504 vram_gtt.vram_size = adev->gmc.real_vram_size -
505 vram_gtt.vram_size -= adev->vram_pin_size; 505 atomic64_read(&adev->vram_pin_size);
506 vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size; 506 vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
507 vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size); 507 atomic64_read(&adev->visible_pin_size);
508 vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; 508 vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
509 vram_gtt.gtt_size *= PAGE_SIZE; 509 vram_gtt.gtt_size *= PAGE_SIZE;
510 vram_gtt.gtt_size -= adev->gart_pin_size; 510 vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
511 return copy_to_user(out, &vram_gtt, 511 return copy_to_user(out, &vram_gtt,
512 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; 512 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
513 } 513 }
@@ -516,17 +516,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
516 516
517 memset(&mem, 0, sizeof(mem)); 517 memset(&mem, 0, sizeof(mem));
518 mem.vram.total_heap_size = adev->gmc.real_vram_size; 518 mem.vram.total_heap_size = adev->gmc.real_vram_size;
519 mem.vram.usable_heap_size = 519 mem.vram.usable_heap_size = adev->gmc.real_vram_size -
520 adev->gmc.real_vram_size - adev->vram_pin_size; 520 atomic64_read(&adev->vram_pin_size);
521 mem.vram.heap_usage = 521 mem.vram.heap_usage =
522 amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 522 amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
523 mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; 523 mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
524 524
525 mem.cpu_accessible_vram.total_heap_size = 525 mem.cpu_accessible_vram.total_heap_size =
526 adev->gmc.visible_vram_size; 526 adev->gmc.visible_vram_size;
527 mem.cpu_accessible_vram.usable_heap_size = 527 mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
528 adev->gmc.visible_vram_size - 528 atomic64_read(&adev->visible_pin_size);
529 (adev->vram_pin_size - adev->invisible_pin_size);
530 mem.cpu_accessible_vram.heap_usage = 529 mem.cpu_accessible_vram.heap_usage =
531 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 530 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
532 mem.cpu_accessible_vram.max_allocation = 531 mem.cpu_accessible_vram.max_allocation =
@@ -534,8 +533,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
534 533
535 mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size; 534 mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
536 mem.gtt.total_heap_size *= PAGE_SIZE; 535 mem.gtt.total_heap_size *= PAGE_SIZE;
537 mem.gtt.usable_heap_size = mem.gtt.total_heap_size 536 mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
538 - adev->gart_pin_size; 537 atomic64_read(&adev->gart_pin_size);
539 mem.gtt.heap_usage = 538 mem.gtt.heap_usage =
540 amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]); 539 amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
541 mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; 540 mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
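
The pin-size counters are read with atomic64_read() here because, after this patch, pin and unpin update them without taking a common lock. A userspace toy model of the same accounting, with C11 atomics standing in for the kernel's atomic64_t (counter name borrowed from the patch, otherwise hypothetical):

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t vram_pin_size;   /* stand-in for adev->vram_pin_size */

static void pin_bo(int64_t bytes)   { atomic_fetch_add(&vram_pin_size, bytes); }
static void unpin_bo(int64_t bytes) { atomic_fetch_sub(&vram_pin_size, bytes); }

int main(void)
{
        int64_t real_vram_size = 1 << 20;       /* pretend 1 MiB of VRAM */

        pin_bo(4096);
        pin_bo(8192);
        unpin_bo(4096);

        /* what the INFO ioctl reports: total minus currently pinned */
        printf("usable: %" PRId64 " bytes\n",
               real_vram_size - atomic_load(&vram_pin_size));
        return 0;
}
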
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 512f59836436..b12526ce1a9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -63,11 +63,35 @@ static bool amdgpu_need_backup(struct amdgpu_device *adev)
63 return true; 63 return true;
64} 64}
65 65
66/**
67 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
68 *
69 * @bo: &amdgpu_bo buffer object
70 *
71 * This function is called when a BO stops being pinned, and updates the
72 * &amdgpu_device pin_size values accordingly.
73 */
74static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
75{
76 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
77
78 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
79 atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
80 atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
81 &adev->visible_pin_size);
82 } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
83 atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
84 }
85}
86
66static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) 87static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
67{ 88{
68 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 89 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
69 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); 90 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
70 91
92 if (WARN_ON_ONCE(bo->pin_count > 0))
93 amdgpu_bo_subtract_pin_size(bo);
94
71 if (bo->kfd_bo) 95 if (bo->kfd_bo)
72 amdgpu_amdkfd_unreserve_system_memory_limit(bo); 96 amdgpu_amdkfd_unreserve_system_memory_limit(bo);
73 97
@@ -252,22 +276,33 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
252 goto error_free; 276 goto error_free;
253 } 277 }
254 278
255 r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr); 279 r = amdgpu_bo_pin(*bo_ptr, domain);
256 if (r) { 280 if (r) {
257 dev_err(adev->dev, "(%d) kernel bo pin failed\n", r); 281 dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
258 goto error_unreserve; 282 goto error_unreserve;
259 } 283 }
260 284
285 r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
286 if (r) {
287 dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
288 goto error_unpin;
289 }
290
291 if (gpu_addr)
292 *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
293
261 if (cpu_addr) { 294 if (cpu_addr) {
262 r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); 295 r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
263 if (r) { 296 if (r) {
264 dev_err(adev->dev, "(%d) kernel bo map failed\n", r); 297 dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
265 goto error_unreserve; 298 goto error_unpin;
266 } 299 }
267 } 300 }
268 301
269 return 0; 302 return 0;
270 303
304error_unpin:
305 amdgpu_bo_unpin(*bo_ptr);
271error_unreserve: 306error_unreserve:
272 amdgpu_bo_unreserve(*bo_ptr); 307 amdgpu_bo_unreserve(*bo_ptr);
273 308
@@ -817,7 +852,6 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
817 * @domain: domain to be pinned to 852 * @domain: domain to be pinned to
818 * @min_offset: the start of requested address range 853 * @min_offset: the start of requested address range
819 * @max_offset: the end of requested address range 854 * @max_offset: the end of requested address range
820 * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
821 * 855 *
822 * Pins the buffer object according to requested domain and address range. If 856 * Pins the buffer object according to requested domain and address range. If
823 * the memory is unbound gart memory, binds the pages into gart table. Adjusts 857 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
@@ -835,8 +869,7 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
835 * 0 for success or a negative error code on failure. 869 * 0 for success or a negative error code on failure.
836 */ 870 */
837int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, 871int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
838 u64 min_offset, u64 max_offset, 872 u64 min_offset, u64 max_offset)
839 u64 *gpu_addr)
840{ 873{
841 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 874 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
842 struct ttm_operation_ctx ctx = { false, false }; 875 struct ttm_operation_ctx ctx = { false, false };
@@ -868,8 +901,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
868 return -EINVAL; 901 return -EINVAL;
869 902
870 bo->pin_count++; 903 bo->pin_count++;
871 if (gpu_addr)
872 *gpu_addr = amdgpu_bo_gpu_offset(bo);
873 904
874 if (max_offset != 0) { 905 if (max_offset != 0) {
875 u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset; 906 u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
@@ -905,22 +936,15 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
905 goto error; 936 goto error;
906 } 937 }
907 938
908 r = amdgpu_ttm_alloc_gart(&bo->tbo);
909 if (unlikely(r)) {
910 dev_err(adev->dev, "%p bind failed\n", bo);
911 goto error;
912 }
913
914 bo->pin_count = 1; 939 bo->pin_count = 1;
915 if (gpu_addr != NULL)
916 *gpu_addr = amdgpu_bo_gpu_offset(bo);
917 940
918 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 941 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
919 if (domain == AMDGPU_GEM_DOMAIN_VRAM) { 942 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
920 adev->vram_pin_size += amdgpu_bo_size(bo); 943 atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
921 adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo); 944 atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
945 &adev->visible_pin_size);
922 } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { 946 } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
923 adev->gart_pin_size += amdgpu_bo_size(bo); 947 atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
924 } 948 }
925 949
926error: 950error:
@@ -931,7 +955,6 @@ error:
931 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object 955 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
932 * @bo: &amdgpu_bo buffer object to be pinned 956 * @bo: &amdgpu_bo buffer object to be pinned
933 * @domain: domain to be pinned to 957 * @domain: domain to be pinned to
934 * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
935 * 958 *
936 * A simple wrapper to amdgpu_bo_pin_restricted(). 959 * A simple wrapper to amdgpu_bo_pin_restricted().
937 * Provides a simpler API for buffers that do not have any strict restrictions 960 * Provides a simpler API for buffers that do not have any strict restrictions
@@ -940,9 +963,9 @@ error:
940 * Returns: 963 * Returns:
941 * 0 for success or a negative error code on failure. 964 * 0 for success or a negative error code on failure.
942 */ 965 */
943int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr) 966int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
944{ 967{
945 return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr); 968 return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
946} 969}
947 970
948/** 971/**
@@ -969,12 +992,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
969 if (bo->pin_count) 992 if (bo->pin_count)
970 return 0; 993 return 0;
971 994
972 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { 995 amdgpu_bo_subtract_pin_size(bo);
973 adev->vram_pin_size -= amdgpu_bo_size(bo);
974 adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
975 } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
976 adev->gart_pin_size -= amdgpu_bo_size(bo);
977 }
978 996
979 for (i = 0; i < bo->placement.num_placement; i++) { 997 for (i = 0; i < bo->placement.num_placement; i++) {
980 bo->placements[i].lpfn = 0; 998 bo->placements[i].lpfn = 0;
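
With the gpu_addr argument gone from amdgpu_bo_pin(), callers follow the three-step convention visible throughout this patch: pin, bind to GART if needed, then query the offset. A sketch of the new calling convention (kernel context assumed; only functions that appear in this diff, error handling trimmed):

static int example_pin_and_get_addr(struct amdgpu_bo *bo, u64 *gpu_addr)
{
        int r;

        r = amdgpu_bo_reserve(bo, false);
        if (r)
                return r;

        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);   /* no &gpu_addr any more */
        if (r)
                goto out_unreserve;

        r = amdgpu_ttm_alloc_gart(&bo->tbo);    /* bind before taking the offset */
        if (r) {
                amdgpu_bo_unpin(bo);
                goto out_unreserve;
        }

        *gpu_addr = amdgpu_bo_gpu_offset(bo);   /* queried separately now */

out_unreserve:
        amdgpu_bo_unreserve(bo);
        return r;
}
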
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 731748033878..9c3e29a04eb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -252,10 +252,9 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
252void amdgpu_bo_kunmap(struct amdgpu_bo *bo); 252void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
253struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); 253struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
254void amdgpu_bo_unref(struct amdgpu_bo **bo); 254void amdgpu_bo_unref(struct amdgpu_bo **bo);
255int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr); 255int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
256int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, 256int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
257 u64 min_offset, u64 max_offset, 257 u64 min_offset, u64 max_offset);
258 u64 *gpu_addr);
259int amdgpu_bo_unpin(struct amdgpu_bo *bo); 258int amdgpu_bo_unpin(struct amdgpu_bo *bo);
260int amdgpu_bo_evict_vram(struct amdgpu_device *adev); 259int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
261int amdgpu_bo_init(struct amdgpu_device *adev); 260int amdgpu_bo_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index f1404adc3a90..15a1192c1ec5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -606,40 +606,59 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
606 return snprintf(buf, PAGE_SIZE, "\n"); 606 return snprintf(buf, PAGE_SIZE, "\n");
607} 607}
608 608
609static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 609/*
610 struct device_attribute *attr, 610 * Worst case: 32 bits individually specified, in octal at 12 characters
611 const char *buf, 611 * per line (+1 for \n).
612 size_t count) 612 */
613#define AMDGPU_MASK_BUF_MAX (32 * 13)
614
615static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
613{ 616{
614 struct drm_device *ddev = dev_get_drvdata(dev);
615 struct amdgpu_device *adev = ddev->dev_private;
616 int ret; 617 int ret;
617 long level; 618 long level;
618 uint32_t mask = 0;
619 char *sub_str = NULL; 619 char *sub_str = NULL;
620 char *tmp; 620 char *tmp;
621 char buf_cpy[count]; 621 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
622 const char delimiter[3] = {' ', '\n', '\0'}; 622 const char delimiter[3] = {' ', '\n', '\0'};
623 size_t bytes;
623 624
624 memcpy(buf_cpy, buf, count+1); 625 *mask = 0;
626
627 bytes = min(count, sizeof(buf_cpy) - 1);
628 memcpy(buf_cpy, buf, bytes);
629 buf_cpy[bytes] = '\0';
625 tmp = buf_cpy; 630 tmp = buf_cpy;
626 while (tmp[0]) { 631 while (tmp[0]) {
627 sub_str = strsep(&tmp, delimiter); 632 sub_str = strsep(&tmp, delimiter);
628 if (strlen(sub_str)) { 633 if (strlen(sub_str)) {
629 ret = kstrtol(sub_str, 0, &level); 634 ret = kstrtol(sub_str, 0, &level);
630 635 if (ret)
631 if (ret) { 636 return -EINVAL;
632 count = -EINVAL; 637 *mask |= 1 << level;
633 goto fail;
634 }
635 mask |= 1 << level;
636 } else 638 } else
637 break; 639 break;
638 } 640 }
641
642 return 0;
643}
644
645static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
646 struct device_attribute *attr,
647 const char *buf,
648 size_t count)
649{
650 struct drm_device *ddev = dev_get_drvdata(dev);
651 struct amdgpu_device *adev = ddev->dev_private;
652 int ret;
653 uint32_t mask = 0;
654
655 ret = amdgpu_read_mask(buf, count, &mask);
656 if (ret)
657 return ret;
658
639 if (adev->powerplay.pp_funcs->force_clock_level) 659 if (adev->powerplay.pp_funcs->force_clock_level)
640 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 660 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
641 661
642fail:
643 return count; 662 return count;
644} 663}
645 664
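
The AMDGPU_MASK_BUF_MAX sizing works out as follows: the largest 32-bit value in octal with the leading-zero prefix that kstrtol()'s base 0 accepts is "037777777777", i.e. 12 characters, plus one separator per token, and at most 32 tokens — hence 32 * 13. A userspace re-implementation of the parse, for illustration only (the kernel version stops at the first empty token instead of skipping it):

#define _DEFAULT_SOURCE         /* for strsep() on glibc */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int read_mask(const char *buf, uint32_t *mask)
{
        char copy[32 * 13 + 1];
        char *tmp = copy, *sub;

        *mask = 0;
        strncpy(copy, buf, sizeof(copy) - 1);
        copy[sizeof(copy) - 1] = '\0';

        while ((sub = strsep(&tmp, " \n")) != NULL) {
                char *end;
                long level;

                if (!*sub)
                        continue;
                level = strtol(sub, &end, 0);   /* base 0: decimal, octal or hex */
                if (*end || level < 0 || level > 31)
                        return -1;
                *mask |= 1u << level;
        }
        return 0;
}

int main(void)
{
        uint32_t mask;

        if (!read_mask("0 1 2\n", &mask))
                printf("mask = 0x%x\n", mask);  /* mask = 0x7 */
        return 0;
}
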
@@ -664,32 +683,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
664 struct drm_device *ddev = dev_get_drvdata(dev); 683 struct drm_device *ddev = dev_get_drvdata(dev);
665 struct amdgpu_device *adev = ddev->dev_private; 684 struct amdgpu_device *adev = ddev->dev_private;
666 int ret; 685 int ret;
667 long level;
668 uint32_t mask = 0; 686 uint32_t mask = 0;
669 char *sub_str = NULL;
670 char *tmp;
671 char buf_cpy[count];
672 const char delimiter[3] = {' ', '\n', '\0'};
673 687
674 memcpy(buf_cpy, buf, count+1); 688 ret = amdgpu_read_mask(buf, count, &mask);
675 tmp = buf_cpy; 689 if (ret)
676 while (tmp[0]) { 690 return ret;
677 sub_str = strsep(&tmp, delimiter);
678 if (strlen(sub_str)) {
679 ret = kstrtol(sub_str, 0, &level);
680 691
681 if (ret) {
682 count = -EINVAL;
683 goto fail;
684 }
685 mask |= 1 << level;
686 } else
687 break;
688 }
689 if (adev->powerplay.pp_funcs->force_clock_level) 692 if (adev->powerplay.pp_funcs->force_clock_level)
690 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 693 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
691 694
692fail:
693 return count; 695 return count;
694} 696}
695 697
@@ -714,33 +716,15 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
714 struct drm_device *ddev = dev_get_drvdata(dev); 716 struct drm_device *ddev = dev_get_drvdata(dev);
715 struct amdgpu_device *adev = ddev->dev_private; 717 struct amdgpu_device *adev = ddev->dev_private;
716 int ret; 718 int ret;
717 long level;
718 uint32_t mask = 0; 719 uint32_t mask = 0;
719 char *sub_str = NULL;
720 char *tmp;
721 char buf_cpy[count];
722 const char delimiter[3] = {' ', '\n', '\0'};
723
724 memcpy(buf_cpy, buf, count+1);
725 tmp = buf_cpy;
726 720
727 while (tmp[0]) { 721 ret = amdgpu_read_mask(buf, count, &mask);
728 sub_str = strsep(&tmp, delimiter); 722 if (ret)
729 if (strlen(sub_str)) { 723 return ret;
730 ret = kstrtol(sub_str, 0, &level);
731 724
732 if (ret) {
733 count = -EINVAL;
734 goto fail;
735 }
736 mask |= 1 << level;
737 } else
738 break;
739 }
740 if (adev->powerplay.pp_funcs->force_clock_level) 725 if (adev->powerplay.pp_funcs->force_clock_level)
741 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 726 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
742 727
743fail:
744 return count; 728 return count;
745} 729}
746 730
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index df7226ad64b5..3ed02f472003 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -232,7 +232,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
232 } 232 }
233 233
234 /* pin buffer into GTT */ 234 /* pin buffer into GTT */
235 r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); 235 r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
236 if (r) 236 if (r)
237 goto error_unreserve; 237 goto error_unreserve;
238 238
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 19e45a3953e0..93794a85f83d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -211,7 +211,8 @@ void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
211 if (!ring->funcs->set_priority) 211 if (!ring->funcs->set_priority)
212 return; 212 return;
213 213
214 atomic_inc(&ring->num_jobs[priority]); 214 if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
215 return;
215 216
216 mutex_lock(&ring->priority_mutex); 217 mutex_lock(&ring->priority_mutex);
217 if (priority <= ring->priority) 218 if (priority <= ring->priority)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index a293f4e6760d..5018c0b6bf1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -44,6 +44,8 @@
44#define AMDGPU_FENCE_FLAG_INT (1 << 1) 44#define AMDGPU_FENCE_FLAG_INT (1 << 1)
45#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2) 45#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
46 46
47#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
48
47enum amdgpu_ring_type { 49enum amdgpu_ring_type {
48 AMDGPU_RING_TYPE_GFX, 50 AMDGPU_RING_TYPE_GFX,
49 AMDGPU_RING_TYPE_COMPUTE, 51 AMDGPU_RING_TYPE_COMPUTE,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 57b14dccd8e0..8904e62dca7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -76,11 +76,12 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
76 r = amdgpu_bo_reserve(vram_obj, false); 76 r = amdgpu_bo_reserve(vram_obj, false);
77 if (unlikely(r != 0)) 77 if (unlikely(r != 0))
78 goto out_unref; 78 goto out_unref;
79 r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr); 79 r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
80 if (r) { 80 if (r) {
81 DRM_ERROR("Failed to pin VRAM object\n"); 81 DRM_ERROR("Failed to pin VRAM object\n");
82 goto out_unres; 82 goto out_unres;
83 } 83 }
84 vram_addr = amdgpu_bo_gpu_offset(vram_obj);
84 for (i = 0; i < n; i++) { 85 for (i = 0; i < n; i++) {
85 void *gtt_map, *vram_map; 86 void *gtt_map, *vram_map;
86 void **gart_start, **gart_end; 87 void **gart_start, **gart_end;
@@ -97,11 +98,17 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
97 r = amdgpu_bo_reserve(gtt_obj[i], false); 98 r = amdgpu_bo_reserve(gtt_obj[i], false);
98 if (unlikely(r != 0)) 99 if (unlikely(r != 0))
99 goto out_lclean_unref; 100 goto out_lclean_unref;
100 r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr); 101 r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
101 if (r) { 102 if (r) {
102 DRM_ERROR("Failed to pin GTT object %d\n", i); 103 DRM_ERROR("Failed to pin GTT object %d\n", i);
103 goto out_lclean_unres; 104 goto out_lclean_unres;
104 } 105 }
106 r = amdgpu_ttm_alloc_gart(&gtt_obj[i]->tbo);
107 if (r) {
108 DRM_ERROR("%p bind failed\n", gtt_obj[i]);
109 goto out_lclean_unpin;
110 }
111 gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);
105 112
106 r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); 113 r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
107 if (r) { 114 if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index e96e26d3f3b0..76920035eb22 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,
150 150
151 TP_fast_assign( 151 TP_fast_assign(
152 __entry->bo_list = p->bo_list; 152 __entry->bo_list = p->bo_list;
153 __entry->ring = p->job->ring->idx; 153 __entry->ring = p->ring->idx;
154 __entry->dw = p->job->ibs[i].length_dw; 154 __entry->dw = p->job->ibs[i].length_dw;
155 __entry->fences = amdgpu_fence_count_emitted( 155 __entry->fences = amdgpu_fence_count_emitted(
156 p->job->ring); 156 p->ring);
157 ), 157 ),
158 TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", 158 TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
159 __entry->bo_list, __entry->ring, __entry->dw, 159 __entry->bo_list, __entry->ring, __entry->dw,
@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
178 __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) 178 __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
179 __entry->context = job->base.s_fence->finished.context; 179 __entry->context = job->base.s_fence->finished.context;
180 __entry->seqno = job->base.s_fence->finished.seqno; 180 __entry->seqno = job->base.s_fence->finished.seqno;
181 __entry->ring_name = job->ring->name; 181 __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
182 __entry->num_ibs = job->num_ibs; 182 __entry->num_ibs = job->num_ibs;
183 ), 183 ),
184 TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", 184 TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
203 __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) 203 __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
204 __entry->context = job->base.s_fence->finished.context; 204 __entry->context = job->base.s_fence->finished.context;
205 __entry->seqno = job->base.s_fence->finished.seqno; 205 __entry->seqno = job->base.s_fence->finished.seqno;
206 __entry->ring_name = job->ring->name; 206 __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
207 __entry->num_ibs = job->num_ibs; 207 __entry->num_ibs = job->num_ibs;
208 ), 208 ),
209 TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", 209 TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0246cb87d9e4..13977ea6a097 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -104,8 +104,6 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
104static int amdgpu_ttm_global_init(struct amdgpu_device *adev) 104static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
105{ 105{
106 struct drm_global_reference *global_ref; 106 struct drm_global_reference *global_ref;
107 struct amdgpu_ring *ring;
108 struct drm_sched_rq *rq;
109 int r; 107 int r;
110 108
111 /* ensure reference is false in case init fails */ 109 /* ensure reference is false in case init fails */
@@ -138,21 +136,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
138 136
139 mutex_init(&adev->mman.gtt_window_lock); 137 mutex_init(&adev->mman.gtt_window_lock);
140 138
141 ring = adev->mman.buffer_funcs_ring;
142 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
143 r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
144 rq, NULL);
145 if (r) {
146 DRM_ERROR("Failed setting up TTM BO move run queue.\n");
147 goto error_entity;
148 }
149
150 adev->mman.mem_global_referenced = true; 139 adev->mman.mem_global_referenced = true;
151 140
152 return 0; 141 return 0;
153 142
154error_entity:
155 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
156error_bo: 143error_bo:
157 drm_global_item_unref(&adev->mman.mem_global_ref); 144 drm_global_item_unref(&adev->mman.mem_global_ref);
158error_mem: 145error_mem:
@@ -162,8 +149,6 @@ error_mem:
162static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) 149static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
163{ 150{
164 if (adev->mman.mem_global_referenced) { 151 if (adev->mman.mem_global_referenced) {
165 drm_sched_entity_destroy(adev->mman.entity.sched,
166 &adev->mman.entity);
167 mutex_destroy(&adev->mman.gtt_window_lock); 152 mutex_destroy(&adev->mman.gtt_window_lock);
168 drm_global_item_unref(&adev->mman.bo_global_ref.ref); 153 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
169 drm_global_item_unref(&adev->mman.mem_global_ref); 154 drm_global_item_unref(&adev->mman.mem_global_ref);
@@ -1695,7 +1680,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1695 AMDGPU_GEM_DOMAIN_VRAM, 1680 AMDGPU_GEM_DOMAIN_VRAM,
1696 adev->fw_vram_usage.start_offset, 1681 adev->fw_vram_usage.start_offset,
1697 (adev->fw_vram_usage.start_offset + 1682 (adev->fw_vram_usage.start_offset +
1698 adev->fw_vram_usage.size), NULL); 1683 adev->fw_vram_usage.size));
1699 if (r) 1684 if (r)
1700 goto error_pin; 1685 goto error_pin;
1701 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, 1686 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
@@ -1921,10 +1906,29 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1921{ 1906{
1922 struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM]; 1907 struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
1923 uint64_t size; 1908 uint64_t size;
1909 int r;
1924 1910
1925 if (!adev->mman.initialized || adev->in_gpu_reset) 1911 if (!adev->mman.initialized || adev->in_gpu_reset ||
1912 adev->mman.buffer_funcs_enabled == enable)
1926 return; 1913 return;
1927 1914
1915 if (enable) {
1916 struct amdgpu_ring *ring;
1917 struct drm_sched_rq *rq;
1918
1919 ring = adev->mman.buffer_funcs_ring;
1920 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
1921 r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
1922 if (r) {
1923 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1924 r);
1925 return;
1926 }
1927 } else {
1928 drm_sched_entity_destroy(adev->mman.entity.sched,
1929 &adev->mman.entity);
1930 }
1931
1928 /* this just adjusts TTM size idea, which sets lpfn to the correct value */ 1932 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
1929 if (enable) 1933 if (enable)
1930 size = adev->gmc.real_vram_size; 1934 size = adev->gmc.real_vram_size;
@@ -2002,7 +2006,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
2002 if (r) 2006 if (r)
2003 goto error_free; 2007 goto error_free;
2004 2008
2005 r = amdgpu_job_submit(job, ring, &adev->mman.entity, 2009 r = amdgpu_job_submit(job, &adev->mman.entity,
2006 AMDGPU_FENCE_OWNER_UNDEFINED, &fence); 2010 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
2007 if (r) 2011 if (r)
2008 goto error_free; 2012 goto error_free;
@@ -2071,24 +2075,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2071 2075
2072 amdgpu_ring_pad_ib(ring, &job->ibs[0]); 2076 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2073 WARN_ON(job->ibs[0].length_dw > num_dw); 2077 WARN_ON(job->ibs[0].length_dw > num_dw);
2074 if (direct_submit) { 2078 if (direct_submit)
2075 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, 2079 r = amdgpu_job_submit_direct(job, ring, fence);
2076 NULL, fence); 2080 else
2077 job->fence = dma_fence_get(*fence); 2081 r = amdgpu_job_submit(job, &adev->mman.entity,
2078 if (r)
2079 DRM_ERROR("Error scheduling IBs (%d)\n", r);
2080 amdgpu_job_free(job);
2081 } else {
2082 r = amdgpu_job_submit(job, ring, &adev->mman.entity,
2083 AMDGPU_FENCE_OWNER_UNDEFINED, fence); 2082 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2084 if (r) 2083 if (r)
2085 goto error_free; 2084 goto error_free;
2086 }
2087 2085
2088 return r; 2086 return r;
2089 2087
2090error_free: 2088error_free:
2091 amdgpu_job_free(job); 2089 amdgpu_job_free(job);
2090 DRM_ERROR("Error scheduling IBs (%d)\n", r);
2092 return r; 2091 return r;
2093} 2092}
2094 2093
@@ -2171,7 +2170,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2171 2170
2172 amdgpu_ring_pad_ib(ring, &job->ibs[0]); 2171 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2173 WARN_ON(job->ibs[0].length_dw > num_dw); 2172 WARN_ON(job->ibs[0].length_dw > num_dw);
2174 r = amdgpu_job_submit(job, ring, &adev->mman.entity, 2173 r = amdgpu_job_submit(job, &adev->mman.entity,
2175 AMDGPU_FENCE_OWNER_UNDEFINED, fence); 2174 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2176 if (r) 2175 if (r)
2177 goto error_free; 2176 goto error_free;
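
amdgpu_copy_buffer() above now shows the split this series introduces everywhere: amdgpu_job_submit_direct() starts the job on a ring immediately, while amdgpu_job_submit() queues it through a scheduler entity and derives the ring from the entity instead of taking it as a parameter. A sketch of the two paths (kernel context assumed; signatures taken from amdgpu_job.h above):

static int example_submit(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                          struct amdgpu_job *job, bool direct,
                          struct dma_fence **fence)
{
        if (direct)
                /* bypass the scheduler and start the IBs right away */
                return amdgpu_job_submit_direct(job, ring, fence);

        /* queue through an entity; the ring argument is gone */
        return amdgpu_job_submit(job, &adev->mman.entity,
                                 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
}
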
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e5da4654b630..8b3cc6687769 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -73,7 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
73uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); 73uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
74int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); 74int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
75 75
76u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo); 76u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
77uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); 77uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
78uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); 78uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
79 79
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 3e70eb61a960..80b5c453f8c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -263,21 +263,20 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
263 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); 263 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
264 return r; 264 return r;
265 } 265 }
266 }
266 267
267 ring = &adev->uvd.inst[j].ring; 268 ring = &adev->uvd.inst[0].ring;
268 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 269 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
269 r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity, 270 r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
270 rq, NULL); 271 if (r) {
271 if (r != 0) { 272 DRM_ERROR("Failed setting up UVD kernel entity.\n");
272 DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j); 273 return r;
273 return r;
274 }
275
276 for (i = 0; i < adev->uvd.max_handles; ++i) {
277 atomic_set(&adev->uvd.inst[j].handles[i], 0);
278 adev->uvd.inst[j].filp[i] = NULL;
279 }
280 } 274 }
275 for (i = 0; i < adev->uvd.max_handles; ++i) {
276 atomic_set(&adev->uvd.handles[i], 0);
277 adev->uvd.filp[i] = NULL;
278 }
279
281 /* from uvd v5.0 HW addressing capacity increased to 64 bits */ 280 /* from uvd v5.0 HW addressing capacity increased to 64 bits */
282 if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) 281 if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
283 adev->uvd.address_64_bit = true; 282 adev->uvd.address_64_bit = true;
@@ -306,11 +305,12 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
306{ 305{
307 int i, j; 306 int i, j;
308 307
308 drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
309 &adev->uvd.entity);
310
309 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { 311 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
310 kfree(adev->uvd.inst[j].saved_bo); 312 kfree(adev->uvd.inst[j].saved_bo);
311 313
312 drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
313
314 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, 314 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
315 &adev->uvd.inst[j].gpu_addr, 315 &adev->uvd.inst[j].gpu_addr,
316 (void **)&adev->uvd.inst[j].cpu_addr); 316 (void **)&adev->uvd.inst[j].cpu_addr);
@@ -333,20 +333,20 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
333 333
334 cancel_delayed_work_sync(&adev->uvd.idle_work); 334 cancel_delayed_work_sync(&adev->uvd.idle_work);
335 335
336 /* only valid for physical mode */
337 if (adev->asic_type < CHIP_POLARIS10) {
338 for (i = 0; i < adev->uvd.max_handles; ++i)
339 if (atomic_read(&adev->uvd.handles[i]))
340 break;
341
342 if (i == adev->uvd.max_handles)
343 return 0;
344 }
345
336 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { 346 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
337 if (adev->uvd.inst[j].vcpu_bo == NULL) 347 if (adev->uvd.inst[j].vcpu_bo == NULL)
338 continue; 348 continue;
339 349
340 /* only valid for physical mode */
341 if (adev->asic_type < CHIP_POLARIS10) {
342 for (i = 0; i < adev->uvd.max_handles; ++i)
343 if (atomic_read(&adev->uvd.inst[j].handles[i]))
344 break;
345
346 if (i == adev->uvd.max_handles)
347 continue;
348 }
349
350 size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo); 350 size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
351 ptr = adev->uvd.inst[j].cpu_addr; 351 ptr = adev->uvd.inst[j].cpu_addr;
352 352
@@ -398,30 +398,27 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
398 398
399void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) 399void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
400{ 400{
401 struct amdgpu_ring *ring; 401 struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
402 int i, j, r; 402 int i, r;
403
404 for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
405 ring = &adev->uvd.inst[j].ring;
406 403
407 for (i = 0; i < adev->uvd.max_handles; ++i) { 404 for (i = 0; i < adev->uvd.max_handles; ++i) {
408 uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]); 405 uint32_t handle = atomic_read(&adev->uvd.handles[i]);
409 if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
410 struct dma_fence *fence;
411
412 r = amdgpu_uvd_get_destroy_msg(ring, handle,
413 false, &fence);
414 if (r) {
415 DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
416 continue;
417 }
418 406
419 dma_fence_wait(fence, false); 407 if (handle != 0 && adev->uvd.filp[i] == filp) {
420 dma_fence_put(fence); 408 struct dma_fence *fence;
421 409
422 adev->uvd.inst[j].filp[i] = NULL; 410 r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
423 atomic_set(&adev->uvd.inst[j].handles[i], 0); 411 &fence);
412 if (r) {
413 DRM_ERROR("Error destroying UVD %d!\n", r);
414 continue;
424 } 415 }
416
417 dma_fence_wait(fence, false);
418 dma_fence_put(fence);
419
420 adev->uvd.filp[i] = NULL;
421 atomic_set(&adev->uvd.handles[i], 0);
425 } 422 }
426 } 423 }
427} 424}
@@ -696,16 +693,15 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
696 void *ptr; 693 void *ptr;
697 long r; 694 long r;
698 int i; 695 int i;
699 uint32_t ip_instance = ctx->parser->job->ring->me;
700 696
701 if (offset & 0x3F) { 697 if (offset & 0x3F) {
702 DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance); 698 DRM_ERROR("UVD messages must be 64 byte aligned!\n");
703 return -EINVAL; 699 return -EINVAL;
704 } 700 }
705 701
706 r = amdgpu_bo_kmap(bo, &ptr); 702 r = amdgpu_bo_kmap(bo, &ptr);
707 if (r) { 703 if (r) {
708 DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r); 704 DRM_ERROR("Failed mapping the UVD) message (%ld)!\n", r);
709 return r; 705 return r;
710 } 706 }
711 707
@@ -715,7 +711,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
715 handle = msg[2]; 711 handle = msg[2];
716 712
717 if (handle == 0) { 713 if (handle == 0) {
718 DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance); 714 DRM_ERROR("Invalid UVD handle!\n");
719 return -EINVAL; 715 return -EINVAL;
720 } 716 }
721 717
@@ -726,18 +722,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
726 722
727 /* try to alloc a new handle */ 723 /* try to alloc a new handle */
728 for (i = 0; i < adev->uvd.max_handles; ++i) { 724 for (i = 0; i < adev->uvd.max_handles; ++i) {
729 if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) { 725 if (atomic_read(&adev->uvd.handles[i]) == handle) {
730 DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle); 726 DRM_ERROR(")Handle 0x%x already in use!\n",
727 handle);
731 return -EINVAL; 728 return -EINVAL;
732 } 729 }
733 730
734 if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) { 731 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
735 adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp; 732 adev->uvd.filp[i] = ctx->parser->filp;
736 return 0; 733 return 0;
737 } 734 }
738 } 735 }
739 736
740 DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance); 737 DRM_ERROR("No more free UVD handles!\n");
741 return -ENOSPC; 738 return -ENOSPC;
742 739
743 case 1: 740 case 1:
@@ -749,27 +746,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
749 746
750 /* validate the handle */ 747 /* validate the handle */
751 for (i = 0; i < adev->uvd.max_handles; ++i) { 748 for (i = 0; i < adev->uvd.max_handles; ++i) {
752 if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) { 749 if (atomic_read(&adev->uvd.handles[i]) == handle) {
753 if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) { 750 if (adev->uvd.filp[i] != ctx->parser->filp) {
754 DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance); 751 DRM_ERROR("UVD handle collision detected!\n");
755 return -EINVAL; 752 return -EINVAL;
756 } 753 }
757 return 0; 754 return 0;
758 } 755 }
759 } 756 }
760 757
761 DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle); 758 DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
762 return -ENOENT; 759 return -ENOENT;
763 760
764 case 2: 761 case 2:
765 /* it's a destroy msg, free the handle */ 762 /* it's a destroy msg, free the handle */
766 for (i = 0; i < adev->uvd.max_handles; ++i) 763 for (i = 0; i < adev->uvd.max_handles; ++i)
767 atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0); 764 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
768 amdgpu_bo_kunmap(bo); 765 amdgpu_bo_kunmap(bo);
769 return 0; 766 return 0;
770 767
771 default: 768 default:
772 DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type); 769 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
773 return -EINVAL; 770 return -EINVAL;
774 } 771 }
775 BUG(); 772 BUG();
@@ -1062,19 +1059,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
1062 if (r < 0) 1059 if (r < 0)
1063 goto err_free; 1060 goto err_free;
1064 1061
1065 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 1062 r = amdgpu_job_submit_direct(job, ring, &f);
1066 job->fence = dma_fence_get(f);
1067 if (r) 1063 if (r)
1068 goto err_free; 1064 goto err_free;
1069
1070 amdgpu_job_free(job);
1071 } else { 1065 } else {
1072 r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv, 1066 r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
1073 AMDGPU_FENCE_OWNER_UNDEFINED, false); 1067 AMDGPU_FENCE_OWNER_UNDEFINED, false);
1074 if (r) 1068 if (r)
1075 goto err_free; 1069 goto err_free;
1076 1070
1077 r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity, 1071 r = amdgpu_job_submit(job, &adev->uvd.entity,
1078 AMDGPU_FENCE_OWNER_UNDEFINED, &f); 1072 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
1079 if (r) 1073 if (r)
1080 goto err_free; 1074 goto err_free;
@@ -1276,7 +1270,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
1276 * necessarily linear. So we need to count 1270 * necessarily linear. So we need to count
1277 * all non-zero handles. 1271 * all non-zero handles.
1278 */ 1272 */
1279 if (atomic_read(&adev->uvd.inst->handles[i])) 1273 if (atomic_read(&adev->uvd.handles[i]))
1280 used_handles++; 1274 used_handles++;
1281 } 1275 }
1282 1276
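
The handle bookkeeping that just moved from amdgpu_uvd_inst into the shared amdgpu_uvd structure claims a free slot lock-free with atomic_cmpxchg(). A standalone model of that allocation loop (toy array, C11 atomics standing in for the kernel's atomic_t):

#include <stdatomic.h>
#include <stdio.h>

#define MAX_HANDLES 4
static _Atomic unsigned int handles[MAX_HANDLES];

/* claim a free slot for 'handle', like the loop in amdgpu_uvd_cs_msg() */
static int alloc_handle(unsigned int handle)
{
        for (int i = 0; i < MAX_HANDLES; i++) {
                unsigned int expected = 0;

                if (atomic_compare_exchange_strong(&handles[i], &expected,
                                                   handle))
                        return i;       /* slot was 0, now ours */
        }
        return -1;                      /* -ENOSPC in the driver */
}

int main(void)
{
        printf("slot %d\n", alloc_handle(0x1234));      /* slot 0 */
        printf("slot %d\n", alloc_handle(0x5678));      /* slot 1 */
        return 0;
}
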
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 8b23a1b00c76..66872286ab12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -42,13 +42,9 @@ struct amdgpu_uvd_inst {
42 void *cpu_addr; 42 void *cpu_addr;
43 uint64_t gpu_addr; 43 uint64_t gpu_addr;
44 void *saved_bo; 44 void *saved_bo;
45 atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
46 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
47 struct amdgpu_ring ring; 45 struct amdgpu_ring ring;
48 struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; 46 struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
49 struct amdgpu_irq_src irq; 47 struct amdgpu_irq_src irq;
50 struct drm_sched_entity entity;
51 struct drm_sched_entity entity_enc;
52 uint32_t srbm_soft_reset; 48 uint32_t srbm_soft_reset;
53}; 49};
54 50
@@ -57,10 +53,13 @@ struct amdgpu_uvd {
57 unsigned fw_version; 53 unsigned fw_version;
58 unsigned max_handles; 54 unsigned max_handles;
59 unsigned num_enc_rings; 55 unsigned num_enc_rings;
60 uint8_t num_uvd_inst; 56 uint8_t num_uvd_inst;
61 bool address_64_bit; 57 bool address_64_bit;
62 bool use_ctx_buf; 58 bool use_ctx_buf;
63 struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; 59 struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
60 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
61 atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
62 struct drm_sched_entity entity;
64 struct delayed_work idle_work; 63 struct delayed_work idle_work;
65}; 64};
66 65
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 6ae1ad7e83b3..86182c966ed6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
190 190
191 ring = &adev->vce.ring[0]; 191 ring = &adev->vce.ring[0];
192 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 192 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
193 r = drm_sched_entity_init(&ring->sched, &adev->vce.entity, 193 r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
194 rq, NULL);
195 if (r != 0) { 194 if (r != 0) {
196 DRM_ERROR("Failed setting up VCE run queue.\n"); 195 DRM_ERROR("Failed setting up VCE run queue.\n");
197 return r; 196 return r;
@@ -470,12 +469,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
470 for (i = ib->length_dw; i < ib_size_dw; ++i) 469 for (i = ib->length_dw; i < ib_size_dw; ++i)
471 ib->ptr[i] = 0x0; 470 ib->ptr[i] = 0x0;
472 471
473 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 472 r = amdgpu_job_submit_direct(job, ring, &f);
474 job->fence = dma_fence_get(f);
475 if (r) 473 if (r)
476 goto err; 474 goto err;
477 475
478 amdgpu_job_free(job);
479 if (fence) 476 if (fence)
480 *fence = dma_fence_get(f); 477 *fence = dma_fence_get(f);
481 dma_fence_put(f); 478 dma_fence_put(f);
@@ -532,19 +529,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
532 for (i = ib->length_dw; i < ib_size_dw; ++i) 529 for (i = ib->length_dw; i < ib_size_dw; ++i)
533 ib->ptr[i] = 0x0; 530 ib->ptr[i] = 0x0;
534 531
535 if (direct) { 532 if (direct)
536 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 533 r = amdgpu_job_submit_direct(job, ring, &f);
537 job->fence = dma_fence_get(f); 534 else
538 if (r) 535 r = amdgpu_job_submit(job, &ring->adev->vce.entity,
539 goto err;
540
541 amdgpu_job_free(job);
542 } else {
543 r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
544 AMDGPU_FENCE_OWNER_UNDEFINED, &f); 536 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
545 if (r) 537 if (r)
546 goto err; 538 goto err;
547 }
548 539
549 if (fence) 540 if (fence)
550 *fence = dma_fence_get(f); 541 *fence = dma_fence_get(f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a66cd521a875..798648a19710 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -211,6 +211,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
211 fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]); 211 fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
212 } 212 }
213 213
214 fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
215
214 if (fences == 0) { 216 if (fences == 0) {
215 if (adev->pm.dpm_enabled) 217 if (adev->pm.dpm_enabled)
216 amdgpu_dpm_enable_uvd(adev, false); 218 amdgpu_dpm_enable_uvd(adev, false);
@@ -227,7 +229,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
227 struct amdgpu_device *adev = ring->adev; 229 struct amdgpu_device *adev = ring->adev;
228 bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); 230 bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
229 231
230 if (set_clocks && adev->pm.dpm_enabled) { 232 if (set_clocks) {
231 if (adev->pm.dpm_enabled) 233 if (adev->pm.dpm_enabled)
232 amdgpu_dpm_enable_uvd(adev, true); 234 amdgpu_dpm_enable_uvd(adev, true);
233 else 235 else
@@ -306,13 +308,10 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
306 } 308 }
307 ib->length_dw = 16; 309 ib->length_dw = 16;
308 310
309 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 311 r = amdgpu_job_submit_direct(job, ring, &f);
310 job->fence = dma_fence_get(f);
311 if (r) 312 if (r)
312 goto err_free; 313 goto err_free;
313 314
314 amdgpu_job_free(job);
315
316 amdgpu_bo_fence(bo, f, false); 315 amdgpu_bo_fence(bo, f, false);
317 amdgpu_bo_unreserve(bo); 316 amdgpu_bo_unreserve(bo);
318 amdgpu_bo_unref(&bo); 317 amdgpu_bo_unref(&bo);
@@ -497,12 +496,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
497 for (i = ib->length_dw; i < ib_size_dw; ++i) 496 for (i = ib->length_dw; i < ib_size_dw; ++i)
498 ib->ptr[i] = 0x0; 497 ib->ptr[i] = 0x0;
499 498
500 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 499 r = amdgpu_job_submit_direct(job, ring, &f);
501 job->fence = dma_fence_get(f);
502 if (r) 500 if (r)
503 goto err; 501 goto err;
504 502
505 amdgpu_job_free(job);
506 if (fence) 503 if (fence)
507 *fence = dma_fence_get(f); 504 *fence = dma_fence_get(f);
508 dma_fence_put(f); 505 dma_fence_put(f);
@@ -551,12 +548,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
551 for (i = ib->length_dw; i < ib_size_dw; ++i) 548 for (i = ib->length_dw; i < ib_size_dw; ++i)
552 ib->ptr[i] = 0x0; 549 ib->ptr[i] = 0x0;
553 550
554 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 551 r = amdgpu_job_submit_direct(job, ring, &f);
555 job->fence = dma_fence_get(f);
556 if (r) 552 if (r)
557 goto err; 553 goto err;
558 554
559 amdgpu_job_free(job);
560 if (fence) 555 if (fence)
561 *fence = dma_fence_get(f); 556 *fence = dma_fence_get(f);
562 dma_fence_put(f); 557 dma_fence_put(f);
@@ -664,12 +659,10 @@ static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
664 } 659 }
665 ib->length_dw = 16; 660 ib->length_dw = 16;
666 661
667 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 662 r = amdgpu_job_submit_direct(job, ring, &f);
668 job->fence = dma_fence_get(f);
669 if (r) 663 if (r)
670 goto err; 664 goto err;
671 665
672 amdgpu_job_free(job);
673 if (fence) 666 if (fence)
674 *fence = dma_fence_get(f); 667 *fence = dma_fence_get(f);
675 dma_fence_put(f); 668 dma_fence_put(f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 712af5c1a5d6..098dd1ba751a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -156,6 +156,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
156 return; 156 return;
157 list_add_tail(&base->bo_list, &bo->va); 157 list_add_tail(&base->bo_list, &bo->va);
158 158
159 if (bo->tbo.type == ttm_bo_type_kernel)
160 list_move(&base->vm_status, &vm->relocated);
161
159 if (bo->tbo.resv != vm->root.base.bo->tbo.resv) 162 if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
160 return; 163 return;
161 164
@@ -422,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
422 if (r) 425 if (r)
423 goto error_free; 426 goto error_free;
424 427
425 r = amdgpu_job_submit(job, ring, &vm->entity, 428 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
426 AMDGPU_FENCE_OWNER_UNDEFINED, &fence); 429 &fence);
427 if (r) 430 if (r)
428 goto error_free; 431 goto error_free;
429 432
@@ -540,7 +543,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
540 pt->parent = amdgpu_bo_ref(parent->base.bo); 543 pt->parent = amdgpu_bo_ref(parent->base.bo);
541 544
542 amdgpu_vm_bo_base_init(&entry->base, vm, pt); 545 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
543 list_move(&entry->base.vm_status, &vm->relocated);
544 } 546 }
545 547
546 if (level < AMDGPU_VM_PTB) { 548 if (level < AMDGPU_VM_PTB) {
@@ -1118,8 +1120,8 @@ restart:
1118 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv, 1120 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1119 AMDGPU_FENCE_OWNER_VM, false); 1121 AMDGPU_FENCE_OWNER_VM, false);
1120 WARN_ON(params.ib->length_dw > ndw); 1122 WARN_ON(params.ib->length_dw > ndw);
1121 r = amdgpu_job_submit(job, ring, &vm->entity, 1123 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
1122 AMDGPU_FENCE_OWNER_VM, &fence); 1124 &fence);
1123 if (r) 1125 if (r)
1124 goto error; 1126 goto error;
1125 1127
@@ -1483,8 +1485,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1483 1485
1484 amdgpu_ring_pad_ib(ring, params.ib); 1486 amdgpu_ring_pad_ib(ring, params.ib);
1485 WARN_ON(params.ib->length_dw > ndw); 1487 WARN_ON(params.ib->length_dw > ndw);
1486 r = amdgpu_job_submit(job, ring, &vm->entity, 1488 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
1487 AMDGPU_FENCE_OWNER_VM, &f);
1488 if (r) 1489 if (r)
1489 goto error_free; 1490 goto error_free;
1490 1491
@@ -1645,18 +1646,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1645 uint64_t flags; 1646 uint64_t flags;
1646 int r; 1647 int r;
1647 1648
1648 if (clear || !bo_va->base.bo) { 1649 if (clear || !bo) {
1649 mem = NULL; 1650 mem = NULL;
1650 nodes = NULL; 1651 nodes = NULL;
1651 exclusive = NULL; 1652 exclusive = NULL;
1652 } else { 1653 } else {
1653 struct ttm_dma_tt *ttm; 1654 struct ttm_dma_tt *ttm;
1654 1655
1655 mem = &bo_va->base.bo->tbo.mem; 1656 mem = &bo->tbo.mem;
1656 nodes = mem->mm_node; 1657 nodes = mem->mm_node;
1657 if (mem->mem_type == TTM_PL_TT) { 1658 if (mem->mem_type == TTM_PL_TT) {
1658 ttm = container_of(bo_va->base.bo->tbo.ttm, 1659 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1659 struct ttm_dma_tt, ttm);
1660 pages_addr = ttm->dma_address; 1660 pages_addr = ttm->dma_address;
1661 } 1661 }
1662 exclusive = reservation_object_get_excl(bo->tbo.resv); 1662 exclusive = reservation_object_get_excl(bo->tbo.resv);
@@ -2562,8 +2562,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2562 ring_instance %= adev->vm_manager.vm_pte_num_rings; 2562 ring_instance %= adev->vm_manager.vm_pte_num_rings;
2563 ring = adev->vm_manager.vm_pte_rings[ring_instance]; 2563 ring = adev->vm_manager.vm_pte_rings[ring_instance];
2564 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; 2564 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2565 r = drm_sched_entity_init(&ring->sched, &vm->entity, 2565 r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
2566 rq, NULL);
2567 if (r) 2566 if (r)
2568 return r; 2567 return r;
2569 2568
@@ -2942,3 +2941,42 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2942 2941
2943 return 0; 2942 return 0;
2944} 2943}
2944
2945/**
2946 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2947 *
 2948 * @adev: amdgpu device pointer
2949 * @pasid: PASID identifier for VM
2950 * @task_info: task_info to fill.
2951 */
2952void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
2953 struct amdgpu_task_info *task_info)
2954{
2955 struct amdgpu_vm *vm;
2956
2957 spin_lock(&adev->vm_manager.pasid_lock);
2958
2959 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2960 if (vm)
2961 *task_info = vm->task_info;
2962
2963 spin_unlock(&adev->vm_manager.pasid_lock);
2964}
2965
2966/**
2967 * amdgpu_vm_set_task_info - Sets VMs task info.
2968 *
2969 * @vm: vm for which to set the info
2970 */
2971void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2972{
2973 if (!vm->task_info.pid) {
2974 vm->task_info.pid = current->pid;
2975 get_task_comm(vm->task_info.task_name, current);
2976
2977 if (current->group_leader->mm == current->mm) {
2978 vm->task_info.tgid = current->group_leader->pid;
2979 get_task_comm(vm->task_info.process_name, current->group_leader);
2980 }
2981 }
2982}
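
The intended consumer of the new task-info helpers is the VM-fault path (gmc_v9_0.c in the diffstat picks them up): record the submitting process with amdgpu_vm_set_task_info(), presumably at command-submission time, then look it up by PASID when a fault arrives. A sketch of such a caller (hypothetical handler; only the two helpers added here are assumed):

static void example_report_fault(struct amdgpu_device *adev, unsigned int pasid)
{
        struct amdgpu_task_info task_info = {};

        amdgpu_vm_get_task_info(adev, pasid, &task_info);
        DRM_ERROR("VM fault from process %s (tgid %d) thread %s (pid %d)\n",
                  task_info.process_name, task_info.tgid,
                  task_info.task_name, task_info.pid);
}
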
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 061b99a18cb8..d416f895233d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -164,6 +164,14 @@ struct amdgpu_vm_pt {
164#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48) 164#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
165#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL) 165#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
166 166
167
168struct amdgpu_task_info {
169 char process_name[TASK_COMM_LEN];
170 char task_name[TASK_COMM_LEN];
171 pid_t pid;
172 pid_t tgid;
173};
174
167struct amdgpu_vm { 175struct amdgpu_vm {
168 /* tree of virtual addresses mapped */ 176 /* tree of virtual addresses mapped */
169 struct rb_root_cached va; 177 struct rb_root_cached va;
@@ -215,6 +223,9 @@ struct amdgpu_vm {
215 223
216 /* Valid while the PD is reserved or fenced */ 224 /* Valid while the PD is reserved or fenced */
217 uint64_t pd_phys_addr; 225 uint64_t pd_phys_addr;
226
227 /* Some basic info about the task */
228 struct amdgpu_task_info task_info;
218}; 229};
219 230
220struct amdgpu_vm_manager { 231struct amdgpu_vm_manager {
@@ -317,4 +328,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
317 struct amdgpu_job *job); 328 struct amdgpu_job *job);
318void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); 329void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
319 330
331void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
332 struct amdgpu_task_info *task_info);
333
334void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
335
320#endif 336#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index f7a4bd5885a3..9cfa8a9ada92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,33 +97,29 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
97} 97}
98 98
99/** 99/**
100 * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size 100 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
101 * 101 *
102 * @bo: &amdgpu_bo buffer object (must be in VRAM) 102 * @bo: &amdgpu_bo buffer object (must be in VRAM)
103 * 103 *
104 * Returns: 104 * Returns:
105 * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM. 105 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
106 */ 106 */
107u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo) 107u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
108{ 108{
109 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 109 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
110 struct ttm_mem_reg *mem = &bo->tbo.mem; 110 struct ttm_mem_reg *mem = &bo->tbo.mem;
111 struct drm_mm_node *nodes = mem->mm_node; 111 struct drm_mm_node *nodes = mem->mm_node;
112 unsigned pages = mem->num_pages; 112 unsigned pages = mem->num_pages;
113 u64 usage = 0; 113 u64 usage;
114 114
115 if (amdgpu_gmc_vram_full_visible(&adev->gmc)) 115 if (amdgpu_gmc_vram_full_visible(&adev->gmc))
116 return 0; 116 return amdgpu_bo_size(bo);
117 117
118 if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) 118 if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
119 return amdgpu_bo_size(bo); 119 return 0;
120 120
121 while (nodes && pages) { 121 for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
122 usage += nodes->size << PAGE_SHIFT; 122 usage += amdgpu_vram_mgr_vis_size(adev, nodes);
123 usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
124 pages -= nodes->size;
125 ++nodes;
126 }
127 123
128 return usage; 124 return usage;
129} 125}
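
The rework flips the helper's polarity: instead of summing what falls outside the CPU-visible window, it accumulates the visible portion of each drm_mm node directly, so the two early exits swap as well. A worked example under assumed sizes:

/*
 * Worked example (made-up sizes): 256 MiB visible window, a 64 MiB BO
 * with 48 MiB of its pages below the window limit and 16 MiB above it.
 *
 *   old helper (invisible size): 16 MiB
 *   new helper (visible size):   48 MiB
 *
 * Degenerate cases after the rework:
 *   amdgpu_gmc_vram_full_visible()    -> amdgpu_bo_size(bo)
 *   BO starts above visible_vram_size -> 0
 */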
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index ada241bfeee9..308f9f238bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -41,6 +41,8 @@
41#include "gmc/gmc_8_1_d.h" 41#include "gmc/gmc_8_1_d.h"
42#include "gmc/gmc_8_1_sh_mask.h" 42#include "gmc/gmc_8_1_sh_mask.h"
43 43
44#include "ivsrcid/ivsrcid_vislands30.h"
45
44static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev); 46static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
45static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev); 47static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
46 48
@@ -1855,15 +1857,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
1855 if (unlikely(r != 0)) 1857 if (unlikely(r != 0))
1856 return r; 1858 return r;
1857 1859
1858 if (atomic) { 1860 if (!atomic) {
1859 fb_location = amdgpu_bo_gpu_offset(abo); 1861 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1860 } else {
1861 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1862 if (unlikely(r != 0)) { 1862 if (unlikely(r != 0)) {
1863 amdgpu_bo_unreserve(abo); 1863 amdgpu_bo_unreserve(abo);
1864 return -EINVAL; 1864 return -EINVAL;
1865 } 1865 }
1866 } 1866 }
1867 fb_location = amdgpu_bo_gpu_offset(abo);
1867 1868
1868 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 1869 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1869 amdgpu_bo_unreserve(abo); 1870 amdgpu_bo_unreserve(abo);
@@ -2370,13 +2371,14 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2370 return ret; 2371 return ret;
2371 } 2372 }
2372 2373
2373 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr); 2374 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2374 amdgpu_bo_unreserve(aobj); 2375 amdgpu_bo_unreserve(aobj);
2375 if (ret) { 2376 if (ret) {
2376 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2377 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2377 drm_gem_object_put_unlocked(obj); 2378 drm_gem_object_put_unlocked(obj);
2378 return ret; 2379 return ret;
2379 } 2380 }
2381 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2380 2382
2381 dce_v10_0_lock_cursor(crtc, true); 2383 dce_v10_0_lock_cursor(crtc, true);
2382 2384
@@ -2737,14 +2739,14 @@ static int dce_v10_0_sw_init(void *handle)
2737 return r; 2739 return r;
2738 } 2740 }
2739 2741
2740 for (i = 8; i < 20; i += 2) { 2742 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
2741 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq); 2743 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2742 if (r) 2744 if (r)
2743 return r; 2745 return r;
2744 } 2746 }
2745 2747
2746 /* HPD hotplug */ 2748 /* HPD hotplug */
2747 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq); 2749 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2748 if (r) 2750 if (r)
2749 return r; 2751 return r;
2750 2752
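
All four DCE variants get the same mechanical conversion, repeated in dce_v11_0, dce_v6_0 and dce_v8_0 below: amdgpu_bo_pin() has dropped its GPU-address out parameter, so callers pin first and then read the address with amdgpu_bo_gpu_offset() while the reservation is still held. The pattern, side by side (sketch):

/* Before: pin returned the address through an out parameter. */
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);

/* After: pin, then query the offset while the BO is still reserved. */
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (!r)
	fb_location = amdgpu_bo_gpu_offset(abo);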
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index a5b96eac3033..76dfb76f7900 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -41,6 +41,8 @@
41#include "gmc/gmc_8_1_d.h" 41#include "gmc/gmc_8_1_d.h"
42#include "gmc/gmc_8_1_sh_mask.h" 42#include "gmc/gmc_8_1_sh_mask.h"
43 43
44#include "ivsrcid/ivsrcid_vislands30.h"
45
44static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev); 46static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
45static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev); 47static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
46 48
@@ -1897,15 +1899,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
1897 if (unlikely(r != 0)) 1899 if (unlikely(r != 0))
1898 return r; 1900 return r;
1899 1901
1900 if (atomic) { 1902 if (!atomic) {
1901 fb_location = amdgpu_bo_gpu_offset(abo); 1903 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1902 } else {
1903 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1904 if (unlikely(r != 0)) { 1904 if (unlikely(r != 0)) {
1905 amdgpu_bo_unreserve(abo); 1905 amdgpu_bo_unreserve(abo);
1906 return -EINVAL; 1906 return -EINVAL;
1907 } 1907 }
1908 } 1908 }
1909 fb_location = amdgpu_bo_gpu_offset(abo);
1909 1910
1910 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 1911 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1911 amdgpu_bo_unreserve(abo); 1912 amdgpu_bo_unreserve(abo);
@@ -2449,13 +2450,14 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
2449 return ret; 2450 return ret;
2450 } 2451 }
2451 2452
2452 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr); 2453 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2453 amdgpu_bo_unreserve(aobj); 2454 amdgpu_bo_unreserve(aobj);
2454 if (ret) { 2455 if (ret) {
2455 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2456 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2456 drm_gem_object_put_unlocked(obj); 2457 drm_gem_object_put_unlocked(obj);
2457 return ret; 2458 return ret;
2458 } 2459 }
2460 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2459 2461
2460 dce_v11_0_lock_cursor(crtc, true); 2462 dce_v11_0_lock_cursor(crtc, true);
2461 2463
@@ -2858,14 +2860,14 @@ static int dce_v11_0_sw_init(void *handle)
2858 return r; 2860 return r;
2859 } 2861 }
2860 2862
2861 for (i = 8; i < 20; i += 2) { 2863 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
2862 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq); 2864 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2863 if (r) 2865 if (r)
2864 return r; 2866 return r;
2865 } 2867 }
2866 2868
2867 /* HPD hotplug */ 2869 /* HPD hotplug */
2868 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq); 2870 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2869 if (r) 2871 if (r)
2870 return r; 2872 return r;
2871 2873
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 394cc1e8fe20..c9adc627305d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1811,15 +1811,14 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1811 if (unlikely(r != 0)) 1811 if (unlikely(r != 0))
1812 return r; 1812 return r;
1813 1813
1814 if (atomic) { 1814 if (!atomic) {
1815 fb_location = amdgpu_bo_gpu_offset(abo); 1815 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1816 } else {
1817 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1818 if (unlikely(r != 0)) { 1816 if (unlikely(r != 0)) {
1819 amdgpu_bo_unreserve(abo); 1817 amdgpu_bo_unreserve(abo);
1820 return -EINVAL; 1818 return -EINVAL;
1821 } 1819 }
1822 } 1820 }
1821 fb_location = amdgpu_bo_gpu_offset(abo);
1823 1822
1824 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 1823 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1825 amdgpu_bo_unreserve(abo); 1824 amdgpu_bo_unreserve(abo);
@@ -2263,13 +2262,14 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2263 return ret; 2262 return ret;
2264 } 2263 }
2265 2264
2266 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr); 2265 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2267 amdgpu_bo_unreserve(aobj); 2266 amdgpu_bo_unreserve(aobj);
2268 if (ret) { 2267 if (ret) {
2269 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2268 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2270 drm_gem_object_put_unlocked(obj); 2269 drm_gem_object_put_unlocked(obj);
2271 return ret; 2270 return ret;
2272 } 2271 }
2272 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2273 2273
2274 dce_v6_0_lock_cursor(crtc, true); 2274 dce_v6_0_lock_cursor(crtc, true);
2275 2275
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c9b9ab8f1b05..50cd03beac7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1786,15 +1786,14 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1786 if (unlikely(r != 0)) 1786 if (unlikely(r != 0))
1787 return r; 1787 return r;
1788 1788
1789 if (atomic) { 1789 if (!atomic) {
1790 fb_location = amdgpu_bo_gpu_offset(abo); 1790 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1791 } else {
1792 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1793 if (unlikely(r != 0)) { 1791 if (unlikely(r != 0)) {
1794 amdgpu_bo_unreserve(abo); 1792 amdgpu_bo_unreserve(abo);
1795 return -EINVAL; 1793 return -EINVAL;
1796 } 1794 }
1797 } 1795 }
1796 fb_location = amdgpu_bo_gpu_offset(abo);
1798 1797
1799 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 1798 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1800 amdgpu_bo_unreserve(abo); 1799 amdgpu_bo_unreserve(abo);
@@ -2274,13 +2273,14 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2274 return ret; 2273 return ret;
2275 } 2274 }
2276 2275
2277 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr); 2276 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2278 amdgpu_bo_unreserve(aobj); 2277 amdgpu_bo_unreserve(aobj);
2279 if (ret) { 2278 if (ret) {
2280 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2279 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2281 drm_gem_object_put_unlocked(obj); 2280 drm_gem_object_put_unlocked(obj);
2282 return ret; 2281 return ret;
2283 } 2282 }
2283 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2284 2284
2285 dce_v8_0_lock_cursor(crtc, true); 2285 dce_v8_0_lock_cursor(crtc, true);
2286 2286
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 677e96a56330..15257634a53a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -36,6 +36,7 @@
36#include "dce_v10_0.h" 36#include "dce_v10_0.h"
37#include "dce_v11_0.h" 37#include "dce_v11_0.h"
38#include "dce_virtual.h" 38#include "dce_virtual.h"
39#include "ivsrcid/ivsrcid_vislands30.h"
39 40
40#define DCE_VIRTUAL_VBLANK_PERIOD 16666666 41#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
41 42
@@ -371,7 +372,7 @@ static int dce_virtual_sw_init(void *handle)
371 int r, i; 372 int r, i;
372 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 373 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
373 374
374 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq); 375 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
375 if (r) 376 if (r)
376 return r; 377 return r;
377 378
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 551f21bad6d3..5cd45210113f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -51,6 +51,8 @@
51 51
52#include "smu/smu_7_1_3_d.h" 52#include "smu/smu_7_1_3_d.h"
53 53
54#include "ivsrcid/ivsrcid_vislands30.h"
55
54#define GFX8_NUM_GFX_RINGS 1 56#define GFX8_NUM_GFX_RINGS 1
55#define GFX8_MEC_HPD_SIZE 2048 57#define GFX8_MEC_HPD_SIZE 2048
56 58
@@ -2047,35 +2049,35 @@ static int gfx_v8_0_sw_init(void *handle)
2047 adev->gfx.mec.num_queue_per_pipe = 8; 2049 adev->gfx.mec.num_queue_per_pipe = 8;
2048 2050
2049 /* KIQ event */ 2051 /* KIQ event */
2050 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq); 2052 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
2051 if (r) 2053 if (r)
2052 return r; 2054 return r;
2053 2055
2054 /* EOP Event */ 2056 /* EOP Event */
2055 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq); 2057 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
2056 if (r) 2058 if (r)
2057 return r; 2059 return r;
2058 2060
2059 /* Privileged reg */ 2061 /* Privileged reg */
2060 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, 2062 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
2061 &adev->gfx.priv_reg_irq); 2063 &adev->gfx.priv_reg_irq);
2062 if (r) 2064 if (r)
2063 return r; 2065 return r;
2064 2066
2065 /* Privileged inst */ 2067 /* Privileged inst */
2066 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, 2068 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
2067 &adev->gfx.priv_inst_irq); 2069 &adev->gfx.priv_inst_irq);
2068 if (r) 2070 if (r)
2069 return r; 2071 return r;
2070 2072
2071 /* Add CP EDC/ECC irq */ 2073 /* Add CP EDC/ECC irq */
2072 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 197, 2074 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
2073 &adev->gfx.cp_ecc_error_irq); 2075 &adev->gfx.cp_ecc_error_irq);
2074 if (r) 2076 if (r)
2075 return r; 2077 return r;
2076 2078
2077 /* SQ interrupts. */ 2079 /* SQ interrupts. */
2078 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 239, 2080 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
2079 &adev->gfx.sq_irq); 2081 &adev->gfx.sq_irq);
2080 if (r) { 2082 if (r) {
2081 DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r); 2083 DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
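
The magic interrupt source IDs replaced throughout these VI-family files imply the following values for the new constants. This table is inferred from the left column of the hunks, not quoted from ivsrcid_vislands30.h:

/* Inferred from the literals each constant replaces: */
#define VISLANDS30_IV_SRCID_CP_INT_IB2           178
#define VISLANDS30_IV_SRCID_CP_END_OF_PIPE       181
#define VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT    184
#define VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT  185
#define VISLANDS30_IV_SRCID_CP_ECC_ERROR         197
#define VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG     239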
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ac46eabe3bcd..9ab39117cc4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -38,6 +38,8 @@
38#include "clearstate_gfx9.h" 38#include "clearstate_gfx9.h"
39#include "v9_structs.h" 39#include "v9_structs.h"
40 40
41#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
42
41#define GFX9_NUM_GFX_RINGS 1 43#define GFX9_NUM_GFX_RINGS 1
42#define GFX9_MEC_HPD_SIZE 2048 44#define GFX9_MEC_HPD_SIZE 2048
43#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 45#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -102,11 +104,22 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
102{ 104{
103 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107), 105 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
104 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), 106 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
107 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
108 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
109 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
105 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042), 110 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
106 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042), 111 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
112 SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
107 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000), 113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
115 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
116 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
117 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
118 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
108 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), 119 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
109 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800) 120 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
121 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
122 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
110}; 123};
111 124
112static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = 125static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -648,7 +661,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
648 adev->firmware.fw_size += 661 adev->firmware.fw_size +=
649 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 662 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
650 663
651 if (adev->gfx.rlc.is_rlc_v2_1) { 664 if (adev->gfx.rlc.is_rlc_v2_1 &&
665 adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
666 adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
667 adev->gfx.rlc.save_restore_list_srm_size_bytes) {
652 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL]; 668 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
653 info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL; 669 info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
654 info->fw = adev->gfx.rlc_fw; 670 info->fw = adev->gfx.rlc_fw;
@@ -943,6 +959,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
943 dst_ptr = adev->gfx.rlc.cs_ptr; 959 dst_ptr = adev->gfx.rlc.cs_ptr;
944 gfx_v9_0_get_csb_buffer(adev, dst_ptr); 960 gfx_v9_0_get_csb_buffer(adev, dst_ptr);
945 amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); 961 amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
962 amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
946 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); 963 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
947 } 964 }
948 965
@@ -971,6 +988,39 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
971 return 0; 988 return 0;
972} 989}
973 990
991static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
992{
993 int r;
994
995 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
996 if (unlikely(r != 0))
997 return r;
998
999 r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
1000 AMDGPU_GEM_DOMAIN_VRAM);
1001 if (!r)
1002 adev->gfx.rlc.clear_state_gpu_addr =
1003 amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
1004
1005 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1006
1007 return r;
1008}
1009
1010static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
1011{
1012 int r;
1013
1014 if (!adev->gfx.rlc.clear_state_obj)
1015 return;
1016
1017 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
1018 if (likely(r == 0)) {
1019 amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
1020 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1021 }
1022}
1023
974static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) 1024static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
975{ 1025{
976 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 1026 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1451,23 +1501,23 @@ static int gfx_v9_0_sw_init(void *handle)
1451 adev->gfx.mec.num_queue_per_pipe = 8; 1501 adev->gfx.mec.num_queue_per_pipe = 8;
1452 1502
1453 /* KIQ event */ 1503 /* KIQ event */
1454 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq); 1504 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
1455 if (r) 1505 if (r)
1456 return r; 1506 return r;
1457 1507
1458 /* EOP Event */ 1508 /* EOP Event */
1459 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq); 1509 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1460 if (r) 1510 if (r)
1461 return r; 1511 return r;
1462 1512
1463 /* Privileged reg */ 1513 /* Privileged reg */
1464 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184, 1514 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
1465 &adev->gfx.priv_reg_irq); 1515 &adev->gfx.priv_reg_irq);
1466 if (r) 1516 if (r)
1467 return r; 1517 return r;
1468 1518
1469 /* Privileged inst */ 1519 /* Privileged inst */
1470 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185, 1520 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
1471 &adev->gfx.priv_inst_irq); 1521 &adev->gfx.priv_inst_irq);
1472 if (r) 1522 if (r)
1473 return r; 1523 return r;
@@ -2148,8 +2198,16 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
2148 2198
2149static void gfx_v9_0_init_pg(struct amdgpu_device *adev) 2199static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2150{ 2200{
2151 if (!adev->gfx.rlc.is_rlc_v2_1) 2201 gfx_v9_0_init_csb(adev);
2152 return; 2202
2203 /*
 2204 * The RLC save/restore list is supported since v2_1
 2205 * and is needed by the gfxoff feature.
2206 */
2207 if (adev->gfx.rlc.is_rlc_v2_1) {
2208 gfx_v9_1_init_rlc_save_restore_list(adev);
2209 gfx_v9_0_enable_save_restore_machine(adev);
2210 }
2153 2211
2154 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | 2212 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2155 AMD_PG_SUPPORT_GFX_SMG | 2213 AMD_PG_SUPPORT_GFX_SMG |
@@ -2157,10 +2215,6 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2157 AMD_PG_SUPPORT_CP | 2215 AMD_PG_SUPPORT_CP |
2158 AMD_PG_SUPPORT_GDS | 2216 AMD_PG_SUPPORT_GDS |
2159 AMD_PG_SUPPORT_RLC_SMU_HS)) { 2217 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2160 gfx_v9_0_init_csb(adev);
2161 gfx_v9_1_init_rlc_save_restore_list(adev);
2162 gfx_v9_0_enable_save_restore_machine(adev);
2163
2164 WREG32(mmRLC_JUMP_TABLE_RESTORE, 2218 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2165 adev->gfx.rlc.cp_table_gpu_addr >> 8); 2219 adev->gfx.rlc.cp_table_gpu_addr >> 8);
2166 gfx_v9_0_init_gfx_power_gating(adev); 2220 gfx_v9_0_init_gfx_power_gating(adev);
@@ -2252,9 +2306,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2252 /* disable CG */ 2306 /* disable CG */
2253 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); 2307 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2254 2308
2255 /* disable PG */
2256 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
2257
2258 gfx_v9_0_rlc_reset(adev); 2309 gfx_v9_0_rlc_reset(adev);
2259 2310
2260 gfx_v9_0_init_pg(adev); 2311 gfx_v9_0_init_pg(adev);
@@ -3116,6 +3167,10 @@ static int gfx_v9_0_hw_init(void *handle)
3116 3167
3117 gfx_v9_0_gpu_init(adev); 3168 gfx_v9_0_gpu_init(adev);
3118 3169
3170 r = gfx_v9_0_csb_vram_pin(adev);
3171 if (r)
3172 return r;
3173
3119 r = gfx_v9_0_rlc_resume(adev); 3174 r = gfx_v9_0_rlc_resume(adev);
3120 if (r) 3175 if (r)
3121 return r; 3176 return r;
@@ -3224,6 +3279,8 @@ static int gfx_v9_0_hw_fini(void *handle)
3224 gfx_v9_0_cp_enable(adev, false); 3279 gfx_v9_0_cp_enable(adev, false);
3225 gfx_v9_0_rlc_stop(adev); 3280 gfx_v9_0_rlc_stop(adev);
3226 3281
3282 gfx_v9_0_csb_vram_unpin(adev);
3283
3227 return 0; 3284 return 0;
3228} 3285}
3229 3286
@@ -3510,8 +3567,11 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
3510 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 3567 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3511 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 3568 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
3512 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3569 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3513 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK | 3570
3514 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3571 if (adev->asic_type != CHIP_VEGA12)
3572 data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
3573
3574 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3515 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 3575 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3516 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 3576 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3517 3577
@@ -3541,11 +3601,15 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
3541 } else { 3601 } else {
3542 /* 1 - MGCG_OVERRIDE */ 3602 /* 1 - MGCG_OVERRIDE */
3543 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3603 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3544 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK | 3604
3545 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 3605 if (adev->asic_type != CHIP_VEGA12)
3606 data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
3607
3608 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3546 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3609 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3547 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 3610 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3548 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 3611 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3612
3549 if (def != data) 3613 if (def != data)
3550 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3614 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3551 3615
@@ -3581,9 +3645,11 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3581 /* update CGCG and CGLS override bits */ 3645 /* update CGCG and CGLS override bits */
3582 if (def != data) 3646 if (def != data)
3583 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3647 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3584 /* enable 3Dcgcg FSM(0x0020003f) */ 3648
3649 /* enable 3Dcgcg FSM(0x0000363f) */
3585 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3650 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3586 data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3651
3652 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3587 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 3653 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3588 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 3654 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3589 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3655 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3630,9 +3696,10 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
3630 if (def != data) 3696 if (def != data)
3631 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3697 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3632 3698
3633 /* enable cgcg FSM(0x0020003F) */ 3699 /* enable cgcg FSM(0x0000363F) */
3634 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3700 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3635 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3701
3702 data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3636 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 3703 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3637 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3704 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3638 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3705 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3719,6 +3786,11 @@ static int gfx_v9_0_set_powergating_state(void *handle,
3719 if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu) 3786 if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
3720 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true); 3787 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
3721 break; 3788 break;
3789 case CHIP_VEGA12:
3790 /* set gfx off through smu */
3791 if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
3792 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
3793 break;
3722 default: 3794 default:
3723 break; 3795 break;
3724 } 3796 }
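
Among the gfx_v9_0 changes, the CGCG and 3D-CGCG hunks shrink the idle threshold from 0x2000 to 0x36; the updated comments encode the resulting register values, which check out if the threshold field is assumed to start at bit 8 (consistent with both the old and new comments):

/*
 * Cross-check, assuming CGCG_GFX_IDLE_THRESHOLD__SHIFT == 8:
 *
 *   old: (0x2000 << 8) | 0x3f = 0x0020003f
 *   new: (0x36   << 8) | 0x3f = 0x0000363f
 *
 * where the low 0x3f presumably collects CGCG_EN plus the CGLS
 * enable and repeater-compensation delay bits.
 */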
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 78339309a00c..10920f0bd85f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -43,6 +43,8 @@
43 43
44#include "amdgpu_atombios.h" 44#include "amdgpu_atombios.h"
45 45
46#include "ivsrcid/ivsrcid_vislands30.h"
47
46static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev); 48static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
47static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 49static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
48static int gmc_v7_0_wait_for_idle(void *handle); 50static int gmc_v7_0_wait_for_idle(void *handle);
@@ -996,11 +998,11 @@ static int gmc_v7_0_sw_init(void *handle)
996 adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp); 998 adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
997 } 999 }
998 1000
999 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault); 1001 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
1000 if (r) 1002 if (r)
1001 return r; 1003 return r;
1002 1004
1003 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault); 1005 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
1004 if (r) 1006 if (r)
1005 return r; 1007 return r;
1006 1008
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1edbe6b477b5..75f3ffb2891e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -44,6 +44,7 @@
44 44
45#include "amdgpu_atombios.h" 45#include "amdgpu_atombios.h"
46 46
47#include "ivsrcid/ivsrcid_vislands30.h"
47 48
48static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev); 49static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
49static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); 50static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1101,11 +1102,11 @@ static int gmc_v8_0_sw_init(void *handle)
1101 adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp); 1102 adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
1102 } 1103 }
1103 1104
1104 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault); 1105 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
1105 if (r) 1106 if (r)
1106 return r; 1107 return r;
1107 1108
1108 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault); 1109 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
1109 if (r) 1110 if (r)
1110 return r; 1111 return r;
1111 1112
@@ -1447,8 +1448,13 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1447 gmc_v8_0_set_fault_enable_default(adev, false); 1448 gmc_v8_0_set_fault_enable_default(adev, false);
1448 1449
1449 if (printk_ratelimit()) { 1450 if (printk_ratelimit()) {
1450 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1451 struct amdgpu_task_info task_info = { 0 };
1451 entry->src_id, entry->src_data[0]); 1452
1453 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
1454
1455 dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
1456 entry->src_id, entry->src_data[0], task_info.process_name,
1457 task_info.tgid, task_info.task_name, task_info.pid);
1452 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1458 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1453 addr); 1459 addr);
1454 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1460 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
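
With the task info attached, the rate-limited GMC fault report now names the offending process and thread. An illustrative line (all values hypothetical):

/*
 * amdgpu 0000:01:00.0: GPU fault detected: 146 0x0c004801 for
 * process glxgears pid 1234 thread glxgears:cs0 pid 1236
 */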
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3c0a85d4e4ab..9df94b45d17d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -43,6 +43,8 @@
43#include "gfxhub_v1_0.h" 43#include "gfxhub_v1_0.h"
44#include "mmhub_v1_0.h" 44#include "mmhub_v1_0.h"
45 45
46#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
47
46/* add these here since we already include dce12 headers and these are for DCN */ 48/* add these here since we already include dce12 headers and these are for DCN */
47#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d 49#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
48#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 50#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -257,11 +259,16 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
257 } 259 }
258 260
259 if (printk_ratelimit()) { 261 if (printk_ratelimit()) {
262 struct amdgpu_task_info task_info = { 0 };
263
264 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
265
260 dev_err(adev->dev, 266 dev_err(adev->dev,
261 "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", 267 "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d\n)\n",
262 entry->vmid_src ? "mmhub" : "gfxhub", 268 entry->vmid_src ? "mmhub" : "gfxhub",
263 entry->src_id, entry->ring_id, entry->vmid, 269 entry->src_id, entry->ring_id, entry->vmid,
264 entry->pasid); 270 entry->pasid, task_info.process_name, task_info.tgid,
271 task_info.task_name, task_info.pid);
265 dev_err(adev->dev, " at page 0x%016llx from %d\n", 272 dev_err(adev->dev, " at page 0x%016llx from %d\n",
266 addr, entry->client_id); 273 addr, entry->client_id);
267 if (!amdgpu_sriov_vf(adev)) 274 if (!amdgpu_sriov_vf(adev))
@@ -872,9 +879,9 @@ static int gmc_v9_0_sw_init(void *handle)
872 } 879 }
873 880
874 /* This interrupt is VMC page fault.*/ 881 /* This interrupt is VMC page fault.*/
875 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0, 882 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
876 &adev->gmc.vm_fault); 883 &adev->gmc.vm_fault);
877 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0, 884 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
878 &adev->gmc.vm_fault); 885 &adev->gmc.vm_fault);
879 886
880 if (r) 887 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index cee4fae76d20..15ae4bc9c072 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -44,6 +44,8 @@
44 44
45#include "iceland_sdma_pkt_open.h" 45#include "iceland_sdma_pkt_open.h"
46 46
47#include "ivsrcid/ivsrcid_vislands30.h"
48
47static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev); 49static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
48static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev); 50static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
49static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev); 51static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -896,7 +898,7 @@ static int sdma_v2_4_sw_init(void *handle)
896 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 898 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
897 899
898 /* SDMA trap event */ 900 /* SDMA trap event */
899 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, 901 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
900 &adev->sdma.trap_irq); 902 &adev->sdma.trap_irq);
901 if (r) 903 if (r)
902 return r; 904 return r;
@@ -908,7 +910,7 @@ static int sdma_v2_4_sw_init(void *handle)
908 return r; 910 return r;
909 911
910 /* SDMA Privileged inst */ 912 /* SDMA Privileged inst */
911 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247, 913 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
912 &adev->sdma.illegal_inst_irq); 914 &adev->sdma.illegal_inst_irq);
913 if (r) 915 if (r)
914 return r; 916 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 99616dd9594f..1e07ff274d73 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -44,6 +44,8 @@
44 44
45#include "tonga_sdma_pkt_open.h" 45#include "tonga_sdma_pkt_open.h"
46 46
47#include "ivsrcid/ivsrcid_vislands30.h"
48
47static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev); 49static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
48static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev); 50static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
49static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev); 51static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1175,7 +1177,7 @@ static int sdma_v3_0_sw_init(void *handle)
1175 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1177 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1176 1178
1177 /* SDMA trap event */ 1179 /* SDMA trap event */
1178 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, 1180 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
1179 &adev->sdma.trap_irq); 1181 &adev->sdma.trap_irq);
1180 if (r) 1182 if (r)
1181 return r; 1183 return r;
@@ -1187,7 +1189,7 @@ static int sdma_v3_0_sw_init(void *handle)
1187 return r; 1189 return r;
1188 1190
1189 /* SDMA Privileged inst */ 1191 /* SDMA Privileged inst */
1190 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247, 1192 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
1191 &adev->sdma.illegal_inst_irq); 1193 &adev->sdma.illegal_inst_irq);
1192 if (r) 1194 if (r)
1193 return r; 1195 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 572ca63cf676..e7ca4623cfb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -38,6 +38,9 @@
38#include "soc15.h" 38#include "soc15.h"
39#include "vega10_sdma_pkt_open.h" 39#include "vega10_sdma_pkt_open.h"
40 40
41#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
42#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
43
41MODULE_FIRMWARE("amdgpu/vega10_sdma.bin"); 44MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
42MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin"); 45MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
43MODULE_FIRMWARE("amdgpu/vega12_sdma.bin"); 46MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
@@ -1225,13 +1228,13 @@ static int sdma_v4_0_sw_init(void *handle)
1225 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1228 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1226 1229
1227 /* SDMA trap event */ 1230 /* SDMA trap event */
1228 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224, 1231 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP,
1229 &adev->sdma.trap_irq); 1232 &adev->sdma.trap_irq);
1230 if (r) 1233 if (r)
1231 return r; 1234 return r;
1232 1235
1233 /* SDMA trap event */ 1236 /* SDMA trap event */
1234 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224, 1237 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP,
1235 &adev->sdma.trap_irq); 1238 &adev->sdma.trap_irq);
1236 if (r) 1239 if (r)
1237 return r; 1240 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 341ee6d55ce8..aeaa1ca46a99 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -35,6 +35,7 @@
35#include "vi.h" 35#include "vi.h"
36#include "smu/smu_7_1_2_d.h" 36#include "smu/smu_7_1_2_d.h"
37#include "smu/smu_7_1_2_sh_mask.h" 37#include "smu/smu_7_1_2_sh_mask.h"
38#include "ivsrcid/ivsrcid_vislands30.h"
38 39
39static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); 40static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
40static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev); 41static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -104,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
104 int r; 105 int r;
105 106
106 /* UVD TRAP */ 107 /* UVD TRAP */
107 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); 108 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
108 if (r) 109 if (r)
109 return r; 110 return r;
110 111
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 8ee1c2eaaa14..598dbeaba636 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -36,6 +36,7 @@
36#include "bif/bif_5_1_d.h" 36#include "bif/bif_5_1_d.h"
37#include "gmc/gmc_8_1_d.h" 37#include "gmc/gmc_8_1_d.h"
38#include "vi.h" 38#include "vi.h"
39#include "ivsrcid/ivsrcid_vislands30.h"
39 40
40/* Polaris10/11/12 firmware version */ 41/* Polaris10/11/12 firmware version */
41#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8)) 42#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
@@ -247,12 +248,10 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
247 for (i = ib->length_dw; i < ib_size_dw; ++i) 248 for (i = ib->length_dw; i < ib_size_dw; ++i)
248 ib->ptr[i] = 0x0; 249 ib->ptr[i] = 0x0;
249 250
250 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 251 r = amdgpu_job_submit_direct(job, ring, &f);
251 job->fence = dma_fence_get(f);
252 if (r) 252 if (r)
253 goto err; 253 goto err;
254 254
255 amdgpu_job_free(job);
256 if (fence) 255 if (fence)
257 *fence = dma_fence_get(f); 256 *fence = dma_fence_get(f);
258 dma_fence_put(f); 257 dma_fence_put(f);
@@ -311,19 +310,13 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
311 for (i = ib->length_dw; i < ib_size_dw; ++i) 310 for (i = ib->length_dw; i < ib_size_dw; ++i)
312 ib->ptr[i] = 0x0; 311 ib->ptr[i] = 0x0;
313 312
314 if (direct) { 313 if (direct)
315 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 314 r = amdgpu_job_submit_direct(job, ring, &f);
316 job->fence = dma_fence_get(f); 315 else
317 if (r) 316 r = amdgpu_job_submit(job, &ring->adev->vce.entity,
318 goto err;
319
320 amdgpu_job_free(job);
321 } else {
322 r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
323 AMDGPU_FENCE_OWNER_UNDEFINED, &f); 317 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
324 if (r) 318 if (r)
325 goto err; 319 goto err;
326 }
327 320
328 if (fence) 321 if (fence)
329 *fence = dma_fence_get(f); 322 *fence = dma_fence_get(f);
@@ -400,14 +393,14 @@ static int uvd_v6_0_sw_init(void *handle)
400 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 393 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
401 394
402 /* UVD TRAP */ 395 /* UVD TRAP */
403 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); 396 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
404 if (r) 397 if (r)
405 return r; 398 return r;
406 399
407 /* UVD ENC TRAP */ 400 /* UVD ENC TRAP */
408 if (uvd_v6_0_enc_support(adev)) { 401 if (uvd_v6_0_enc_support(adev)) {
409 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { 402 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
410 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq); 403 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
411 if (r) 404 if (r)
412 return r; 405 return r;
413 } 406 }
@@ -425,16 +418,6 @@ static int uvd_v6_0_sw_init(void *handle)
425 adev->uvd.num_enc_rings = 0; 418 adev->uvd.num_enc_rings = 0;
426 419
427 DRM_INFO("UVD ENC is disabled\n"); 420 DRM_INFO("UVD ENC is disabled\n");
428 } else {
429 struct drm_sched_rq *rq;
430 ring = &adev->uvd.inst->ring_enc[0];
431 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
432 r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
433 rq, NULL);
434 if (r) {
435 DRM_ERROR("Failed setting up UVD ENC run queue.\n");
436 return r;
437 }
438 } 421 }
439 422
440 r = amdgpu_uvd_resume(adev); 423 r = amdgpu_uvd_resume(adev);
@@ -470,8 +453,6 @@ static int uvd_v6_0_sw_fini(void *handle)
470 return r; 453 return r;
471 454
472 if (uvd_v6_0_enc_support(adev)) { 455 if (uvd_v6_0_enc_support(adev)) {
473 drm_sched_entity_destroy(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
474
475 for (i = 0; i < adev->uvd.num_enc_rings; ++i) 456 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
476 amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); 457 amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
477 } 458 }
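
The encoder-ring test paths stop open-coding direct submission; judging from the deleted lines, amdgpu_job_submit_direct() presumably bundles the same schedule/fence/free sequence behind the new job API (the helper itself lands in amdgpu_job.c, listed in the diffstat). A rough reconstruction, inferred from the sequence it replaces here and in uvd_v7_0 below:

/* Sketch only; the real helper lives in amdgpu_job.c. */
static int amdgpu_job_submit_direct_sketch(struct amdgpu_job *job,
					   struct amdgpu_ring *ring,
					   struct dma_fence **fence)
{
	int r;

	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	if (r)
		return r;

	job->fence = dma_fence_get(*fence);
	amdgpu_job_free(job);
	return 0;
}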
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index ba244d3b74db..db5f3d78ab12 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -39,6 +39,7 @@
39#include "hdp/hdp_4_0_offset.h" 39#include "hdp/hdp_4_0_offset.h"
40#include "mmhub/mmhub_1_0_offset.h" 40#include "mmhub/mmhub_1_0_offset.h"
41#include "mmhub/mmhub_1_0_sh_mask.h" 41#include "mmhub/mmhub_1_0_sh_mask.h"
42#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
42 43
43#define UVD7_MAX_HW_INSTANCES_VEGA20 2 44#define UVD7_MAX_HW_INSTANCES_VEGA20 2
44 45
@@ -249,12 +250,10 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
249 for (i = ib->length_dw; i < ib_size_dw; ++i) 250 for (i = ib->length_dw; i < ib_size_dw; ++i)
250 ib->ptr[i] = 0x0; 251 ib->ptr[i] = 0x0;
251 252
252 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 253 r = amdgpu_job_submit_direct(job, ring, &f);
253 job->fence = dma_fence_get(f);
254 if (r) 254 if (r)
255 goto err; 255 goto err;
256 256
257 amdgpu_job_free(job);
258 if (fence) 257 if (fence)
259 *fence = dma_fence_get(f); 258 *fence = dma_fence_get(f);
260 dma_fence_put(f); 259 dma_fence_put(f);
@@ -312,19 +311,13 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
312 for (i = ib->length_dw; i < ib_size_dw; ++i) 311 for (i = ib->length_dw; i < ib_size_dw; ++i)
313 ib->ptr[i] = 0x0; 312 ib->ptr[i] = 0x0;
314 313
315 if (direct) { 314 if (direct)
316 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); 315 r = amdgpu_job_submit_direct(job, ring, &f);
317 job->fence = dma_fence_get(f); 316 else
318 if (r) 317 r = amdgpu_job_submit(job, &ring->adev->vce.entity,
319 goto err;
320
321 amdgpu_job_free(job);
322 } else {
323 r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
324 AMDGPU_FENCE_OWNER_UNDEFINED, &f); 318 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
325 if (r) 319 if (r)
326 goto err; 320 goto err;
327 }
328 321
329 if (fence) 322 if (fence)
330 *fence = dma_fence_get(f); 323 *fence = dma_fence_get(f);
@@ -396,19 +389,18 @@ static int uvd_v7_0_early_init(void *handle)
396static int uvd_v7_0_sw_init(void *handle) 389static int uvd_v7_0_sw_init(void *handle)
397{ 390{
398 struct amdgpu_ring *ring; 391 struct amdgpu_ring *ring;
399 struct drm_sched_rq *rq;
400 int i, j, r; 392 int i, j, r;
401 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 393 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
402 394
403 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { 395 for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
404 /* UVD TRAP */ 396 /* UVD TRAP */
405 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq); 397 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
406 if (r) 398 if (r)
407 return r; 399 return r;
408 400
409 /* UVD ENC TRAP */ 401 /* UVD ENC TRAP */
410 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { 402 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
411 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq); 403 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
412 if (r) 404 if (r)
413 return r; 405 return r;
414 } 406 }
@@ -428,17 +420,6 @@ static int uvd_v7_0_sw_init(void *handle)
428 DRM_INFO("PSP loading UVD firmware\n"); 420 DRM_INFO("PSP loading UVD firmware\n");
429 } 421 }
430 422
431 for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
432 ring = &adev->uvd.inst[j].ring_enc[0];
433 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
434 r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
435 rq, NULL);
436 if (r) {
437 DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
438 return r;
439 }
440 }
441
442 r = amdgpu_uvd_resume(adev); 423 r = amdgpu_uvd_resume(adev);
443 if (r) 424 if (r)
444 return r; 425 return r;
@@ -491,8 +472,6 @@ static int uvd_v7_0_sw_fini(void *handle)
491 return r; 472 return r;
492 473
493 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { 474 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
494 drm_sched_entity_destroy(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
495
496 for (i = 0; i < adev->uvd.num_enc_rings; ++i) 475 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
497 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); 476 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
498 } 477 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 99604d0262ad..cc6ce6cc03f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -39,6 +39,7 @@
39#include "smu/smu_7_1_2_sh_mask.h" 39#include "smu/smu_7_1_2_sh_mask.h"
40#include "gca/gfx_8_0_d.h" 40#include "gca/gfx_8_0_d.h"
41#include "gca/gfx_8_0_sh_mask.h" 41#include "gca/gfx_8_0_sh_mask.h"
42#include "ivsrcid/ivsrcid_vislands30.h"
42 43
43 44
44#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 45#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
@@ -422,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
422 int r, i; 423 int r, i;
423 424
424 /* VCE */ 425 /* VCE */
425 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq); 426 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
426 if (r) 427 if (r)
427 return r; 428 return r;
428 429
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 575bf9709389..65f8860169e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -39,6 +39,8 @@
39#include "mmhub/mmhub_1_0_offset.h" 39#include "mmhub/mmhub_1_0_offset.h"
40#include "mmhub/mmhub_1_0_sh_mask.h" 40#include "mmhub/mmhub_1_0_sh_mask.h"
41 41
42#include "ivsrcid/vce/irqsrcs_vce_4_0.h"
43
42#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 44#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
43 45
44#define VCE_V4_0_FW_SIZE (384 * 1024) 46#define VCE_V4_0_FW_SIZE (384 * 1024)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index b82c92084b6f..2ce91a748c40 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -35,6 +35,8 @@
35#include "mmhub/mmhub_9_1_offset.h" 35#include "mmhub/mmhub_9_1_offset.h"
36#include "mmhub/mmhub_9_1_sh_mask.h" 36#include "mmhub/mmhub_9_1_sh_mask.h"
37 37
38#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
39
38static int vcn_v1_0_stop(struct amdgpu_device *adev); 40static int vcn_v1_0_stop(struct amdgpu_device *adev);
39static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); 41static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
40static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); 42static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -77,13 +79,13 @@ static int vcn_v1_0_sw_init(void *handle)
77 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 79 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
78 80
79 /* VCN DEC TRAP */ 81 /* VCN DEC TRAP */
80 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq); 82 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
81 if (r) 83 if (r)
82 return r; 84 return r;
83 85
84 /* VCN ENC TRAP */ 86 /* VCN ENC TRAP */
85 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 87 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
86 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119, 88 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
87 &adev->vcn.irq); 89 &adev->vcn.irq);
88 if (r) 90 if (r)
89 return r; 91 return r;
@@ -600,12 +602,12 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
600 /* disable byte swapping */ 602 /* disable byte swapping */
601 lmi_swap_cntl = 0; 603 lmi_swap_cntl = 0;
602 604
603 vcn_v1_0_mc_resume(adev);
604
605 vcn_1_0_disable_static_power_gating(adev); 605 vcn_1_0_disable_static_power_gating(adev);
606 /* disable clock gating */ 606 /* disable clock gating */
607 vcn_v1_0_disable_clock_gating(adev); 607 vcn_v1_0_disable_clock_gating(adev);
608 608
609 vcn_v1_0_mc_resume(adev);
610
609 /* disable interupt */ 611 /* disable interupt */
610 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, 612 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
611 ~UVD_MASTINT_EN__VCPU_EN_MASK); 613 ~UVD_MASTINT_EN__VCPU_EN_MASK);
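
Besides the symbolic VCN source IDs, the start sequence is reordered so that MC programming happens last, once the block is fully powered and ungated:

/*
 * Start-sequence ordering after this change:
 *   1. vcn_1_0_disable_static_power_gating(adev)
 *   2. vcn_v1_0_disable_clock_gating(adev)
 *   3. vcn_v1_0_mc_resume(adev)   (now runs with PG and CG out of the way)
 */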
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 45aafca7f315..c5c9b2bc190d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -51,6 +51,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
51 adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i])); 51 adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
52 adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i])); 52 adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
53 adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); 53 adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
54 adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
54 } 55 }
55 return 0; 56 return 0;
56} 57}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 4ac1288ab7df..42c8ad105b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1363,11 +1363,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
1363 1363
1364 if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) { 1364 if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1365 if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) { 1365 if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1366 pp_support_state = AMD_CG_SUPPORT_MC_LS; 1366 pp_support_state = PP_STATE_SUPPORT_LS;
1367 pp_state = PP_STATE_LS; 1367 pp_state = PP_STATE_LS;
1368 } 1368 }
1369 if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) { 1369 if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1370 pp_support_state |= AMD_CG_SUPPORT_MC_MGCG; 1370 pp_support_state |= PP_STATE_SUPPORT_CG;
1371 pp_state |= PP_STATE_CG; 1371 pp_state |= PP_STATE_CG;
1372 } 1372 }
1373 if (state == AMD_CG_STATE_UNGATE) 1373 if (state == AMD_CG_STATE_UNGATE)
@@ -1382,11 +1382,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
1382 1382
1383 if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) { 1383 if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
1384 if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) { 1384 if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1385 pp_support_state = AMD_CG_SUPPORT_SDMA_LS; 1385 pp_support_state = PP_STATE_SUPPORT_LS;
1386 pp_state = PP_STATE_LS; 1386 pp_state = PP_STATE_LS;
1387 } 1387 }
1388 if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) { 1388 if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1389 pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG; 1389 pp_support_state |= PP_STATE_SUPPORT_CG;
1390 pp_state |= PP_STATE_CG; 1390 pp_state |= PP_STATE_CG;
1391 } 1391 }
1392 if (state == AMD_CG_STATE_UNGATE) 1392 if (state == AMD_CG_STATE_UNGATE)
@@ -1401,11 +1401,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
1401 1401
1402 if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) { 1402 if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
1403 if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) { 1403 if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1404 pp_support_state = AMD_CG_SUPPORT_HDP_LS; 1404 pp_support_state = PP_STATE_SUPPORT_LS;
1405 pp_state = PP_STATE_LS; 1405 pp_state = PP_STATE_LS;
1406 } 1406 }
1407 if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) { 1407 if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1408 pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG; 1408 pp_support_state |= PP_STATE_SUPPORT_CG;
1409 pp_state |= PP_STATE_CG; 1409 pp_state |= PP_STATE_CG;
1410 } 1410 }
1411 if (state == AMD_CG_STATE_UNGATE) 1411 if (state == AMD_CG_STATE_UNGATE)
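
All three hunks in vi.c fix the same slip: pp_support_state was filled with AMD_CG_SUPPORT_* feature bits from adev->cg_flags, but the SMU message expects the PP_STATE_SUPPORT_* encoding. The corrected per-block pattern, condensed:

    /* sketch: translate cg_flags into the SMU support/state encoding */
    if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
        pp_support_state = PP_STATE_SUPPORT_LS;   /* not the cg_flags bit */
        pp_state = PP_STATE_LS;
    }
    if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
        pp_support_state |= PP_STATE_SUPPORT_CG;
        pp_state |= PP_STATE_CG;
    }
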
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 4c35625eb2c7..325083b0297e 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -9,14 +9,6 @@ config DRM_AMD_DC
9 support for AMDGPU. This adds required support for Vega and 9 support for AMDGPU. This adds required support for Vega and
10 Raven ASICs. 10 Raven ASICs.
11 11
12config DRM_AMD_DC_DCN1_0
13 bool "DCN 1.0 Raven family"
14 depends on DRM_AMD_DC && X86
15 default y
16 help
17 Choose this option if you want to have
18 RV family for display engine
19
20config DEBUG_KERNEL_DC 12config DEBUG_KERNEL_DC
21 bool "Enable kgdb break in DC" 13 bool "Enable kgdb break in DC"
22 depends on DRM_AMD_DC 14 depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
index 357d59648401..a8a6c106e8c7 100644
--- a/drivers/gpu/drm/amd/display/TODO
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -97,10 +97,10 @@ share it with drivers. But that's a very long term goal, and by far not just an
97issue with DC - other drivers, especially around DP sink handling, are equally 97issue with DC - other drivers, especially around DP sink handling, are equally
98guilty. 98guilty.
99 99
10019. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG 10019. DONE - The DC logger is still a rather sore thing, but I know that the
101stuff just isn't up to the challenges either. We need to figure out something 101DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
102that integrates better with DRM and linux debug printing, while not being 102something that integrates better with DRM and linux debug printing, while not
103useless with filtering output. dynamic debug printing might be an option. 103being useless with filtering output. dynamic debug printing might be an option.
104 104
10520. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI 10520. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
106retimer that we need to program to pass PHY compliance. Currently that's 106retimer that we need to program to pass PHY compliance. Currently that's
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 28da18b1da52..5fc13e71a3b5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -60,7 +60,7 @@
60 60
61#include "modules/inc/mod_freesync.h" 61#include "modules/inc/mod_freesync.h"
62 62
63#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 63#ifdef CONFIG_X86
64#include "ivsrcid/irqsrcs_dcn_1_0.h" 64#include "ivsrcid/irqsrcs_dcn_1_0.h"
65 65
66#include "dcn/dcn_1_0_offset.h" 66#include "dcn/dcn_1_0_offset.h"
@@ -1041,7 +1041,7 @@ static void handle_hpd_rx_irq(void *param)
1041 if (dc_link->type != dc_connection_mst_branch) 1041 if (dc_link->type != dc_connection_mst_branch)
1042 mutex_lock(&aconnector->hpd_lock); 1042 mutex_lock(&aconnector->hpd_lock);
1043 1043
1044 if (dc_link_handle_hpd_rx_irq(dc_link, NULL) && 1044 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
1045 !is_mst_root_connector) { 1045 !is_mst_root_connector) {
1046 /* Downstream Port status changed. */ 1046 /* Downstream Port status changed. */
1047 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { 1047 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
@@ -1192,7 +1192,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1192 return 0; 1192 return 0;
1193} 1193}
1194 1194
1195#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 1195#ifdef CONFIG_X86
1196/* Register IRQ sources and initialize IRQ callbacks */ 1196/* Register IRQ sources and initialize IRQ callbacks */
1197static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 1197static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1198{ 1198{
@@ -1526,7 +1526,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1526 goto fail; 1526 goto fail;
1527 } 1527 }
1528 break; 1528 break;
1529#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 1529#ifdef CONFIG_X86
1530 case CHIP_RAVEN: 1530 case CHIP_RAVEN:
1531 if (dcn10_register_irq_handlers(dm->adev)) { 1531 if (dcn10_register_irq_handlers(dm->adev)) {
1532 DRM_ERROR("DM: Failed to initialize IRQ\n"); 1532 DRM_ERROR("DM: Failed to initialize IRQ\n");
@@ -1725,7 +1725,7 @@ static int dm_early_init(void *handle)
1725 adev->mode_info.num_dig = 6; 1725 adev->mode_info.num_dig = 6;
1726 adev->mode_info.plane_type = dm_plane_type_default; 1726 adev->mode_info.plane_type = dm_plane_type_default;
1727 break; 1727 break;
1728#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 1728#ifdef CONFIG_X86
1729 case CHIP_RAVEN: 1729 case CHIP_RAVEN:
1730 adev->mode_info.num_crtc = 4; 1730 adev->mode_info.num_crtc = 4;
1731 adev->mode_info.num_hpd = 4; 1731 adev->mode_info.num_hpd = 4;
@@ -3094,15 +3094,25 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3094 else 3094 else
3095 domain = AMDGPU_GEM_DOMAIN_VRAM; 3095 domain = AMDGPU_GEM_DOMAIN_VRAM;
3096 3096
3097 r = amdgpu_bo_pin(rbo, domain, &afb->address); 3097 r = amdgpu_bo_pin(rbo, domain);
3098 amdgpu_bo_unreserve(rbo);
3099
3100 if (unlikely(r != 0)) { 3098 if (unlikely(r != 0)) {
3101 if (r != -ERESTARTSYS) 3099 if (r != -ERESTARTSYS)
3102 DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 3100 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
3101 amdgpu_bo_unreserve(rbo);
3103 return r; 3102 return r;
3104 } 3103 }
3105 3104
3105 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
3106 if (unlikely(r != 0)) {
3107 amdgpu_bo_unpin(rbo);
3108 amdgpu_bo_unreserve(rbo);
3109 DRM_ERROR("%p bind failed\n", rbo);
3110 return r;
3111 }
3112 amdgpu_bo_unreserve(rbo);
3113
3114 afb->address = amdgpu_bo_gpu_offset(rbo);
3115
3106 amdgpu_bo_ref(rbo); 3116 amdgpu_bo_ref(rbo);
3107 3117
3108 if (dm_plane_state_new->dc_state && 3118 if (dm_plane_state_new->dc_state &&
@@ -3499,7 +3509,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3499 aconnector->base.stereo_allowed = false; 3509 aconnector->base.stereo_allowed = false;
3500 aconnector->base.dpms = DRM_MODE_DPMS_OFF; 3510 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3501 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ 3511 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3502
3503 mutex_init(&aconnector->hpd_lock); 3512 mutex_init(&aconnector->hpd_lock);
3504 3513
3505 /* configure support HPD hot plug connector->polled default value is 0 3514 /* configure support HPD hot plug connector->polled default value is 0
@@ -3508,9 +3517,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3508 switch (connector_type) { 3517 switch (connector_type) {
3509 case DRM_MODE_CONNECTOR_HDMIA: 3518 case DRM_MODE_CONNECTOR_HDMIA:
3510 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 3519 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3520 aconnector->base.ycbcr_420_allowed =
3521 link->link_enc->features.ycbcr420_supported ? true : false;
3511 break; 3522 break;
3512 case DRM_MODE_CONNECTOR_DisplayPort: 3523 case DRM_MODE_CONNECTOR_DisplayPort:
3513 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 3524 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3525 aconnector->base.ycbcr_420_allowed =
3526 link->link_enc->features.ycbcr420_supported ? true : false;
3514 break; 3527 break;
3515 case DRM_MODE_CONNECTOR_DVID: 3528 case DRM_MODE_CONNECTOR_DVID:
3516 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 3529 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
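
Since amdgpu_bo_pin() no longer returns the GPU address, prepare_fb has to bind the pinned buffer into GART before amdgpu_bo_gpu_offset() yields a valid address. A condensed sketch of the new flow (error unwinding abridged; the full version is in the hunk above):

    r = amdgpu_bo_pin(rbo, domain);             /* no address out-param */
    if (!r)
        r = amdgpu_ttm_alloc_gart(&rbo->tbo);   /* bind into GART */
    amdgpu_bo_unreserve(rbo);
    if (!r)
        afb->address = amdgpu_bo_gpu_offset(rbo);
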
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index cf5ea69e46ad..0d9e410ca01e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -26,113 +26,667 @@
26#include <linux/debugfs.h> 26#include <linux/debugfs.h>
27 27
28#include "dc.h" 28#include "dc.h"
29#include "dc_link.h"
30
31#include "amdgpu.h" 29#include "amdgpu.h"
32#include "amdgpu_dm.h" 30#include "amdgpu_dm.h"
33#include "amdgpu_dm_debugfs.h" 31#include "amdgpu_dm_debugfs.h"
34 32
35static ssize_t dp_link_rate_debugfs_read(struct file *f, char __user *buf, 33/* function description
34 * get/set DP configuration: lane_count, link_rate, spread_spectrum
35 *
36 * valid lane count value: 1, 2, 4
37 * valid link rate value:
38 * 06h = 1.62Gbps per lane
39 * 0Ah = 2.7Gbps per lane
40 * 0Ch = 3.24Gbps per lane
41 * 14h = 5.4Gbps per lane
42 * 1Eh = 8.1Gbps per lane
43 *
44 * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
45 *
46 * --- to get dp configuration
47 *
48 * cat link_settings
49 *
50 * It will list current, verified, reported, preferred dp configuration.
51 * current -- for current video mode
52 * verified --- maximum configuration which passes link training
53 * reported --- DP rx reported caps (DPCD register offsets 0, 1, 2)
54 * preferred --- user force settings
55 *
56 * --- set (or force) dp configuration
57 *
58 * echo <lane_count> <link_rate> > link_settings
59 *
60 * for example, to force 4 lanes at 2.7 Gbps,
61 * echo 4 0xa > link_settings
62 *
63 * spread_spectrum cannot be changed dynamically.
64 *
65 * if an invalid lane count or link rate is forced, no hw programming will be
66 * done. please check link settings after the force operation to see whether
67 * the HW was actually programmed.
68 *
69 * cat link_settings
70 *
71 * check current and preferred settings.
72 *
73 */
74static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
36 size_t size, loff_t *pos) 75 size_t size, loff_t *pos)
37{ 76{
38 /* TODO: create method to read link rate */ 77 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
39 return 1; 78 struct dc_link *link = connector->dc_link;
40} 79 char *rd_buf = NULL;
80 char *rd_buf_ptr = NULL;
81 const uint32_t rd_buf_size = 100;
82 uint32_t result = 0;
83 uint8_t str_len = 0;
84 int r;
41 85
42static ssize_t dp_link_rate_debugfs_write(struct file *f, const char __user *buf, 86 if (*pos & 3 || size & 3)
43 size_t size, loff_t *pos) 87 return -EINVAL;
44{
45 /* TODO: create method to write link rate */
46 return 1;
47}
48 88
49static ssize_t dp_lane_count_debugfs_read(struct file *f, char __user *buf, 89 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
50 size_t size, loff_t *pos) 90 if (!rd_buf)
51{ 91 return 0;
52 /* TODO: create method to read lane count */
53 return 1;
54}
55 92
56static ssize_t dp_lane_count_debugfs_write(struct file *f, const char __user *buf, 93 rd_buf_ptr = rd_buf;
57 size_t size, loff_t *pos)
58{
59 /* TODO: create method to write lane count */
60 return 1;
61}
62 94
63static ssize_t dp_voltage_swing_debugfs_read(struct file *f, char __user *buf, 95 str_len = strlen("Current: %d %d %d ");
64 size_t size, loff_t *pos) 96 snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
65{ 97 link->cur_link_settings.lane_count,
66 /* TODO: create method to read voltage swing */ 98 link->cur_link_settings.link_rate,
67 return 1; 99 link->cur_link_settings.link_spread);
68} 100 rd_buf_ptr += str_len;
69 101
70static ssize_t dp_voltage_swing_debugfs_write(struct file *f, const char __user *buf, 102 str_len = strlen("Verified: %d %d %d ");
71 size_t size, loff_t *pos) 103 snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
72{ 104 link->verified_link_cap.lane_count,
73 /* TODO: create method to write voltage swing */ 105 link->verified_link_cap.link_rate,
74 return 1; 106 link->verified_link_cap.link_spread);
107 rd_buf_ptr += str_len;
108
109 str_len = strlen("Reported: %d %d %d ");
110 snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
111 link->reported_link_cap.lane_count,
112 link->reported_link_cap.link_rate,
113 link->reported_link_cap.link_spread);
114 rd_buf_ptr += str_len;
115
116 str_len = strlen("Preferred: %d %d %d ");
117 snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
118 link->preferred_link_setting.lane_count,
119 link->preferred_link_setting.link_rate,
120 link->preferred_link_setting.link_spread);
121
122 while (size) {
123 if (*pos >= rd_buf_size)
124 break;
125
126 r = put_user(*(rd_buf + result), buf);
127 if (r)
128 return r; /* r = -EFAULT */
129
130 buf += 1;
131 size -= 1;
132 *pos += 1;
133 result += 1;
134 }
135
136 kfree(rd_buf);
137 return result;
75} 138}
76 139
77static ssize_t dp_pre_emphasis_debugfs_read(struct file *f, char __user *buf, 140static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
78 size_t size, loff_t *pos) 141 size_t size, loff_t *pos)
79{ 142{
80 /* TODO: create method to read pre-emphasis */ 143 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
81 return 1; 144 struct dc_link *link = connector->dc_link;
145 struct dc *dc = (struct dc *)link->dc;
146 struct dc_link_settings prefer_link_settings;
147 char *wr_buf = NULL;
148 char *wr_buf_ptr = NULL;
149 const uint32_t wr_buf_size = 40;
150 int r;
151 int bytes_from_user;
152 char *sub_str;
153 /* 0: lane_count; 1: link_rate */
154 uint8_t param_index = 0;
155 long param[2];
156 const char delimiter[3] = {' ', '\n', '\0'};
157 bool valid_input = false;
158
159 if (size == 0)
160 return -EINVAL;
161
162 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
163 if (!wr_buf)
164 return -EINVAL;
165 wr_buf_ptr = wr_buf;
166
167 r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
168
169 /* r is the number of bytes not copied */
170 if (r >= wr_buf_size) {
171 kfree(wr_buf);
172 DRM_DEBUG_DRIVER("user data not read\n");
173 return -EINVAL;
174 }
175
176 bytes_from_user = wr_buf_size - r;
177
178 while (isspace(*wr_buf_ptr))
179 wr_buf_ptr++;
180
181 while ((*wr_buf_ptr != '\0') && (param_index < 2)) {
182
183 sub_str = strsep(&wr_buf_ptr, delimiter);
184
185 r = kstrtol(sub_str, 16, &param[param_index]);
186
187 if (r)
188 DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
189
190 param_index++;
191 while (isspace(*wr_buf_ptr))
192 wr_buf_ptr++;
193 }
194
195 switch (param[0]) {
196 case LANE_COUNT_ONE:
197 case LANE_COUNT_TWO:
198 case LANE_COUNT_FOUR:
199 valid_input = true;
200 break;
201 default:
202 break;
203 }
204
205 switch (param[1]) {
206 case LINK_RATE_LOW:
207 case LINK_RATE_HIGH:
208 case LINK_RATE_RBR2:
209 case LINK_RATE_HIGH2:
210 case LINK_RATE_HIGH3:
211 valid_input = true;
212 break;
213 default:
214 break;
215 }
216
217 if (!valid_input) {
218 kfree(wr_buf);
219 DRM_DEBUG_DRIVER("Invalid input value; no HW will be programmed\n");
220 return bytes_from_user;
221 }
222
223 /* save user force lane_count, link_rate to preferred settings
224 * spread spectrum will not be changed
225 */
226 prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
227 prefer_link_settings.lane_count = param[0];
228 prefer_link_settings.link_rate = param[1];
229
230 dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
231
232 kfree(wr_buf);
233 return bytes_from_user;
82} 234}
83 235
84static ssize_t dp_pre_emphasis_debugfs_write(struct file *f, const char __user *buf, 236/* function: get current DP PHY settings: voltage swing, pre-emphasis,
237 * post-cursor2 (defined by VESA DP specification)
238 *
239 * valid values
240 * voltage swing: 0,1,2,3
241 * pre-emphasis : 0,1,2,3
242 * post cursor2 : 0,1,2,3
243 *
244 *
245 * how to use this debugfs
246 *
247 * debugfs is located at /sys/kernel/debug/dri/0/DP-x
248 *
249 * there will be directories, like DP-1, DP-2, DP-3, etc., for DP displays
250 *
251 * To figure out which DP-x is the display to be checked,
252 * cd DP-x
253 * ls -ll
254 * There should be debugfs files, like link_settings, phy_settings.
255 * cat link_settings
256 * from lane_count and link_rate, figure out which DP-x is the display to be
257 * worked on
258 *
259 * To get current DP PHY settings,
260 * cat phy_settings
261 *
262 * To change DP PHY settings,
263 * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
264 * for example, to change voltage swing to 2, pre-emphasis to 3, post_cursor2 to
265 * 0,
266 * echo 2 3 0 > phy_settings
267 *
268 * To check whether the change was applied, get the current phy settings by
269 * cat phy_settings
270 *
271 * If invalid values are set by the user, like
272 * echo 1 4 0 > phy_settings
273 *
274 * HW will NOT be programmed by these settings.
275 * cat phy_settings will show the previous valid settings.
276 */
277static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
85 size_t size, loff_t *pos) 278 size_t size, loff_t *pos)
86{ 279{
87 /* TODO: create method to write pre-emphasis */ 280 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
88 return 1; 281 struct dc_link *link = connector->dc_link;
282 char *rd_buf = NULL;
283 const uint32_t rd_buf_size = 20;
284 uint32_t result = 0;
285 int r;
286
287 if (*pos & 3 || size & 3)
288 return -EINVAL;
289
290 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
291 if (!rd_buf)
292 return -EINVAL;
293
294 snprintf(rd_buf, rd_buf_size, " %d %d %d ",
295 link->cur_lane_setting.VOLTAGE_SWING,
296 link->cur_lane_setting.PRE_EMPHASIS,
297 link->cur_lane_setting.POST_CURSOR2);
298
299 while (size) {
300 if (*pos >= rd_buf_size)
301 break;
302
303 r = put_user((*(rd_buf + result)), buf);
304 if (r)
305 return r; /* r = -EFAULT */
306
307 buf += 1;
308 size -= 1;
309 *pos += 1;
310 result += 1;
311 }
312
313 kfree(rd_buf);
314 return result;
89} 315}
90 316
91static ssize_t dp_phy_test_pattern_debugfs_read(struct file *f, char __user *buf, 317static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
92 size_t size, loff_t *pos) 318 size_t size, loff_t *pos)
93{ 319{
94 /* TODO: create method to read PHY test pattern */ 320 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
95 return 1; 321 struct dc_link *link = connector->dc_link;
322 struct dc *dc = (struct dc *)link->dc;
323 char *wr_buf = NULL;
324 char *wr_buf_ptr = NULL;
325 uint32_t wr_buf_size = 40;
326 int r;
327 int bytes_from_user;
328 char *sub_str;
329 uint8_t param_index = 0;
330 long param[3];
331 const char delimiter[3] = {' ', '\n', '\0'};
332 bool use_prefer_link_setting;
333 struct link_training_settings link_lane_settings;
334
335 if (size == 0)
336 return 0;
337
338 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
339 if (!wr_buf)
340 return 0;
341 wr_buf_ptr = wr_buf;
342
343 r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
344
345 /* r is the number of bytes not copied */
346 if (r >= wr_buf_size) {
347 kfree(wr_buf);
348 DRM_DEBUG_DRIVER("user data could not be read\n");
349 return 0;
350 }
351
352 bytes_from_user = wr_buf_size - r;
353
354 while (isspace(*wr_buf_ptr))
355 wr_buf_ptr++;
356
357 while ((*wr_buf_ptr != '\0') && (param_index < 3)) {
358
359 sub_str = strsep(&wr_buf_ptr, delimiter);
360
361 r = kstrtol(sub_str, 16, &param[param_index]);
362
363 if (r)
364 DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
365
366 param_index++;
367 while (isspace(*wr_buf_ptr))
368 wr_buf_ptr++;
369 }
370
371 if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
372 (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
373 (param[2] > POST_CURSOR2_MAX_LEVEL)) {
374 kfree(wr_buf);
375 DRM_DEBUG_DRIVER("Invalid input; no HW will be programmed\n");
376 return bytes_from_user;
377 }
378
379 /* get link settings: lane count, link rate */
380 use_prefer_link_setting =
381 ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
382 (link->test_pattern_enabled));
383
384 memset(&link_lane_settings, 0, sizeof(link_lane_settings));
385
386 if (use_prefer_link_setting) {
387 link_lane_settings.link_settings.lane_count =
388 link->preferred_link_setting.lane_count;
389 link_lane_settings.link_settings.link_rate =
390 link->preferred_link_setting.link_rate;
391 link_lane_settings.link_settings.link_spread =
392 link->preferred_link_setting.link_spread;
393 } else {
394 link_lane_settings.link_settings.lane_count =
395 link->cur_link_settings.lane_count;
396 link_lane_settings.link_settings.link_rate =
397 link->cur_link_settings.link_rate;
398 link_lane_settings.link_settings.link_spread =
399 link->cur_link_settings.link_spread;
400 }
401
402 /* apply phy settings from user */
403 for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
404 link_lane_settings.lane_settings[r].VOLTAGE_SWING =
405 (enum dc_voltage_swing) (param[0]);
406 link_lane_settings.lane_settings[r].PRE_EMPHASIS =
407 (enum dc_pre_emphasis) (param[1]);
408 link_lane_settings.lane_settings[r].POST_CURSOR2 =
409 (enum dc_post_cursor2) (param[2]);
410 }
411
412 /* program ASIC registers and DPCD registers */
413 dc_link_set_drive_settings(dc, &link_lane_settings, link);
414
415 kfree(wr_buf);
416 return bytes_from_user;
96} 417}
97 418
419/* function description
420 *
421 * set PHY layer or Link layer test pattern
422 * PHY test pattern is used for PHY SI check.
423 * Link layer test will not affect PHY SI.
424 *
425 * Reset Test Pattern:
426 * 0 = DP_TEST_PATTERN_VIDEO_MODE
427 *
428 * PHY test pattern supported:
429 * 1 = DP_TEST_PATTERN_D102
430 * 2 = DP_TEST_PATTERN_SYMBOL_ERROR
431 * 3 = DP_TEST_PATTERN_PRBS7
432 * 4 = DP_TEST_PATTERN_80BIT_CUSTOM
433 * 5 = DP_TEST_PATTERN_CP2520_1
434 * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
435 * 7 = DP_TEST_PATTERN_CP2520_3
436 *
437 * DP PHY Link Training Patterns
438 * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
439 * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
440 * a = DP_TEST_PATTERN_TRAINING_PATTERN3
441 * b = DP_TEST_PATTERN_TRAINING_PATTERN4
442 *
443 * DP Link Layer Test pattern
444 * c = DP_TEST_PATTERN_COLOR_SQUARES
445 * d = DP_TEST_PATTERN_COLOR_SQUARES_CEA
446 * e = DP_TEST_PATTERN_VERTICAL_BARS
447 * f = DP_TEST_PATTERN_HORIZONTAL_BARS
448 * 10= DP_TEST_PATTERN_COLOR_RAMP
449 *
450 * debugfs test_pattern is located at /sys/kernel/debug/dri/0/DP-x
451 *
452 * --- set test pattern
453 * echo <test pattern #> > test_pattern
454 *
455 * If test pattern # is not supported, NO HW programming will be done.
456 * for DP_TEST_PATTERN_80BIT_CUSTOM, an extra 10 bytes of data are needed
457 * for the user pattern. the 10 input data bytes are separated by spaces
458 *
459 * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > test_pattern
460 *
461 * --- reset test pattern
462 * echo 0 > test_pattern
463 *
464 * --- HPD detection is disabled when set PHY test pattern
465 *
466 * when a PHY test pattern (pattern # within [1,7]) is set, the HPD pin of the
467 * HW ASIC is disabled. The user can unplug the DP display from the DP connector
468 * and attach a scope to check the test pattern PHY SI.
469 * If the scope needs to be unplugged and the DP display plugged back in, do the
470 * steps below:
470 * echo 0 > test_pattern
471 * unplug scope
472 * plug DP display.
473 *
474 * "echo 0 > phy_test_pattern" will re-enable HPD pin again so that video sw
475 * driver could detect "unplug scope" and "plug DP display"
476 */
98static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf, 477static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
99 size_t size, loff_t *pos) 478 size_t size, loff_t *pos)
100{ 479{
101 /* TODO: create method to write PHY test pattern */ 480 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
102 return 1; 481 struct dc_link *link = connector->dc_link;
103} 482 char *wr_buf = NULL;
483 char *wr_buf_ptr = NULL;
484 uint32_t wr_buf_size = 100;
485 uint32_t wr_buf_count = 0;
486 int r;
487 int bytes_from_user;
488 char *sub_str = NULL;
489 uint8_t param_index = 0;
490 uint8_t param_nums = 0;
491 long param[11] = {0x0};
492 const char delimiter[3] = {' ', '\n', '\0'};
493 enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
494 bool disable_hpd = false;
495 bool valid_test_pattern = false;
496 /* init with default 80-bit custom pattern */
497 uint8_t custom_pattern[10] = {
498 0x1f, 0x7c, 0xf0, 0xc1, 0x07,
499 0x1f, 0x7c, 0xf0, 0xc1, 0x07
500 };
501 struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
502 LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
503 struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
504 LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
505 struct link_training_settings link_training_settings;
506 int i;
104 507
105static const struct file_operations dp_link_rate_fops = { 508 if (size == 0)
106 .owner = THIS_MODULE, 509 return 0;
107 .read = dp_link_rate_debugfs_read,
108 .write = dp_link_rate_debugfs_write,
109 .llseek = default_llseek
110};
111 510
112static const struct file_operations dp_lane_count_fops = { 511 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
113 .owner = THIS_MODULE, 512 if (!wr_buf)
114 .read = dp_lane_count_debugfs_read, 513 return 0;
115 .write = dp_lane_count_debugfs_write, 514 wr_buf_ptr = wr_buf;
116 .llseek = default_llseek 515
117}; 516 r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
517
518 /* r is the number of bytes not copied */
519 if (r >= wr_buf_size) {
520 kfree(wr_buf);
521 DRM_DEBUG_DRIVER("user data could not be read\n");
522 return 0;
523 }
524
525 bytes_from_user = wr_buf_size - r;
526
527 /* check number of parameters; isspace cannot distinguish space and \n */
528 while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
529 /* skip space*/
530 while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
531 wr_buf_ptr++;
532 wr_buf_count++;
533 }
534
535 if (wr_buf_count == wr_buf_size)
536 break;
537
538 /* skip non-space*/
539 while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
540 wr_buf_ptr++;
541 wr_buf_count++;
542 }
543
544 param_nums++;
545
546 if (wr_buf_count == wr_buf_size)
547 break;
548 }
549
550 /* max 11 parameters */
551 if (param_nums > 11)
552 param_nums = 11;
553
554 wr_buf_ptr = wr_buf; /* reset buf pointer */
555 wr_buf_count = 0; /* number of chars already checked */
556
557 while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
558 wr_buf_ptr++;
559 wr_buf_count++;
560 }
561
562 while (param_index < param_nums) {
563 /* after strsep, wr_buf_ptr will be moved to after space */
564 sub_str = strsep(&wr_buf_ptr, delimiter);
565
566 r = kstrtol(sub_str, 16, &param[param_index]);
567
568 if (r)
569 DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
570
571 param_index++;
572 }
573
574 test_pattern = param[0];
575
576 switch (test_pattern) {
577 case DP_TEST_PATTERN_VIDEO_MODE:
578 case DP_TEST_PATTERN_COLOR_SQUARES:
579 case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
580 case DP_TEST_PATTERN_VERTICAL_BARS:
581 case DP_TEST_PATTERN_HORIZONTAL_BARS:
582 case DP_TEST_PATTERN_COLOR_RAMP:
583 valid_test_pattern = true;
584 break;
585
586 case DP_TEST_PATTERN_D102:
587 case DP_TEST_PATTERN_SYMBOL_ERROR:
588 case DP_TEST_PATTERN_PRBS7:
589 case DP_TEST_PATTERN_80BIT_CUSTOM:
590 case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
591 case DP_TEST_PATTERN_TRAINING_PATTERN4:
592 disable_hpd = true;
593 valid_test_pattern = true;
594 break;
595
596 default:
597 valid_test_pattern = false;
598 test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
599 break;
600 }
601
602 if (!valid_test_pattern) {
603 kfree(wr_buf);
604 DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
605 return bytes_from_user;
606 }
607
608 if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
609 for (i = 0; i < 10; i++) {
610 if ((uint8_t) param[i + 1] != 0x0)
611 break;
612 }
613
614 if (i < 10) {
615 /* do not use the default value */
616 for (i = 0; i < 10; i++)
617 custom_pattern[i] = (uint8_t) param[i + 1];
618 }
619 }
620
621 /* Usage: set a DP physical test pattern using debugfs with a normal DP
622 * panel, then unplug the DP panel and connect a scope to measure.
623 * Normal video mode and test patterns generated from the CRTC are
624 * visible to the user, so do not disable HPD for those.
625 * Video Mode is also set to clear the test pattern, so enable HPD
626 * because it might have been disabled after a test pattern was set.
627 * AUX depends on HPD; sequence dependent, do not move!
628 */
629 if (!disable_hpd)
630 dc_link_enable_hpd(link);
631
632 prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
633 prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
634 prefer_link_settings.link_spread = link->verified_link_cap.link_spread;
635
636 cur_link_settings.lane_count = link->cur_link_settings.lane_count;
637 cur_link_settings.link_rate = link->cur_link_settings.link_rate;
638 cur_link_settings.link_spread = link->cur_link_settings.link_spread;
639
640 link_training_settings.link_settings = cur_link_settings;
641
642
643 if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
644 if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
645 prefer_link_settings.link_rate != LINK_RATE_UNKNOWN &&
646 (prefer_link_settings.lane_count != cur_link_settings.lane_count ||
647 prefer_link_settings.link_rate != cur_link_settings.link_rate))
648 link_training_settings.link_settings = prefer_link_settings;
649 }
650
651 for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
652 link_training_settings.lane_settings[i] = link->cur_lane_setting;
653
654 dc_link_set_test_pattern(
655 link,
656 test_pattern,
657 &link_training_settings,
658 custom_pattern,
659 10);
660
661 /* Usage: Set a DP physical test pattern using AMDDP with a normal DP panel.
662 * Then unplug the DP panel and connect a scope to measure the DP PHY signal.
663 * HPD must be disabled so the SW driver does not disable DP output. This is
664 * done after the test pattern is set.
665 */
666 if (valid_test_pattern && disable_hpd)
667 dc_link_disable_hpd(link);
668
669 kfree(wr_buf);
670
671 return bytes_from_user;
672}
118 673
119static const struct file_operations dp_voltage_swing_fops = { 674static const struct file_operations dp_link_settings_debugfs_fops = {
120 .owner = THIS_MODULE, 675 .owner = THIS_MODULE,
121 .read = dp_voltage_swing_debugfs_read, 676 .read = dp_link_settings_read,
122 .write = dp_voltage_swing_debugfs_write, 677 .write = dp_link_settings_write,
123 .llseek = default_llseek 678 .llseek = default_llseek
124}; 679};
125 680
126static const struct file_operations dp_pre_emphasis_fops = { 681static const struct file_operations dp_phy_settings_debugfs_fop = {
127 .owner = THIS_MODULE, 682 .owner = THIS_MODULE,
128 .read = dp_pre_emphasis_debugfs_read, 683 .read = dp_phy_settings_read,
129 .write = dp_pre_emphasis_debugfs_write, 684 .write = dp_phy_settings_write,
130 .llseek = default_llseek 685 .llseek = default_llseek
131}; 686};
132 687
133static const struct file_operations dp_phy_test_pattern_fops = { 688static const struct file_operations dp_phy_test_pattern_fops = {
134 .owner = THIS_MODULE, 689 .owner = THIS_MODULE,
135 .read = dp_phy_test_pattern_debugfs_read,
136 .write = dp_phy_test_pattern_debugfs_write, 690 .write = dp_phy_test_pattern_debugfs_write,
137 .llseek = default_llseek 691 .llseek = default_llseek
138}; 692};
@@ -141,11 +695,9 @@ static const struct {
141 char *name; 695 char *name;
142 const struct file_operations *fops; 696 const struct file_operations *fops;
143} dp_debugfs_entries[] = { 697} dp_debugfs_entries[] = {
144 {"link_rate", &dp_link_rate_fops}, 698 {"link_settings", &dp_link_settings_debugfs_fops},
145 {"lane_count", &dp_lane_count_fops}, 699 {"phy_settings", &dp_phy_settings_debugfs_fop},
146 {"voltage_swing", &dp_voltage_swing_fops}, 700 {"test_pattern", &dp_phy_test_pattern_fops}
147 {"pre_emphasis", &dp_pre_emphasis_fops},
148 {"phy_test_pattern", &dp_phy_test_pattern_fops}
149}; 701};
150 702
151int connector_debugfs_init(struct amdgpu_dm_connector *connector) 703int connector_debugfs_init(struct amdgpu_dm_connector *connector)
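
Taken together, the new nodes can be exercised from userspace exactly as the comments describe. A minimal C sketch (the DP-1 connector name and the 4-lane/0xa values are illustrative only, not part of the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[100] = {0};
        int fd = open("/sys/kernel/debug/dri/0/DP-1/link_settings", O_RDWR);

        if (fd < 0)
            return 1;
        write(fd, "4 0xa\n", 6);            /* force 4 lanes, 2.7 Gbps */
        pread(fd, buf, sizeof(buf) - 4, 0); /* read size must be 4-byte aligned */
        printf("%s\n", buf);
        close(fd);
        return 0;
    }
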
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b19dc4cfc030..8403b6a9a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -169,6 +169,11 @@ static void get_payload_table(
169 mutex_unlock(&mst_mgr->payload_lock); 169 mutex_unlock(&mst_mgr->payload_lock);
170} 170}
171 171
172void dm_helpers_dp_update_branch_info(
173 struct dc_context *ctx,
174 const struct dc_link *link)
175{}
176
172/* 177/*
173 * Writes payload allocation table in immediate downstream device. 178 * Writes payload allocation table in immediate downstream device.
174 */ 179 */
@@ -454,6 +459,22 @@ bool dm_helpers_submit_i2c(
454 return result; 459 return result;
455} 460}
456 461
462bool dm_helpers_is_dp_sink_present(struct dc_link *link)
463{
464 bool dp_sink_present;
465 struct amdgpu_dm_connector *aconnector = link->priv;
466
467 if (!aconnector) {
468 BUG_ON("Failed to find connector for link!");
469 return true;
470 }
471
472 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
473 dp_sink_present = dc_link_is_dp_sink_present(link);
474 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
475 return dp_sink_present;
476}
477
457enum dc_edid_status dm_helpers_read_local_edid( 478enum dc_edid_status dm_helpers_read_local_edid(
458 struct dc_context *ctx, 479 struct dc_context *ctx,
459 struct dc_link *link, 480 struct dc_link *link,
@@ -498,8 +519,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
498 edid_status, 519 edid_status,
499 aconnector->base.name); 520 aconnector->base.name);
500 if (link->aux_mode) { 521 if (link->aux_mode) {
501 union test_request test_request = {0}; 522 union test_request test_request = { {0} };
502 union test_response test_response = {0}; 523 union test_response test_response = { {0} };
503 524
504 dm_helpers_dp_read_dpcd(ctx, 525 dm_helpers_dp_read_dpcd(ctx,
505 link, 526 link,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 65f210d3497b..9a300732ba37 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -80,55 +80,72 @@ static void log_dpcd(uint8_t type,
80static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, 80static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
81 struct drm_dp_aux_msg *msg) 81 struct drm_dp_aux_msg *msg)
82{ 82{
83 enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? 83 ssize_t result = 0;
84 I2C_MOT_TRUE : I2C_MOT_FALSE; 84 enum i2caux_transaction_action action;
85 enum ddc_result res; 85 enum aux_transaction_type type;
86 uint32_t read_bytes = msg->size;
87 86
88 if (WARN_ON(msg->size > 16)) 87 if (WARN_ON(msg->size > 16))
89 return -E2BIG; 88 return -E2BIG;
90 89
91 switch (msg->request & ~DP_AUX_I2C_MOT) { 90 switch (msg->request & ~DP_AUX_I2C_MOT) {
92 case DP_AUX_NATIVE_READ: 91 case DP_AUX_NATIVE_READ:
93 res = dal_ddc_service_read_dpcd_data( 92 type = AUX_TRANSACTION_TYPE_DP;
94 TO_DM_AUX(aux)->ddc_service, 93 action = I2CAUX_TRANSACTION_ACTION_DP_READ;
95 false, 94
96 I2C_MOT_UNDEF, 95 result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
97 msg->address, 96 msg->address,
98 msg->buffer, 97 &msg->reply,
99 msg->size, 98 msg->buffer,
100 &read_bytes); 99 msg->size,
100 type,
101 action);
101 break; 102 break;
102 case DP_AUX_NATIVE_WRITE: 103 case DP_AUX_NATIVE_WRITE:
103 res = dal_ddc_service_write_dpcd_data( 104 type = AUX_TRANSACTION_TYPE_DP;
104 TO_DM_AUX(aux)->ddc_service, 105 action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
105 false, 106
106 I2C_MOT_UNDEF, 107 dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
107 msg->address, 108 msg->address,
108 msg->buffer, 109 &msg->reply,
109 msg->size); 110 msg->buffer,
111 msg->size,
112 type,
113 action);
114 result = msg->size;
110 break; 115 break;
111 case DP_AUX_I2C_READ: 116 case DP_AUX_I2C_READ:
112 res = dal_ddc_service_read_dpcd_data( 117 type = AUX_TRANSACTION_TYPE_I2C;
113 TO_DM_AUX(aux)->ddc_service, 118 if (msg->request & DP_AUX_I2C_MOT)
114 true, 119 action = I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
115 mot, 120 else
116 msg->address, 121 action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
117 msg->buffer, 122
118 msg->size, 123 result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
119 &read_bytes); 124 msg->address,
125 &msg->reply,
126 msg->buffer,
127 msg->size,
128 type,
129 action);
120 break; 130 break;
121 case DP_AUX_I2C_WRITE: 131 case DP_AUX_I2C_WRITE:
122 res = dal_ddc_service_write_dpcd_data( 132 type = AUX_TRANSACTION_TYPE_I2C;
123 TO_DM_AUX(aux)->ddc_service, 133 if (msg->request & DP_AUX_I2C_MOT)
124 true, 134 action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
125 mot, 135 else
126 msg->address, 136 action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
127 msg->buffer, 137
128 msg->size); 138 dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
139 msg->address,
140 &msg->reply,
141 msg->buffer,
142 msg->size,
143 type,
144 action);
145 result = msg->size;
129 break; 146 break;
130 default: 147 default:
131 return 0; 148 return -EINVAL;
132 } 149 }
133 150
134#ifdef TRACE_DPCD 151#ifdef TRACE_DPCD
@@ -139,9 +156,10 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
139 r == DDC_RESULT_SUCESSFULL); 156 r == DDC_RESULT_SUCESSFULL);
140#endif 157#endif
141 158
142 if (res != DDC_RESULT_SUCESSFULL) 159 if (result < 0) /* DC doesn't know about kernel error codes */
143 return -EIO; 160 result = -EIO;
144 return read_bytes; 161
162 return result;
145} 163}
146 164
147static enum drm_connector_status 165static enum drm_connector_status
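
The rewritten dm_dp_aux_transfer() now maps each DRM AUX request onto an explicit (type, action) pair before calling dc_link_aux_transfer(); reads return the transfer result, while writes report msg->size on success. Summarized from the hunk:

    DRM request          type                      action
    DP_AUX_NATIVE_READ   AUX_TRANSACTION_TYPE_DP   ..._ACTION_DP_READ
    DP_AUX_NATIVE_WRITE  AUX_TRANSACTION_TYPE_DP   ..._ACTION_DP_WRITE
    DP_AUX_I2C_READ      AUX_TRANSACTION_TYPE_I2C  ..._ACTION_I2C_READ[_MOT]
    DP_AUX_I2C_WRITE     AUX_TRANSACTION_TYPE_I2C  ..._ACTION_I2C_WRITE[_MOT]
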
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 50e863024f58..c69ae78d82b2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -192,6 +192,33 @@ static enum amd_pp_clock_type dc_to_pp_clock_type(
192 return amd_pp_clk_type; 192 return amd_pp_clk_type;
193} 193}
194 194
195static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
196 enum PP_DAL_POWERLEVEL max_clocks_state)
197{
198 switch (max_clocks_state) {
199 case PP_DAL_POWERLEVEL_0:
200 return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
201 case PP_DAL_POWERLEVEL_1:
202 return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
203 case PP_DAL_POWERLEVEL_2:
204 return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
205 case PP_DAL_POWERLEVEL_3:
206 return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
207 case PP_DAL_POWERLEVEL_4:
208 return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
209 case PP_DAL_POWERLEVEL_5:
210 return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
211 case PP_DAL_POWERLEVEL_6:
212 return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
213 case PP_DAL_POWERLEVEL_7:
214 return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
215 default:
216 DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
217 max_clocks_state);
218 return DM_PP_CLOCKS_STATE_INVALID;
219 }
220}
221
195static void pp_to_dc_clock_levels( 222static void pp_to_dc_clock_levels(
196 const struct amd_pp_clocks *pp_clks, 223 const struct amd_pp_clocks *pp_clks,
197 struct dm_pp_clock_levels *dc_clks, 224 struct dm_pp_clock_levels *dc_clks,
@@ -441,7 +468,7 @@ bool dm_pp_get_static_clocks(
441 if (ret) 468 if (ret)
442 return false; 469 return false;
443 470
444 static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state; 471 static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
445 static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock; 472 static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
446 static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock; 473 static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
447 474
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index aed538a4d1ba..532a515fda9a 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -25,7 +25,7 @@
25 25
26DC_LIBS = basics bios calcs dce gpio i2caux irq virtual 26DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
27 27
28ifdef CONFIG_DRM_AMD_DC_DCN1_0 28ifdef CONFIG_X86
29DC_LIBS += dcn10 dml 29DC_LIBS += dcn10 dml
30endif 30endif
31 31
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index b49ea96b5dae..a50a76471107 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -25,7 +25,7 @@
25# subcomponents. 25# subcomponents.
26 26
27BASICS = conversion.o fixpt31_32.o \ 27BASICS = conversion.o fixpt31_32.o \
28 logger.o log_helpers.o vector.o 28 log_helpers.o vector.o
29 29
30AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS)) 30AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
31 31
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
index f6c00a51d51a..26583f346c39 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -28,77 +28,12 @@
28#include "include/logger_interface.h" 28#include "include/logger_interface.h"
29#include "dm_helpers.h" 29#include "dm_helpers.h"
30 30
31#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) 31void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
32
33struct dc_signal_type_info {
34 enum signal_type type;
35 char name[MAX_NAME_LEN];
36};
37
38static const struct dc_signal_type_info signal_type_info_tbl[] = {
39 {SIGNAL_TYPE_NONE, "NC"},
40 {SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
41 {SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
42 {SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
43 {SIGNAL_TYPE_LVDS, "LVDS"},
44 {SIGNAL_TYPE_RGB, "VGA"},
45 {SIGNAL_TYPE_DISPLAY_PORT, "DP"},
46 {SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
47 {SIGNAL_TYPE_EDP, "eDP"},
48 {SIGNAL_TYPE_VIRTUAL, "Virtual"}
49};
50
51void dc_conn_log(struct dc_context *ctx,
52 const struct dc_link *link,
53 uint8_t *hex_data,
54 int hex_data_count,
55 enum dc_log_type event,
56 const char *msg,
57 ...)
58{ 32{
59 int i; 33 int i;
60 va_list args;
61 struct log_entry entry = { 0 };
62 enum signal_type signal;
63
64 if (link->local_sink)
65 signal = link->local_sink->sink_signal;
66 else
67 signal = link->connector_signal;
68
69 if (link->type == dc_connection_mst_branch)
70 signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
71
72 dm_logger_open(ctx->logger, &entry, event);
73
74 for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
75 if (signal == signal_type_info_tbl[i].type)
76 break;
77
78 if (i == NUM_ELEMENTS(signal_type_info_tbl))
79 goto fail;
80
81 dm_logger_append_heading(&entry);
82
83 dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
84 signal_type_info_tbl[i].name,
85 link->link_index);
86
87 va_start(args, msg);
88 dm_logger_append_va(&entry, msg, args);
89
90 if (entry.buf_offset > 0 &&
91 entry.buf[entry.buf_offset - 1] == '\n')
92 entry.buf_offset--;
93 34
94 if (hex_data) 35 if (hex_data)
95 for (i = 0; i < hex_data_count; i++) 36 for (i = 0; i < hex_data_count; i++)
96 dm_logger_append(&entry, "%2.2X ", hex_data[i]); 37 DC_LOG_DEBUG("%2.2X ", hex_data[i]);
97
98 dm_logger_append(&entry, "^\n");
99
100fail:
101 dm_logger_close(&entry);
102
103 va_end(args);
104} 38}
39
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
deleted file mode 100644
index a3c56cd8b396..000000000000
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ /dev/null
@@ -1,406 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "dm_services.h"
26#include "include/logger_interface.h"
27#include "logger.h"
28
29
30#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
31
32static const struct dc_log_type_info log_type_info_tbl[] = {
33 {LOG_ERROR, "Error"},
34 {LOG_WARNING, "Warning"},
35 {LOG_DEBUG, "Debug"},
36 {LOG_DC, "DC_Interface"},
37 {LOG_DTN, "DTN"},
38 {LOG_SURFACE, "Surface"},
39 {LOG_HW_HOTPLUG, "HW_Hotplug"},
40 {LOG_HW_LINK_TRAINING, "HW_LKTN"},
41 {LOG_HW_SET_MODE, "HW_Mode"},
42 {LOG_HW_RESUME_S3, "HW_Resume"},
43 {LOG_HW_AUDIO, "HW_Audio"},
44 {LOG_HW_HPD_IRQ, "HW_HPDIRQ"},
45 {LOG_MST, "MST"},
46 {LOG_SCALER, "Scaler"},
47 {LOG_BIOS, "BIOS"},
48 {LOG_BANDWIDTH_CALCS, "BWCalcs"},
49 {LOG_BANDWIDTH_VALIDATION, "BWValidation"},
50 {LOG_I2C_AUX, "I2C_AUX"},
51 {LOG_SYNC, "Sync"},
52 {LOG_BACKLIGHT, "Backlight"},
53 {LOG_FEATURE_OVERRIDE, "Override"},
54 {LOG_DETECTION_EDID_PARSER, "Edid"},
55 {LOG_DETECTION_DP_CAPS, "DP_Caps"},
56 {LOG_RESOURCE, "Resource"},
57 {LOG_DML, "DML"},
58 {LOG_EVENT_MODE_SET, "Mode"},
59 {LOG_EVENT_DETECTION, "Detect"},
60 {LOG_EVENT_LINK_TRAINING, "LKTN"},
61 {LOG_EVENT_LINK_LOSS, "LinkLoss"},
62 {LOG_EVENT_UNDERFLOW, "Underflow"},
63 {LOG_IF_TRACE, "InterfaceTrace"},
64 {LOG_PERF_TRACE, "PerfTrace"},
65 {LOG_DISPLAYSTATS, "DisplayStats"}
66};
67
68
69/* ----------- Object init and destruction ----------- */
70static bool construct(struct dc_context *ctx, struct dal_logger *logger,
71 uint32_t log_mask)
72{
73 /* malloc buffer and init offsets */
74 logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
75 logger->log_buffer = kcalloc(logger->log_buffer_size, sizeof(char),
76 GFP_KERNEL);
77 if (!logger->log_buffer)
78 return false;
79
80 /* Initialize both offsets to start of buffer (empty) */
81 logger->buffer_read_offset = 0;
82 logger->buffer_write_offset = 0;
83
84 logger->open_count = 0;
85
86 logger->flags.bits.ENABLE_CONSOLE = 1;
87 logger->flags.bits.ENABLE_BUFFER = 0;
88
89 logger->ctx = ctx;
90
91 logger->mask = log_mask;
92
93 return true;
94}
95
96static void destruct(struct dal_logger *logger)
97{
98 if (logger->log_buffer) {
99 kfree(logger->log_buffer);
100 logger->log_buffer = NULL;
101 }
102}
103
104struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
105{
106 /* malloc struct */
107 struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
108 GFP_KERNEL);
109
110 if (!logger)
111 return NULL;
112 if (!construct(ctx, logger, log_mask)) {
113 kfree(logger);
114 return NULL;
115 }
116
117 return logger;
118}
119
120uint32_t dal_logger_destroy(struct dal_logger **logger)
121{
122 if (logger == NULL || *logger == NULL)
123 return 1;
124 destruct(*logger);
125 kfree(*logger);
126 *logger = NULL;
127
128 return 0;
129}
130
131/* ------------------------------------------------------------------------ */
132void dm_logger_append_heading(struct log_entry *entry)
133{
134 int j;
135
136 for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
137
138 const struct dc_log_type_info *info = &log_type_info_tbl[j];
139
140 if (info->type == entry->type)
141 dm_logger_append(entry, "[%s]\t", info->name);
142 }
143}
144
145
146/* Print everything unread existing in log_buffer to debug console*/
147void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
148{
149 char *string_start = &logger->log_buffer[logger->buffer_read_offset];
150
151 if (should_warn)
152 dm_output_to_console(
153 "---------------- FLUSHING LOG BUFFER ----------------\n");
154 while (logger->buffer_read_offset < logger->buffer_write_offset) {
155
156 if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
157 dm_output_to_console("%s", string_start);
158 string_start = logger->log_buffer + logger->buffer_read_offset + 1;
159 }
160 logger->buffer_read_offset++;
161 }
162 if (should_warn)
163 dm_output_to_console(
164 "-------------- END FLUSHING LOG BUFFER --------------\n\n");
165}
166/* ------------------------------------------------------------------------ */
167
168/* Warning: Be careful that 'msg' is null terminated and the total size is
169 * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
170 */
171static bool dal_logger_should_log(
172 struct dal_logger *logger,
173 enum dc_log_type log_type)
174{
175 if (logger->mask & (1 << log_type))
176 return true;
177
178 return false;
179}
180
181static void log_to_debug_console(struct log_entry *entry)
182{
183 struct dal_logger *logger = entry->logger;
184
185 if (logger->flags.bits.ENABLE_CONSOLE == 0)
186 return;
187
188 if (entry->buf_offset) {
189 switch (entry->type) {
190 case LOG_ERROR:
191 dm_error("%s", entry->buf);
192 break;
193 default:
194 dm_output_to_console("%s", entry->buf);
195 break;
196 }
197 }
198}
199
200
201static void log_to_internal_buffer(struct log_entry *entry)
202{
203
204 uint32_t size = entry->buf_offset;
205 struct dal_logger *logger = entry->logger;
206
207 if (logger->flags.bits.ENABLE_BUFFER == 0)
208 return;
209
210 if (logger->log_buffer == NULL)
211 return;
212
213 if (size > 0 && size < logger->log_buffer_size) {
214
215 int buffer_space = logger->log_buffer_size -
216 logger->buffer_write_offset;
217
218 if (logger->buffer_write_offset == logger->buffer_read_offset) {
219 /* Buffer is empty, start writing at beginning */
220 buffer_space = logger->log_buffer_size;
221 logger->buffer_write_offset = 0;
222 logger->buffer_read_offset = 0;
223 }
224
225 if (buffer_space > size) {
226 /* No wrap around, copy 'size' bytes
227 * from 'entry->buf' to 'log_buffer'
228 */
229 memmove(logger->log_buffer +
230 logger->buffer_write_offset,
231 entry->buf, size);
232 logger->buffer_write_offset += size;
233
234 } else {
235 /* Not enough room remaining, we should flush
236 * existing logs */
237
238 /* Flush existing unread logs to console */
239 dm_logger_flush_buffer(logger, true);
240
241 /* Start writing to beginning of buffer */
242 memmove(logger->log_buffer, entry->buf, size);
243 logger->buffer_write_offset = size;
244 logger->buffer_read_offset = 0;
245 }
246
247 }
248}
249
250static void append_entry(
251 struct log_entry *entry,
252 char *buffer,
253 uint32_t buf_size)
254{
255 if (!entry->buf ||
256 entry->buf_offset + buf_size > entry->max_buf_bytes
257 ) {
258 BREAK_TO_DEBUGGER();
259 return;
260 }
261
262 /* Todo: check if off by 1 byte due to \0 anywhere */
263 memmove(entry->buf + entry->buf_offset, buffer, buf_size);
264 entry->buf_offset += buf_size;
265}
266
267
268void dm_logger_write(
269 struct dal_logger *logger,
270 enum dc_log_type log_type,
271 const char *msg,
272 ...)
273{
274 if (logger && dal_logger_should_log(logger, log_type)) {
275 uint32_t size;
276 va_list args;
277 char buffer[LOG_MAX_LINE_SIZE];
278 struct log_entry entry;
279
280 va_start(args, msg);
281
282 entry.logger = logger;
283
284 entry.buf = buffer;
285
286 entry.buf_offset = 0;
287 entry.max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
288
289 entry.type = log_type;
290
291 dm_logger_append_heading(&entry);
292
293 size = dm_log_to_buffer(
294 buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
295
296 buffer[entry.buf_offset + size] = '\0';
297 entry.buf_offset += size + 1;
298
299 /* --Flush log_entry buffer-- */
300 /* print to kernel console */
301 log_to_debug_console(&entry);
302 /* log internally for dsat */
303 log_to_internal_buffer(&entry);
304
305 va_end(args);
306 }
307}
308
309/* Same as dm_logger_write, except without open() and close(), which must
310 * be done separately.
311 */
312void dm_logger_append(
313 struct log_entry *entry,
314 const char *msg,
315 ...)
316{
317 va_list args;
318
319 va_start(args, msg);
320 dm_logger_append_va(entry, msg, args);
321 va_end(args);
322}
323
324void dm_logger_append_va(
325 struct log_entry *entry,
326 const char *msg,
327 va_list args)
328{
329 struct dal_logger *logger;
330
331 if (!entry) {
332 BREAK_TO_DEBUGGER();
333 return;
334 }
335
336 logger = entry->logger;
337
338 if (logger && logger->open_count > 0 &&
339 dal_logger_should_log(logger, entry->type)) {
340
341 uint32_t size;
342 char buffer[LOG_MAX_LINE_SIZE];
343
344 size = dm_log_to_buffer(
345 buffer, LOG_MAX_LINE_SIZE, msg, args);
346
347 if (size < LOG_MAX_LINE_SIZE - 1) {
348 append_entry(entry, buffer, size);
349 } else {
350 append_entry(entry, "LOG_ERROR, line too long\n", 27);
351 }
352 }
353}
354
355void dm_logger_open(
356 struct dal_logger *logger,
357 struct log_entry *entry, /* out */
358 enum dc_log_type log_type)
359{
360 if (!entry) {
361 BREAK_TO_DEBUGGER();
362 return;
363 }
364
365 entry->type = log_type;
366 entry->logger = logger;
367
368 entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE,
369 GFP_KERNEL);
370
371 entry->buf_offset = 0;
372 entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
373
374 logger->open_count++;
375
376 dm_logger_append_heading(entry);
377}
378
379void dm_logger_close(struct log_entry *entry)
380{
381 struct dal_logger *logger = entry->logger;
382
383 if (logger && logger->open_count > 0) {
384 logger->open_count--;
385 } else {
386 BREAK_TO_DEBUGGER();
387 goto cleanup;
388 }
389
390 /* --Flush log_entry buffer-- */
391 /* print to kernel console */
392 log_to_debug_console(entry);
393 /* log internally for dsat */
394 log_to_internal_buffer(entry);
395
396 /* TODO: Write end heading */
397
398cleanup:
399 if (entry->buf) {
400 kfree(entry->buf);
401 entry->buf = NULL;
402 entry->buf_offset = 0;
403 entry->max_buf_bytes = 0;
404 }
405}
406
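
With the ring-buffer logger gone, DC log output flows through the DC_LOG_* wrappers keyed off a per-file DC_LOGGER definition, as the calcs_logger.h and dce_calcs.c hunks below show. A minimal sketch of the post-removal pattern (assuming DC_LOG_* expands to the DM logging backend, as the log_helpers.c hunk above suggests):

    /* sketch: per-file logger hookup after logger.c removal */
    #define DC_LOGGER ctx->logger

    static void dump_state(struct dc_context *ctx)
    {
        DC_LOG_DEBUG("state dumps now go through DC_LOG_*\n");
    }
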
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index aeb56e402ccc..eab007e1793c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -678,7 +678,7 @@ static enum bp_result bios_parser_get_gpio_pin_info(
678 return BP_RESULT_BADBIOSTABLE; 678 return BP_RESULT_BADBIOSTABLE;
679 679
680 if (sizeof(struct atom_common_table_header) + 680 if (sizeof(struct atom_common_table_header) +
681 sizeof(struct atom_gpio_pin_lut_v2_1) 681 sizeof(struct atom_gpio_pin_assignment)
682 > le16_to_cpu(header->table_header.structuresize)) 682 > le16_to_cpu(header->table_header.structuresize))
683 return BP_RESULT_BADBIOSTABLE; 683 return BP_RESULT_BADBIOSTABLE;
684 684
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index bbbcef566c55..770ff89ba7e1 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -55,7 +55,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
55 case DCE_VERSION_11_22: 55 case DCE_VERSION_11_22:
56 *h = dal_cmd_tbl_helper_dce112_get_table2(); 56 *h = dal_cmd_tbl_helper_dce112_get_table2();
57 return true; 57 return true;
58#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 58#ifdef CONFIG_X86
59 case DCN_VERSION_1_0: 59 case DCN_VERSION_1_0:
60 *h = dal_cmd_tbl_helper_dce112_get_table2(); 60 *h = dal_cmd_tbl_helper_dce112_get_table2();
61 return true; 61 return true;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 95f332ee3e7e..416500e51b8d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -38,7 +38,7 @@ CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
38 38
39BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o 39BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
40 40
41ifdef CONFIG_DRM_AMD_DC_DCN1_0 41ifdef CONFIG_X86
42BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o 42BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
43endif 43endif
44 44
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
index fc3f98fb09ea..62435bfc274d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
+++ b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
@@ -25,10 +25,9 @@
25 25
26#ifndef _CALCS_CALCS_LOGGER_H_ 26#ifndef _CALCS_CALCS_LOGGER_H_
27#define _CALCS_CALCS_LOGGER_H_ 27#define _CALCS_CALCS_LOGGER_H_
28#define DC_LOGGER \ 28#define DC_LOGGER ctx->logger
29 logger
30 29
31static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip) 30static void print_bw_calcs_dceip(struct dc_context *ctx, const struct bw_calcs_dceip *dceip)
32{ 31{
33 32
34 DC_LOG_BANDWIDTH_CALCS("#####################################################################"); 33 DC_LOG_BANDWIDTH_CALCS("#####################################################################");
@@ -122,7 +121,7 @@ static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calc
122 121
123} 122}
124 123
125static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios) 124static void print_bw_calcs_vbios(struct dc_context *ctx, const struct bw_calcs_vbios *vbios)
126{ 125{
127 126
128 DC_LOG_BANDWIDTH_CALCS("#####################################################################"); 127 DC_LOG_BANDWIDTH_CALCS("#####################################################################");
@@ -181,7 +180,7 @@ static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calc
181 180
182} 181}
183 182
184static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data) 183static void print_bw_calcs_data(struct dc_context *ctx, struct bw_calcs_data *data)
185{ 184{
186 185
187 int i, j, k; 186 int i, j, k;
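[Editor's note] print_bw_calcs_dceip/vbios/data now take the struct dc_context rather than a bare dal_logger, and DC_LOGGER is redefined to ctx->logger so the DC_LOG_BANDWIDTH_CALCS() calls in their bodies resolve against the context in scope. A rough sketch of the macro pattern (the expansion shown is illustrative, not the exact kernel definition):

	#define DC_LOGGER ctx->logger  /* per-file choice of logger expression */

	/* Illustrative expansion; the real DC_LOG_* macros live in the logger headers. */
	#define DC_LOG_BANDWIDTH_CALCS(...) \
		dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_CALCS, __VA_ARGS__)

	static void print_example(struct dc_context *ctx)
	{
		DC_LOG_BANDWIDTH_CALCS("vbios high_yclk: %d", 0); /* uses ctx->logger */
	}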
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 2c4e8f0cb2dc..160d11a15eac 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3010,9 +3010,9 @@ bool bw_calcs(struct dc_context *ctx,
3010 struct bw_fixed low_yclk = vbios->low_yclk; 3010 struct bw_fixed low_yclk = vbios->low_yclk;
3011 3011
3012 if (ctx->dc->debug.bandwidth_calcs_trace) { 3012 if (ctx->dc->debug.bandwidth_calcs_trace) {
3013 print_bw_calcs_dceip(ctx->logger, dceip); 3013 print_bw_calcs_dceip(ctx, dceip);
3014 print_bw_calcs_vbios(ctx->logger, vbios); 3014 print_bw_calcs_vbios(ctx, vbios);
3015 print_bw_calcs_data(ctx->logger, data); 3015 print_bw_calcs_data(ctx, data);
3016 } 3016 }
3017 calculate_bandwidth(dceip, vbios, data); 3017 calculate_bandwidth(dceip, vbios, data);
3018 3018
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index e44b8d3d6891..080f777d705e 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -250,7 +250,24 @@ static void pipe_ctx_to_e2e_pipe_params (
250 else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state) 250 else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
251 input->src.is_hsplit = true; 251 input->src.is_hsplit = true;
252 252
253 input->src.dcc = pipe->plane_state->dcc.enable; 253 if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) {
254 /*
255 * this method requires us to always re-calculate watermark when dcc change
256 * between flip.
257 */
258 input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0;
259 } else {
260 /*
 261 * allows us to disable dcc on the fly without re-calculating WM
262 *
 263 * extra overhead for DCC is quite small. For 1080p, WM without
264 * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
265 */
266 unsigned int bpe;
267
268 input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs->
269 dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0;
270 }
254 input->src.dcc_rate = 1; 271 input->src.dcc_rate = 1;
255 input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch; 272 input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch;
256 input->src.source_scan = dm_horz; 273 input->src.source_scan = dm_horz;
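[Editor's note] The dcn_calcs.c hunk above trades watermark accuracy against flip-time flexibility: with debug.optimized_watermark set, the calculation sees the surface's real DCC state and watermarks must be recomputed whenever DCC toggles between flips; otherwise it conservatively assumes DCC for any pixel format the hubbub could compress, so DCC may be disabled on the fly at a measured cost of roughly 0.4us of urgent watermark at 1080p. Condensed into a sketch using only names from the hunk:

	/* Condensed restatement of the selection above (sketch only). */
	unsigned int bpe;
	bool dcc_for_wm;

	if (dc->debug.optimized_watermark)
		dcc_for_wm = plane_state->dcc.enable;  /* exact; WM must track flips */
	else
		dcc_for_wm = hubbub->funcs->dcc_support_pixel_format(
				plane_state->format, &bpe); /* worst case for the format */

	input->src.dcc = dcc_for_wm ? 1 : 0;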
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 2a785bbf2b8f..733ac224e7fd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -384,6 +384,71 @@ void dc_stream_set_static_screen_events(struct dc *dc,
384 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events); 384 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
385} 385}
386 386
387void dc_link_set_drive_settings(struct dc *dc,
388 struct link_training_settings *lt_settings,
389 const struct dc_link *link)
390{
391
392 int i;
393
394 for (i = 0; i < dc->link_count; i++) {
395 if (dc->links[i] == link)
396 break;
397 }
398
399 if (i >= dc->link_count)
400 ASSERT_CRITICAL(false);
401
402 dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
403}
404
405void dc_link_perform_link_training(struct dc *dc,
406 struct dc_link_settings *link_setting,
407 bool skip_video_pattern)
408{
409 int i;
410
411 for (i = 0; i < dc->link_count; i++)
412 dc_link_dp_perform_link_training(
413 dc->links[i],
414 link_setting,
415 skip_video_pattern);
416}
417
418void dc_link_set_preferred_link_settings(struct dc *dc,
419 struct dc_link_settings *link_setting,
420 struct dc_link *link)
421{
422 link->preferred_link_setting = *link_setting;
423 dp_retrain_link_dp_test(link, link_setting, false);
424}
425
426void dc_link_enable_hpd(const struct dc_link *link)
427{
428 dc_link_dp_enable_hpd(link);
429}
430
431void dc_link_disable_hpd(const struct dc_link *link)
432{
433 dc_link_dp_disable_hpd(link);
434}
435
436
437void dc_link_set_test_pattern(struct dc_link *link,
438 enum dp_test_pattern test_pattern,
439 const struct link_training_settings *p_link_settings,
440 const unsigned char *p_custom_pattern,
441 unsigned int cust_pattern_size)
442{
443 if (link != NULL)
444 dc_link_dp_set_test_pattern(
445 link,
446 test_pattern,
447 p_link_settings,
448 p_custom_pattern,
449 cust_pattern_size);
450}
451
387static void destruct(struct dc *dc) 452static void destruct(struct dc *dc)
388{ 453{
389 dc_release_state(dc->current_state); 454 dc_release_state(dc->current_state);
@@ -402,9 +467,6 @@ static void destruct(struct dc *dc)
402 if (dc->ctx->created_bios) 467 if (dc->ctx->created_bios)
403 dal_bios_parser_destroy(&dc->ctx->dc_bios); 468 dal_bios_parser_destroy(&dc->ctx->dc_bios);
404 469
405 if (dc->ctx->logger)
406 dal_logger_destroy(&dc->ctx->logger);
407
408 kfree(dc->ctx); 470 kfree(dc->ctx);
409 dc->ctx = NULL; 471 dc->ctx = NULL;
410 472
@@ -414,7 +476,7 @@ static void destruct(struct dc *dc)
414 kfree(dc->bw_dceip); 476 kfree(dc->bw_dceip);
415 dc->bw_dceip = NULL; 477 dc->bw_dceip = NULL;
416 478
417#ifdef CONFIG_DRM_AMD_DC_DCN1_0 479#ifdef CONFIG_X86
418 kfree(dc->dcn_soc); 480 kfree(dc->dcn_soc);
419 dc->dcn_soc = NULL; 481 dc->dcn_soc = NULL;
420 482
@@ -427,11 +489,10 @@ static void destruct(struct dc *dc)
427static bool construct(struct dc *dc, 489static bool construct(struct dc *dc,
428 const struct dc_init_data *init_params) 490 const struct dc_init_data *init_params)
429{ 491{
430 struct dal_logger *logger;
431 struct dc_context *dc_ctx; 492 struct dc_context *dc_ctx;
432 struct bw_calcs_dceip *dc_dceip; 493 struct bw_calcs_dceip *dc_dceip;
433 struct bw_calcs_vbios *dc_vbios; 494 struct bw_calcs_vbios *dc_vbios;
434#ifdef CONFIG_DRM_AMD_DC_DCN1_0 495#ifdef CONFIG_X86
435 struct dcn_soc_bounding_box *dcn_soc; 496 struct dcn_soc_bounding_box *dcn_soc;
436 struct dcn_ip_params *dcn_ip; 497 struct dcn_ip_params *dcn_ip;
437#endif 498#endif
@@ -453,7 +514,7 @@ static bool construct(struct dc *dc,
453 } 514 }
454 515
455 dc->bw_vbios = dc_vbios; 516 dc->bw_vbios = dc_vbios;
456#ifdef CONFIG_DRM_AMD_DC_DCN1_0 517#ifdef CONFIG_X86
457 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); 518 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
458 if (!dcn_soc) { 519 if (!dcn_soc) {
459 dm_error("%s: failed to create dcn_soc\n", __func__); 520 dm_error("%s: failed to create dcn_soc\n", __func__);
@@ -492,14 +553,7 @@ static bool construct(struct dc *dc,
492 } 553 }
493 554
494 /* Create logger */ 555 /* Create logger */
495 logger = dal_logger_create(dc_ctx, init_params->log_mask);
496 556
497 if (!logger) {
498 /* can *not* call logger. call base driver 'print error' */
499 dm_error("%s: failed to create Logger!\n", __func__);
500 goto fail;
501 }
502 dc_ctx->logger = logger;
503 dc_ctx->dce_environment = init_params->dce_environment; 557 dc_ctx->dce_environment = init_params->dce_environment;
504 558
505 dc_version = resource_parse_asic_id(init_params->asic_id); 559 dc_version = resource_parse_asic_id(init_params->asic_id);
@@ -918,9 +972,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
918 for (i = 0; i < context->stream_count; i++) { 972 for (i = 0; i < context->stream_count; i++) {
919 struct dc_stream_state *stream = context->streams[i]; 973 struct dc_stream_state *stream = context->streams[i];
920 974
921 dc_stream_log(stream, 975 dc_stream_log(dc, stream);
922 dc->ctx->logger,
923 LOG_DC);
924 } 976 }
925 977
926 result = dc_commit_state_no_check(dc, context); 978 result = dc_commit_state_no_check(dc, context);
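[Editor's note] dc.c grows thin dc_link_* wrappers so debug and test callers (the debugfs entries added elsewhere in this series) can drive link training, HPD, and DP test patterns without reaching into dc_link_dp internals, while dc_stream_log() loses its explicit logger/log-type arguments. A hedged usage sketch for the new preferred-settings wrapper, assuming the standard dc_dp_types enum values:

	/* Sketch: force a link to retrain at RBR x1. */
	static void force_rbr_x1(struct dc *dc, struct dc_link *link)
	{
		struct dc_link_settings settings = {
			.lane_count = LANE_COUNT_ONE,
			.link_rate = LINK_RATE_LOW,        /* 1.62 Gbps per lane */
			.link_spread = LINK_SPREAD_DISABLED,
		};

		/* records the preference, then kicks dp_retrain_link_dp_test() */
		dc_link_set_preferred_link_settings(dc, &settings, link);
	}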
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index e1ebdf7b5eaf..caece7c13bc6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -348,7 +348,7 @@ void context_clock_trace(
348 struct dc *dc, 348 struct dc *dc,
349 struct dc_state *context) 349 struct dc_state *context)
350{ 350{
351#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 351#ifdef CONFIG_X86
352 DC_LOGGER_INIT(dc->ctx->logger); 352 DC_LOGGER_INIT(dc->ctx->logger);
353 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" 353 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
354 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", 354 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fa56c0fc02bf..a4429c90c60c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -313,7 +313,7 @@ static enum signal_type get_basic_signal_type(
313 * @brief 313 * @brief
314 * Check whether there is a dongle on DP connector 314 * Check whether there is a dongle on DP connector
315 */ 315 */
316static bool is_dp_sink_present(struct dc_link *link) 316bool dc_link_is_dp_sink_present(struct dc_link *link)
317{ 317{
318 enum gpio_result gpio_result; 318 enum gpio_result gpio_result;
319 uint32_t clock_pin = 0; 319 uint32_t clock_pin = 0;
@@ -406,7 +406,7 @@ static enum signal_type link_detect_sink(
406 * we assume signal is DVI; it could be corrected 406 * we assume signal is DVI; it could be corrected
407 * to HDMI after dongle detection 407 * to HDMI after dongle detection
408 */ 408 */
409 if (!is_dp_sink_present(link)) 409 if (!dm_helpers_is_dp_sink_present(link))
410 result = SIGNAL_TYPE_DVI_SINGLE_LINK; 410 result = SIGNAL_TYPE_DVI_SINGLE_LINK;
411 } 411 }
412 } 412 }
@@ -498,6 +498,10 @@ static bool detect_dp(
498 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST; 498 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
499 link->type = dc_connection_mst_branch; 499 link->type = dc_connection_mst_branch;
500 500
501 dal_ddc_service_set_transaction_type(
502 link->ddc,
503 sink_caps->transaction_type);
504
501 /* 505 /*
502 * This call will initiate MST topology discovery. Which 506 * This call will initiate MST topology discovery. Which
503 * will detect MST ports and add new DRM connector DRM 507 * will detect MST ports and add new DRM connector DRM
@@ -525,6 +529,10 @@ static bool detect_dp(
525 if (reason == DETECT_REASON_BOOT) 529 if (reason == DETECT_REASON_BOOT)
526 boot = true; 530 boot = true;
527 531
532 dm_helpers_dp_update_branch_info(
533 link->ctx,
534 link);
535
528 if (!dm_helpers_dp_mst_start_top_mgr( 536 if (!dm_helpers_dp_mst_start_top_mgr(
529 link->ctx, 537 link->ctx,
530 link, boot)) { 538 link, boot)) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index ae48d603ebd6..08c9d73b9ab7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -33,6 +33,10 @@
33#include "include/vector.h" 33#include "include/vector.h"
34#include "core_types.h" 34#include "core_types.h"
35#include "dc_link_ddc.h" 35#include "dc_link_ddc.h"
36#include "i2caux/engine.h"
37#include "i2caux/i2c_engine.h"
38#include "i2caux/aux_engine.h"
39#include "i2caux/i2caux.h"
36 40
37#define AUX_POWER_UP_WA_DELAY 500 41#define AUX_POWER_UP_WA_DELAY 500
38#define I2C_OVER_AUX_DEFER_WA_DELAY 70 42#define I2C_OVER_AUX_DEFER_WA_DELAY 70
@@ -629,83 +633,62 @@ bool dal_ddc_service_query_ddc_data(
629 return ret; 633 return ret;
630} 634}
631 635
632enum ddc_result dal_ddc_service_read_dpcd_data( 636int dc_link_aux_transfer(struct ddc_service *ddc,
633 struct ddc_service *ddc, 637 unsigned int address,
634 bool i2c, 638 uint8_t *reply,
635 enum i2c_mot_mode mot, 639 void *buffer,
636 uint32_t address, 640 unsigned int size,
637 uint8_t *data, 641 enum aux_transaction_type type,
638 uint32_t len, 642 enum i2caux_transaction_action action)
639 uint32_t *read)
640{ 643{
641 struct aux_payload read_payload = { 644 struct i2caux *i2caux = ddc->ctx->i2caux;
642 .i2c_over_aux = i2c, 645 struct ddc *ddc_pin = ddc->ddc_pin;
643 .write = false, 646 struct aux_engine *engine;
644 .address = address, 647 enum aux_channel_operation_result operation_result;
645 .length = len, 648 struct aux_request_transaction_data aux_req;
646 .data = data, 649 struct aux_reply_transaction_data aux_rep;
647 }; 650 uint8_t returned_bytes = 0;
648 struct aux_command command = { 651 int res = -1;
649 .payloads = &read_payload, 652 uint32_t status;
650 .number_of_payloads = 1,
651 .defer_delay = 0,
652 .max_defer_write_retry = 0,
653 .mot = mot
654 };
655
656 *read = 0;
657
658 if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
659 BREAK_TO_DEBUGGER();
660 return DDC_RESULT_FAILED_INVALID_OPERATION;
661 }
662 653
663 if (dal_i2caux_submit_aux_command( 654 memset(&aux_req, 0, sizeof(aux_req));
664 ddc->ctx->i2caux, 655 memset(&aux_rep, 0, sizeof(aux_rep));
665 ddc->ddc_pin,
666 &command)) {
667 *read = command.payloads->length;
668 return DDC_RESULT_SUCESSFULL;
669 }
670 656
671 return DDC_RESULT_FAILED_OPERATION; 657 engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc_pin);
672}
673 658
674enum ddc_result dal_ddc_service_write_dpcd_data( 659 aux_req.type = type;
675 struct ddc_service *ddc, 660 aux_req.action = action;
676 bool i2c, 661
677 enum i2c_mot_mode mot, 662 aux_req.address = address;
678 uint32_t address, 663 aux_req.delay = 0;
679 const uint8_t *data, 664 aux_req.length = size;
680 uint32_t len) 665 aux_req.data = buffer;
681{
682 struct aux_payload write_payload = {
683 .i2c_over_aux = i2c,
684 .write = true,
685 .address = address,
686 .length = len,
687 .data = (uint8_t *)data,
688 };
689 struct aux_command command = {
690 .payloads = &write_payload,
691 .number_of_payloads = 1,
692 .defer_delay = 0,
693 .max_defer_write_retry = 0,
694 .mot = mot
695 };
696
697 if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
698 BREAK_TO_DEBUGGER();
699 return DDC_RESULT_FAILED_INVALID_OPERATION;
700 }
701 666
702 if (dal_i2caux_submit_aux_command( 667 engine->funcs->submit_channel_request(engine, &aux_req);
703 ddc->ctx->i2caux, 668 operation_result = engine->funcs->get_channel_status(engine, &returned_bytes);
704 ddc->ddc_pin, 669
705 &command)) 670 switch (operation_result) {
706 return DDC_RESULT_SUCESSFULL; 671 case AUX_CHANNEL_OPERATION_SUCCEEDED:
672 res = returned_bytes;
673
674 if (res <= size && res >= 0)
675 res = engine->funcs->read_channel_reply(engine, size,
676 buffer, reply,
677 &status);
678
679 break;
680 case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
681 res = 0;
682 break;
683 case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
684 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
685 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
686 res = -1;
687 break;
688 }
707 689
708 return DDC_RESULT_FAILED_OPERATION; 690 i2caux->funcs->release_engine(i2caux, &engine->base);
691 return res;
709} 692}
710 693
711/*test only function*/ 694/*test only function*/
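[Editor's note] dal_ddc_service_read_dpcd_data/write_dpcd_data collapse into a single dc_link_aux_transfer() that drives the aux_engine directly: it returns the byte count on success, 0 when HPD dropped mid-transaction, and -1 on timeout or an invalid reply. A sketch of a native-AUX DPCD read through the new entry point, assuming the transaction type/action enum values come from the i2caux headers included above:

	/* Sketch: read DPCD lane status (0x202-0x203) over native AUX. */
	static int read_lane_status(struct ddc_service *ddc, uint8_t buf[2])
	{
		uint8_t reply;
		int got = dc_link_aux_transfer(ddc,
				DP_LANE0_1_STATUS, &reply, buf, 2,
				AUX_TRANSACTION_TYPE_DP,
				I2CAUX_TRANSACTION_ACTION_DP_READ);

		if (got < 0)
			return -EIO;    /* engine failure or timeout */
		if (got == 0)
			return -ENODEV; /* HPD disconnected */
		return got;             /* bytes actually read */
	}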
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 84586b679d73..474cd3e01752 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -39,7 +39,7 @@ static bool decide_fallback_link_setting(
39 struct dc_link_settings initial_link_settings, 39 struct dc_link_settings initial_link_settings,
40 struct dc_link_settings *current_link_setting, 40 struct dc_link_settings *current_link_setting,
41 enum link_training_result training_result); 41 enum link_training_result training_result);
42static struct dc_link_settings get_common_supported_link_settings ( 42static struct dc_link_settings get_common_supported_link_settings(
43 struct dc_link_settings link_setting_a, 43 struct dc_link_settings link_setting_a,
44 struct dc_link_settings link_setting_b); 44 struct dc_link_settings link_setting_b);
45 45
@@ -94,8 +94,8 @@ static void dpcd_set_link_settings(
94 uint8_t rate = (uint8_t) 94 uint8_t rate = (uint8_t)
95 (lt_settings->link_settings.link_rate); 95 (lt_settings->link_settings.link_rate);
96 96
97 union down_spread_ctrl downspread = {{0}}; 97 union down_spread_ctrl downspread = { {0} };
98 union lane_count_set lane_count_set = {{0}}; 98 union lane_count_set lane_count_set = { {0} };
99 uint8_t link_set_buffer[2]; 99 uint8_t link_set_buffer[2];
100 100
101 downspread.raw = (uint8_t) 101 downspread.raw = (uint8_t)
@@ -165,11 +165,11 @@ static void dpcd_set_lt_pattern_and_lane_settings(
165 const struct link_training_settings *lt_settings, 165 const struct link_training_settings *lt_settings,
166 enum hw_dp_training_pattern pattern) 166 enum hw_dp_training_pattern pattern)
167{ 167{
168 union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}}; 168 union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
169 const uint32_t dpcd_base_lt_offset = 169 const uint32_t dpcd_base_lt_offset =
170 DP_TRAINING_PATTERN_SET; 170 DP_TRAINING_PATTERN_SET;
171 uint8_t dpcd_lt_buffer[5] = {0}; 171 uint8_t dpcd_lt_buffer[5] = {0};
172 union dpcd_training_pattern dpcd_pattern = {{0}}; 172 union dpcd_training_pattern dpcd_pattern = { {0} };
173 uint32_t lane; 173 uint32_t lane;
174 uint32_t size_in_bytes; 174 uint32_t size_in_bytes;
175 bool edp_workaround = false; /* TODO link_prop.INTERNAL */ 175 bool edp_workaround = false; /* TODO link_prop.INTERNAL */
@@ -233,7 +233,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
233 link, 233 link,
234 DP_TRAINING_PATTERN_SET, 234 DP_TRAINING_PATTERN_SET,
235 &dpcd_pattern.raw, 235 &dpcd_pattern.raw,
236 sizeof(dpcd_pattern.raw) ); 236 sizeof(dpcd_pattern.raw));
237 237
238 core_link_write_dpcd( 238 core_link_write_dpcd(
239 link, 239 link,
@@ -247,7 +247,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
247 link, 247 link,
248 dpcd_base_lt_offset, 248 dpcd_base_lt_offset,
249 dpcd_lt_buffer, 249 dpcd_lt_buffer,
250 size_in_bytes + sizeof(dpcd_pattern.raw) ); 250 size_in_bytes + sizeof(dpcd_pattern.raw));
251 251
252 link->cur_lane_setting = lt_settings->lane_settings[0]; 252 link->cur_lane_setting = lt_settings->lane_settings[0];
253} 253}
@@ -429,8 +429,8 @@ static void get_lane_status_and_drive_settings(
429 struct link_training_settings *req_settings) 429 struct link_training_settings *req_settings)
430{ 430{
431 uint8_t dpcd_buf[6] = {0}; 431 uint8_t dpcd_buf[6] = {0};
432 union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}}; 432 union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
433 struct link_training_settings request_settings = {{0}}; 433 struct link_training_settings request_settings = { {0} };
434 uint32_t lane; 434 uint32_t lane;
435 435
436 memset(req_settings, '\0', sizeof(struct link_training_settings)); 436 memset(req_settings, '\0', sizeof(struct link_training_settings));
@@ -652,7 +652,7 @@ static bool perform_post_lt_adj_req_sequence(
652 652
653 if (req_drv_setting_changed) { 653 if (req_drv_setting_changed) {
654 update_drive_settings( 654 update_drive_settings(
655 lt_settings,req_settings); 655 lt_settings, req_settings);
656 656
657 dc_link_dp_set_drive_settings(link, 657 dc_link_dp_set_drive_settings(link,
658 lt_settings); 658 lt_settings);
@@ -725,8 +725,8 @@ static enum link_training_result perform_channel_equalization_sequence(
725 enum hw_dp_training_pattern hw_tr_pattern; 725 enum hw_dp_training_pattern hw_tr_pattern;
726 uint32_t retries_ch_eq; 726 uint32_t retries_ch_eq;
727 enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; 727 enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
728 union lane_align_status_updated dpcd_lane_status_updated = {{0}}; 728 union lane_align_status_updated dpcd_lane_status_updated = { {0} };
729 union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}}; 729 union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
730 730
731 hw_tr_pattern = get_supported_tp(link); 731 hw_tr_pattern = get_supported_tp(link);
732 732
@@ -1028,6 +1028,9 @@ enum link_training_result dc_link_dp_perform_link_training(
1028 lt_settings.lane_settings[0].VOLTAGE_SWING, 1028 lt_settings.lane_settings[0].VOLTAGE_SWING,
1029 lt_settings.lane_settings[0].PRE_EMPHASIS); 1029 lt_settings.lane_settings[0].PRE_EMPHASIS);
1030 1030
1031 if (status != LINK_TRAINING_SUCCESS)
1032 link->ctx->dc->debug.debug_data.ltFailCount++;
1033
1031 return status; 1034 return status;
1032} 1035}
1033 1036
@@ -1183,7 +1186,7 @@ bool dp_hbr_verify_link_cap(
1183 return success; 1186 return success;
1184} 1187}
1185 1188
1186static struct dc_link_settings get_common_supported_link_settings ( 1189static struct dc_link_settings get_common_supported_link_settings(
1187 struct dc_link_settings link_setting_a, 1190 struct dc_link_settings link_setting_a,
1188 struct dc_link_settings link_setting_b) 1191 struct dc_link_settings link_setting_b)
1189{ 1192{
@@ -1429,6 +1432,7 @@ static uint32_t bandwidth_in_kbps_from_link_settings(
1429 1432
1430 uint32_t lane_count = link_setting->lane_count; 1433 uint32_t lane_count = link_setting->lane_count;
1431 uint32_t kbps = link_rate_in_kbps; 1434 uint32_t kbps = link_rate_in_kbps;
1435
1432 kbps *= lane_count; 1436 kbps *= lane_count;
1433 kbps *= 8; /* 8 bits per byte*/ 1437 kbps *= 8; /* 8 bits per byte*/
1434 1438
@@ -1446,9 +1450,9 @@ bool dp_validate_mode_timing(
1446 const struct dc_link_settings *link_setting; 1450 const struct dc_link_settings *link_setting;
1447 1451
1448 /*always DP fail safe mode*/ 1452 /*always DP fail safe mode*/
1449 if (timing->pix_clk_khz == (uint32_t)25175 && 1453 if (timing->pix_clk_khz == (uint32_t) 25175 &&
1450 timing->h_addressable == (uint32_t)640 && 1454 timing->h_addressable == (uint32_t) 640 &&
1451 timing->v_addressable == (uint32_t)480) 1455 timing->v_addressable == (uint32_t) 480)
1452 return true; 1456 return true;
1453 1457
1454 /* We always use verified link settings */ 1458 /* We always use verified link settings */
@@ -1996,12 +2000,16 @@ static void handle_automated_test(struct dc_link *link)
1996 sizeof(test_response)); 2000 sizeof(test_response));
1997} 2001}
1998 2002
1999bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data) 2003bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
2000{ 2004{
2001 union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}}; 2005 union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
2002 union device_service_irq device_service_clear = { { 0 } }; 2006 union device_service_irq device_service_clear = { { 0 } };
2003 enum dc_status result; 2007 enum dc_status result;
2008
2004 bool status = false; 2009 bool status = false;
2010
2011 if (out_link_loss)
2012 *out_link_loss = false;
2005 /* For use cases related to downstream connection status change, 2013 /* For use cases related to downstream connection status change,
2006 * PSR and device auto test, refer to function handle_sst_hpd_irq 2014 * PSR and device auto test, refer to function handle_sst_hpd_irq
2007 * in DAL2.1*/ 2015 * in DAL2.1*/
@@ -2076,6 +2084,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
2076 true, LINK_TRAINING_ATTEMPTS); 2084 true, LINK_TRAINING_ATTEMPTS);
2077 2085
2078 status = false; 2086 status = false;
2087 if (out_link_loss)
2088 *out_link_loss = true;
2079 } 2089 }
2080 2090
2081 if (link->type == dc_connection_active_dongle && 2091 if (link->type == dc_connection_active_dongle &&
@@ -2262,6 +2272,11 @@ static void get_active_converter_info(
2262 2272
2263 link->dpcd_caps.branch_hw_revision = 2273 link->dpcd_caps.branch_hw_revision =
2264 dp_hw_fw_revision.ieee_hw_rev; 2274 dp_hw_fw_revision.ieee_hw_rev;
2275
2276 memmove(
2277 link->dpcd_caps.branch_fw_revision,
2278 dp_hw_fw_revision.ieee_fw_rev,
2279 sizeof(dp_hw_fw_revision.ieee_fw_rev));
2265 } 2280 }
2266} 2281}
2267 2282
@@ -2317,6 +2332,7 @@ static bool retrieve_link_cap(struct dc_link *link)
2317 enum dc_status status = DC_ERROR_UNEXPECTED; 2332 enum dc_status status = DC_ERROR_UNEXPECTED;
2318 uint32_t read_dpcd_retry_cnt = 3; 2333 uint32_t read_dpcd_retry_cnt = 3;
2319 int i; 2334 int i;
2335 struct dp_sink_hw_fw_revision dp_hw_fw_revision;
2320 2336
2321 memset(dpcd_data, '\0', sizeof(dpcd_data)); 2337 memset(dpcd_data, '\0', sizeof(dpcd_data));
2322 memset(&down_strm_port_count, 2338 memset(&down_strm_port_count,
@@ -2408,6 +2424,25 @@ static bool retrieve_link_cap(struct dc_link *link)
2408 (sink_id.ieee_oui[1] << 8) + 2424 (sink_id.ieee_oui[1] << 8) +
2409 (sink_id.ieee_oui[2]); 2425 (sink_id.ieee_oui[2]);
2410 2426
2427 memmove(
2428 link->dpcd_caps.sink_dev_id_str,
2429 sink_id.ieee_device_id,
2430 sizeof(sink_id.ieee_device_id));
2431
2432 core_link_read_dpcd(
2433 link,
2434 DP_SINK_HW_REVISION_START,
2435 (uint8_t *)&dp_hw_fw_revision,
2436 sizeof(dp_hw_fw_revision));
2437
2438 link->dpcd_caps.sink_hw_revision =
2439 dp_hw_fw_revision.ieee_hw_rev;
2440
2441 memmove(
2442 link->dpcd_caps.sink_fw_revision,
2443 dp_hw_fw_revision.ieee_fw_rev,
2444 sizeof(dp_hw_fw_revision.ieee_fw_rev));
2445
2411 /* Connectivity log: detection */ 2446 /* Connectivity log: detection */
2412 CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); 2447 CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
2413 2448
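[Editor's note] Besides the checkpatch-style brace-initializer cleanups, dc_link_dp.c now reports link loss back to the caller through the optional out_link_loss flag of dc_link_handle_hpd_rx_irq(), counts training failures in debug_data.ltFailCount, and caches the sink's IEEE device-ID string plus HW/FW revisions alongside the existing branch fields. A sketch of a DM-side short-pulse handler consuming the new flag (restart_streams_on_link() is a hypothetical helper):

	static void handle_hpd_rx(struct dc_link *link)
	{
		union hpd_irq_data irq_data;
		bool link_loss = false;

		if (dc_link_handle_hpd_rx_irq(link, &irq_data, &link_loss))
			return; /* downstream status changed: take the re-detect path */

		if (link_loss)
			restart_streams_on_link(link); /* hypothetical: re-enable streams */
	}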
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index c5fc5250e2bf..2e65715f76a1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -41,7 +41,7 @@
41#include "dce100/dce100_resource.h" 41#include "dce100/dce100_resource.h"
42#include "dce110/dce110_resource.h" 42#include "dce110/dce110_resource.h"
43#include "dce112/dce112_resource.h" 43#include "dce112/dce112_resource.h"
44#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 44#ifdef CONFIG_X86
45#include "dcn10/dcn10_resource.h" 45#include "dcn10/dcn10_resource.h"
46#endif 46#endif
47#include "dce120/dce120_resource.h" 47#include "dce120/dce120_resource.h"
@@ -85,7 +85,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
85 case FAMILY_AI: 85 case FAMILY_AI:
86 dc_version = DCE_VERSION_12_0; 86 dc_version = DCE_VERSION_12_0;
87 break; 87 break;
88#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 88#ifdef CONFIG_X86
89 case FAMILY_RV: 89 case FAMILY_RV:
90 dc_version = DCN_VERSION_1_0; 90 dc_version = DCN_VERSION_1_0;
91 break; 91 break;
@@ -136,7 +136,7 @@ struct resource_pool *dc_create_resource_pool(
136 num_virtual_links, dc); 136 num_virtual_links, dc);
137 break; 137 break;
138 138
139#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 139#ifdef CONFIG_X86
140 case DCN_VERSION_1_0: 140 case DCN_VERSION_1_0:
141 res_pool = dcn10_create_resource_pool( 141 res_pool = dcn10_create_resource_pool(
142 num_virtual_links, dc); 142 num_virtual_links, dc);
@@ -1213,7 +1213,7 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
1213 1213
1214} 1214}
1215 1215
1216#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 1216#ifdef CONFIG_X86
1217static int acquire_first_split_pipe( 1217static int acquire_first_split_pipe(
1218 struct resource_context *res_ctx, 1218 struct resource_context *res_ctx,
1219 const struct resource_pool *pool, 1219 const struct resource_pool *pool,
@@ -1284,7 +1284,7 @@ bool dc_add_plane_to_context(
1284 1284
1285 free_pipe = acquire_free_pipe_for_stream(context, pool, stream); 1285 free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
1286 1286
1287#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 1287#ifdef CONFIG_X86
1288 if (!free_pipe) { 1288 if (!free_pipe) {
1289 int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); 1289 int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
1290 if (pipe_idx >= 0) 1290 if (pipe_idx >= 0)
@@ -1705,8 +1705,8 @@ enum dc_status dc_add_stream_to_ctx(
1705 struct dc_context *dc_ctx = dc->ctx; 1705 struct dc_context *dc_ctx = dc->ctx;
1706 enum dc_status res; 1706 enum dc_status res;
1707 1707
1708 if (new_ctx->stream_count >= dc->res_pool->pipe_count) { 1708 if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
1709 DC_ERROR("Max streams reached, can add stream %p !\n", stream); 1709 DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
1710 return DC_ERROR_UNEXPECTED; 1710 return DC_ERROR_UNEXPECTED;
1711 } 1711 }
1712 1712
@@ -1882,7 +1882,7 @@ enum dc_status resource_map_pool_resources(
1882 /* acquire new resources */ 1882 /* acquire new resources */
1883 pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); 1883 pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
1884 1884
1885#ifdef CONFIG_DRM_AMD_DC_DCN1_0 1885#ifdef CONFIG_X86
1886 if (pipe_idx < 0) 1886 if (pipe_idx < 0)
1887 pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); 1887 pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
1888#endif 1888#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 3732a1de9d6c..fdcc8ab19bf3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -30,6 +30,8 @@
30#include "ipp.h" 30#include "ipp.h"
31#include "timing_generator.h" 31#include "timing_generator.h"
32 32
33#define DC_LOGGER dc->ctx->logger
34
33/******************************************************************************* 35/*******************************************************************************
34 * Private functions 36 * Private functions
35 ******************************************************************************/ 37 ******************************************************************************/
@@ -212,6 +214,8 @@ bool dc_stream_set_cursor_attributes(
212 } 214 }
213 215
214 core_dc->hwss.set_cursor_attribute(pipe_ctx); 216 core_dc->hwss.set_cursor_attribute(pipe_ctx);
217 if (core_dc->hwss.set_cursor_sdr_white_level)
218 core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
215 } 219 }
216 220
217 if (pipe_to_program) 221 if (pipe_to_program)
@@ -317,16 +321,10 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
317 return ret; 321 return ret;
318} 322}
319 323
320 324void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
321void dc_stream_log(
322 const struct dc_stream_state *stream,
323 struct dal_logger *dm_logger,
324 enum dc_log_type log_type)
325{ 325{
326 326 DC_LOG_DC(
327 dm_logger_write(dm_logger, 327 "core_stream 0x%p: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
328 log_type,
329 "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
330 stream, 328 stream,
331 stream->src.x, 329 stream->src.x,
332 stream->src.y, 330 stream->src.y,
@@ -337,21 +335,18 @@ void dc_stream_log(
337 stream->dst.width, 335 stream->dst.width,
338 stream->dst.height, 336 stream->dst.height,
339 stream->output_color_space); 337 stream->output_color_space);
340 dm_logger_write(dm_logger, 338 DC_LOG_DC(
341 log_type,
342 "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n", 339 "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
343 stream->timing.pix_clk_khz, 340 stream->timing.pix_clk_khz,
344 stream->timing.h_total, 341 stream->timing.h_total,
345 stream->timing.v_total, 342 stream->timing.v_total,
346 stream->timing.pixel_encoding, 343 stream->timing.pixel_encoding,
347 stream->timing.display_color_depth); 344 stream->timing.display_color_depth);
348 dm_logger_write(dm_logger, 345 DC_LOG_DC(
349 log_type,
350 "\tsink name: %s, serial: %d\n", 346 "\tsink name: %s, serial: %d\n",
351 stream->sink->edid_caps.display_name, 347 stream->sink->edid_caps.display_name,
352 stream->sink->edid_caps.serial_number); 348 stream->sink->edid_caps.serial_number);
353 dm_logger_write(dm_logger, 349 DC_LOG_DC(
354 log_type,
355 "\tlink: %d\n", 350 "\tlink: %d\n",
356 stream->sink->link->link_index); 351 stream->sink->link->link_index);
357} 352}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 0cb7e10d2505..ceb4c3725893 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
38#include "inc/compressor.h" 38#include "inc/compressor.h"
39#include "dml/display_mode_lib.h" 39#include "dml/display_mode_lib.h"
40 40
41#define DC_VER "3.1.52" 41#define DC_VER "3.1.56"
42 42
43#define MAX_SURFACES 3 43#define MAX_SURFACES 3
44#define MAX_STREAMS 6 44#define MAX_STREAMS 6
@@ -169,6 +169,12 @@ struct dc_config {
169 bool disable_disp_pll_sharing; 169 bool disable_disp_pll_sharing;
170}; 170};
171 171
172enum visual_confirm {
173 VISUAL_CONFIRM_DISABLE = 0,
174 VISUAL_CONFIRM_SURFACE = 1,
175 VISUAL_CONFIRM_HDR = 2,
176};
177
172enum dcc_option { 178enum dcc_option {
173 DCC_ENABLE = 0, 179 DCC_ENABLE = 0,
174 DCC_DISABLE = 1, 180 DCC_DISABLE = 1,
@@ -202,7 +208,7 @@ struct dc_clocks {
202}; 208};
203 209
204struct dc_debug { 210struct dc_debug {
205 bool surface_visual_confirm; 211 enum visual_confirm visual_confirm;
206 bool sanity_checks; 212 bool sanity_checks;
207 bool max_disp_clk; 213 bool max_disp_clk;
208 bool surface_trace; 214 bool surface_trace;
@@ -249,7 +255,15 @@ struct dc_debug {
249 bool always_use_regamma; 255 bool always_use_regamma;
250 bool p010_mpo_support; 256 bool p010_mpo_support;
251 bool recovery_enabled; 257 bool recovery_enabled;
258 bool avoid_vbios_exec_table;
259 bool scl_reset_length10;
260 bool hdmi20_disable;
252 261
262 struct {
263 uint32_t ltFailCount;
264 uint32_t i2cErrorCount;
265 uint32_t auxErrorCount;
266 } debug_data;
253}; 267};
254struct dc_state; 268struct dc_state;
255struct resource_pool; 269struct resource_pool;
@@ -275,7 +289,7 @@ struct dc {
275 /* Inputs into BW and WM calculations. */ 289 /* Inputs into BW and WM calculations. */
276 struct bw_calcs_dceip *bw_dceip; 290 struct bw_calcs_dceip *bw_dceip;
277 struct bw_calcs_vbios *bw_vbios; 291 struct bw_calcs_vbios *bw_vbios;
278#ifdef CONFIG_DRM_AMD_DC_DCN1_0 292#ifdef CONFIG_X86
279 struct dcn_soc_bounding_box *dcn_soc; 293 struct dcn_soc_bounding_box *dcn_soc;
280 struct dcn_ip_params *dcn_ip; 294 struct dcn_ip_params *dcn_ip;
281 struct display_mode_lib dml; 295 struct display_mode_lib dml;
@@ -384,7 +398,8 @@ enum dc_transfer_func_predefined {
384 TRANSFER_FUNCTION_LINEAR, 398 TRANSFER_FUNCTION_LINEAR,
385 TRANSFER_FUNCTION_UNITY, 399 TRANSFER_FUNCTION_UNITY,
386 TRANSFER_FUNCTION_HLG, 400 TRANSFER_FUNCTION_HLG,
387 TRANSFER_FUNCTION_HLG12 401 TRANSFER_FUNCTION_HLG12,
402 TRANSFER_FUNCTION_GAMMA22
388}; 403};
389 404
390struct dc_transfer_func { 405struct dc_transfer_func {
@@ -627,9 +642,14 @@ struct dpcd_caps {
627 struct dc_dongle_caps dongle_caps; 642 struct dc_dongle_caps dongle_caps;
628 643
629 uint32_t sink_dev_id; 644 uint32_t sink_dev_id;
645 int8_t sink_dev_id_str[6];
646 int8_t sink_hw_revision;
647 int8_t sink_fw_revision[2];
648
630 uint32_t branch_dev_id; 649 uint32_t branch_dev_id;
631 int8_t branch_dev_name[6]; 650 int8_t branch_dev_name[6];
632 int8_t branch_hw_revision; 651 int8_t branch_hw_revision;
652 int8_t branch_fw_revision[2];
633 653
634 bool allow_invalid_MSA_timing_param; 654 bool allow_invalid_MSA_timing_param;
635 bool panel_mode_edp; 655 bool panel_mode_edp;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index bd0fda0ceb91..e68077e65565 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -255,3 +255,54 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
255 255
256 return reg_val; 256 return reg_val;
257} 257}
258
259void generic_write_indirect_reg(const struct dc_context *ctx,
260 uint32_t addr_index, uint32_t addr_data,
261 uint32_t index, uint32_t data)
262{
263 dm_write_reg(ctx, addr_index, index);
264 dm_write_reg(ctx, addr_data, data);
265}
266
267uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
268 uint32_t addr_index, uint32_t addr_data,
269 uint32_t index)
270{
271 uint32_t value = 0;
272
273 dm_write_reg(ctx, addr_index, index);
274 value = dm_read_reg(ctx, addr_data);
275
276 return value;
277}
278
279
280uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
281 uint32_t addr_index, uint32_t addr_data,
282 uint32_t index, uint32_t reg_val, int n,
283 uint8_t shift1, uint32_t mask1, uint32_t field_value1,
284 ...)
285{
286 uint32_t shift, mask, field_value;
287 int i = 1;
288
289 va_list ap;
290
291 va_start(ap, field_value1);
292
293 reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
294
295 while (i < n) {
296 shift = va_arg(ap, uint32_t);
297 mask = va_arg(ap, uint32_t);
298 field_value = va_arg(ap, uint32_t);
299
300 reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
301 i++;
302 }
303
304 generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
305 va_end(ap);
306
307 return reg_val;
308}
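[Editor's note] The new dc_helper.c functions wrap the classic index/data indirect-register idiom: write the target offset into the index register, then read or write the payload through its data partner. generic_indirect_reg_update_ex() additionally folds n (shift, mask, value) triplets from varargs into reg_val via set_reg_field_value_ex() before writing it back. A sketch updating two fields, with placeholder register and field names (not real ASIC symbols):

	uint32_t val = generic_read_indirect_reg(ctx,
			mmMY_INDEX, mmMY_DATA,  /* placeholder index/data pair */
			MY_REG_OFFSET);         /* placeholder indirect offset */

	val = generic_indirect_reg_update_ex(ctx,
			mmMY_INDEX, mmMY_DATA, MY_REG_OFFSET,
			val, 2,                 /* two (shift, mask, value) triplets */
			FIELD_A__SHIFT, FIELD_A_MASK, 1,
			FIELD_B__SHIFT, FIELD_B_MASK, 0);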
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 14afbc5c0a62..9cfd7ea845e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -404,9 +404,11 @@ struct dc_cursor_position {
404struct dc_cursor_mi_param { 404struct dc_cursor_mi_param {
405 unsigned int pixel_clk_khz; 405 unsigned int pixel_clk_khz;
406 unsigned int ref_clk_khz; 406 unsigned int ref_clk_khz;
407 unsigned int viewport_x_start; 407 struct rect viewport;
408 unsigned int viewport_width;
409 struct fixed31_32 h_scale_ratio; 408 struct fixed31_32 h_scale_ratio;
409 struct fixed31_32 v_scale_ratio;
410 enum dc_rotation_angle rotation;
411 bool mirror;
410}; 412};
411 413
412/* IPP related types */ 414/* IPP related types */
@@ -490,6 +492,7 @@ struct dc_cursor_attributes {
490 uint32_t height; 492 uint32_t height;
491 493
492 enum dc_cursor_color_format color_format; 494 enum dc_cursor_color_format color_format;
495 uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
493 496
494 /* In case we support HW Cursor rotation in the future */ 497 /* In case we support HW Cursor rotation in the future */
495 enum dc_rotation_angle rotation_angle; 498 enum dc_rotation_angle rotation_angle;
@@ -497,6 +500,11 @@ struct dc_cursor_attributes {
497 union dc_cursor_attribute_flags attribute_flags; 500 union dc_cursor_attribute_flags attribute_flags;
498}; 501};
499 502
503struct dpp_cursor_attributes {
504 int bias;
505 int scale;
506};
507
500/* OPP */ 508/* OPP */
501 509
502enum dc_color_space { 510enum dc_color_space {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 8a716baa1203..070a56926308 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -172,7 +172,7 @@ bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
172 * false - no change in Downstream port status. No further action required 172 * false - no change in Downstream port status. No further action required
173 * from DM. */ 173 * from DM. */
174bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link, 174bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
175 union hpd_irq_data *hpd_irq_dpcd_data); 175 union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
176 176
177struct dc_sink_init_data; 177struct dc_sink_init_data;
178 178
@@ -210,10 +210,29 @@ bool dc_link_dp_set_test_pattern(
210 210
211void dc_link_enable_hpd_filter(struct dc_link *link, bool enable); 211void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
212 212
213bool dc_link_is_dp_sink_present(struct dc_link *link);
214
213/* 215/*
214 * DPCD access interfaces 216 * DPCD access interfaces
215 */ 217 */
216 218
219void dc_link_set_drive_settings(struct dc *dc,
220 struct link_training_settings *lt_settings,
221 const struct dc_link *link);
222void dc_link_perform_link_training(struct dc *dc,
223 struct dc_link_settings *link_setting,
224 bool skip_video_pattern);
225void dc_link_set_preferred_link_settings(struct dc *dc,
226 struct dc_link_settings *link_setting,
227 struct dc_link *link);
228void dc_link_enable_hpd(const struct dc_link *link);
229void dc_link_disable_hpd(const struct dc_link *link);
230void dc_link_set_test_pattern(struct dc_link *link,
231 enum dp_test_pattern test_pattern,
232 const struct link_training_settings *p_link_settings,
233 const unsigned char *p_custom_pattern,
234 unsigned int cust_pattern_size);
235
217bool dc_submit_i2c( 236bool dc_submit_i2c(
218 struct dc *dc, 237 struct dc *dc,
219 uint32_t link_index, 238 uint32_t link_index,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index af503e0286a7..cbfe418006cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -100,6 +100,7 @@ struct dc_stream_state {
100 100
101 struct dc_cursor_attributes cursor_attributes; 101 struct dc_cursor_attributes cursor_attributes;
102 struct dc_cursor_position cursor_position; 102 struct dc_cursor_position cursor_position;
103 uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
103 104
104 /* from stream struct */ 105 /* from stream struct */
105 struct kref refcount; 106 struct kref refcount;
@@ -147,10 +148,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
147/* 148/*
148 * Log the current stream state. 149 * Log the current stream state.
149 */ 150 */
150void dc_stream_log( 151void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream);
151 const struct dc_stream_state *stream,
152 struct dal_logger *dc_logger,
153 enum dc_log_type log_type);
154 152
155uint8_t dc_get_current_stream_count(struct dc *dc); 153uint8_t dc_get_current_stream_count(struct dc *dc);
156struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i); 154struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
@@ -258,6 +256,7 @@ bool dc_stream_set_cursor_position(
258 struct dc_stream_state *stream, 256 struct dc_stream_state *stream,
259 const struct dc_cursor_position *position); 257 const struct dc_cursor_position *position);
260 258
259
261bool dc_stream_adjust_vmin_vmax(struct dc *dc, 260bool dc_stream_adjust_vmin_vmax(struct dc *dc,
262 struct dc_stream_state **stream, 261 struct dc_stream_state **stream,
263 int num_streams, 262 int num_streams,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index c96e526d07bb..8c6eb78b0c3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -77,8 +77,6 @@ struct dc_context {
77 struct dc *dc; 77 struct dc *dc;
78 78
79 void *driver_context; /* e.g. amdgpu_device */ 79 void *driver_context; /* e.g. amdgpu_device */
80
81 struct dal_logger *logger;
82 void *cgs_device; 80 void *cgs_device;
83 81
84 enum dce_environment dce_environment; 82 enum dce_environment dce_environment;
@@ -194,6 +192,7 @@ union display_content_support {
194 192
195struct dc_panel_patch { 193struct dc_panel_patch {
196 unsigned int dppowerup_delay; 194 unsigned int dppowerup_delay;
195 unsigned int extra_t12_ms;
197}; 196};
198 197
199struct dc_edid_caps { 198struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index ca137757a69e..439dcf3b596c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -592,7 +592,7 @@ static uint32_t dce110_get_pix_clk_dividers(
592 case DCE_VERSION_11_2: 592 case DCE_VERSION_11_2:
593 case DCE_VERSION_11_22: 593 case DCE_VERSION_11_22:
594 case DCE_VERSION_12_0: 594 case DCE_VERSION_12_0:
595#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 595#ifdef CONFIG_X86
596 case DCN_VERSION_1_0: 596 case DCN_VERSION_1_0:
597#endif 597#endif
598 598
@@ -909,7 +909,7 @@ static bool dce110_program_pix_clk(
909 struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); 909 struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
910 struct bp_pixel_clock_parameters bp_pc_params = {0}; 910 struct bp_pixel_clock_parameters bp_pc_params = {0};
911 911
912#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 912#ifdef CONFIG_X86
913 if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { 913 if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
914 unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; 914 unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
915 unsigned dp_dto_ref_kHz = 700000; 915 unsigned dp_dto_ref_kHz = 700000;
@@ -982,7 +982,7 @@ static bool dce110_program_pix_clk(
982 case DCE_VERSION_11_2: 982 case DCE_VERSION_11_2:
983 case DCE_VERSION_11_22: 983 case DCE_VERSION_11_22:
984 case DCE_VERSION_12_0: 984 case DCE_VERSION_12_0:
985#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 985#ifdef CONFIG_X86
986 case DCN_VERSION_1_0: 986 case DCN_VERSION_1_0:
987#endif 987#endif
988 988
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index c45e2f76189e..801bb65707b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -55,7 +55,7 @@
55 CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\ 55 CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
56 CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh) 56 CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
57 57
58#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 58#ifdef CONFIG_X86
59 59
60#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \ 60#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
61 SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ 61 SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 6882dc953a2c..8f8a2abac3f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -30,7 +30,7 @@
30#include "bios_parser_interface.h" 30#include "bios_parser_interface.h"
31#include "dc.h" 31#include "dc.h"
32#include "dmcu.h" 32#include "dmcu.h"
33#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 33#ifdef CONFIG_X86
34#include "dcn_calcs.h" 34#include "dcn_calcs.h"
35#endif 35#endif
36#include "core_types.h" 36#include "core_types.h"
@@ -478,7 +478,7 @@ static void dce12_update_clocks(struct dccg *dccg,
478 } 478 }
479} 479}
480 480
481#ifdef CONFIG_DRM_AMD_DC_DCN1_0 481#ifdef CONFIG_X86
482static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks) 482static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
483{ 483{
484 bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; 484 bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
@@ -666,7 +666,7 @@ static void dce_update_clocks(struct dccg *dccg,
666 } 666 }
667} 667}
668 668
669#ifdef CONFIG_DRM_AMD_DC_DCN1_0 669#ifdef CONFIG_X86
670static const struct display_clock_funcs dcn1_funcs = { 670static const struct display_clock_funcs dcn1_funcs = {
671 .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, 671 .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
672 .set_dispclk = dce112_set_clock, 672 .set_dispclk = dce112_set_clock,
@@ -821,7 +821,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
821 return &clk_dce->base; 821 return &clk_dce->base;
822} 822}
823 823
824#ifdef CONFIG_DRM_AMD_DC_DCN1_0 824#ifdef CONFIG_X86
825struct dccg *dcn1_dccg_create(struct dc_context *ctx) 825struct dccg *dcn1_dccg_create(struct dc_context *ctx)
826{ 826{
827 struct dc_debug *debug = &ctx->dc->debug; 827 struct dc_debug *debug = &ctx->dc->debug;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
index 7ce0a54e548f..e5e44adc6c27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
@@ -44,18 +44,14 @@
44 CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh) 44 CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
45 45
46#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \ 46#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
47 CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
48 CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\ 47 CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
49 CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh),\ 48 CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
50 CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, mask_sh)
51 49
52#define CLK_REG_FIELD_LIST(type) \ 50#define CLK_REG_FIELD_LIST(type) \
53 type DPREFCLK_SRC_SEL; \ 51 type DPREFCLK_SRC_SEL; \
54 type DENTIST_DPREFCLK_WDIVIDER; \ 52 type DENTIST_DPREFCLK_WDIVIDER; \
55 type DENTIST_DISPCLK_WDIVIDER; \ 53 type DENTIST_DISPCLK_WDIVIDER; \
56 type DENTIST_DPPCLK_WDIVIDER; \ 54 type DENTIST_DISPCLK_CHG_DONE;
57 type DENTIST_DISPCLK_CHG_DONE; \
58 type DENTIST_DPPCLK_CHG_DONE;
59 55
60struct dccg_shift { 56struct dccg_shift {
61 CLK_REG_FIELD_LIST(uint8_t) 57 CLK_REG_FIELD_LIST(uint8_t)
@@ -115,7 +111,7 @@ struct dccg *dce112_dccg_create(
115 111
116struct dccg *dce120_dccg_create(struct dc_context *ctx); 112struct dccg *dce120_dccg_create(struct dc_context *ctx);
117 113
118#ifdef CONFIG_DRM_AMD_DC_DCN1_0 114#ifdef CONFIG_X86
119struct dccg *dcn1_dccg_create(struct dc_context *ctx); 115struct dccg *dcn1_dccg_create(struct dc_context *ctx);
120#endif 116#endif
121 117
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a576b8bbb3cd..062a46543887 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -314,7 +314,7 @@ static void dce_get_psr_wait_loop(
314 return; 314 return;
315} 315}
316 316
317#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 317#ifdef CONFIG_X86
318static void dcn10_get_dmcu_state(struct dmcu *dmcu) 318static void dcn10_get_dmcu_state(struct dmcu *dmcu)
319{ 319{
320 struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); 320 struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
@@ -735,7 +735,7 @@ static const struct dmcu_funcs dce_funcs = {
735 .is_dmcu_initialized = dce_is_dmcu_initialized 735 .is_dmcu_initialized = dce_is_dmcu_initialized
736}; 736};
737 737
738#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 738#ifdef CONFIG_X86
739static const struct dmcu_funcs dcn10_funcs = { 739static const struct dmcu_funcs dcn10_funcs = {
740 .dmcu_init = dcn10_dmcu_init, 740 .dmcu_init = dcn10_dmcu_init,
741 .load_iram = dcn10_dmcu_load_iram, 741 .load_iram = dcn10_dmcu_load_iram,
@@ -787,7 +787,7 @@ struct dmcu *dce_dmcu_create(
787 return &dmcu_dce->base; 787 return &dmcu_dce->base;
788} 788}
789 789
790#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 790#ifdef CONFIG_X86
791struct dmcu *dcn10_dmcu_create( 791struct dmcu *dcn10_dmcu_create(
792 struct dc_context *ctx, 792 struct dc_context *ctx,
793 const struct dce_dmcu_registers *regs, 793 const struct dce_dmcu_registers *regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 057407892618..64dc75378541 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -147,6 +147,7 @@
147 SR(DCCG_GATE_DISABLE_CNTL2), \ 147 SR(DCCG_GATE_DISABLE_CNTL2), \
148 SR(DCFCLK_CNTL),\ 148 SR(DCFCLK_CNTL),\
149 SR(DCFCLK_CNTL), \ 149 SR(DCFCLK_CNTL), \
150 SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
150 /* todo: get these from GVM instead of reading registers ourselves */\ 151 /* todo: get these from GVM instead of reading registers ourselves */\
151 MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\ 152 MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
152 MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\ 153 MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\
@@ -275,6 +276,8 @@ struct dce_hwseq_registers {
275 uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB; 276 uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
276 uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR; 277 uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
277 uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR; 278 uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
279 uint32_t AZALIA_AUDIO_DTO;
280 uint32_t AZALIA_CONTROLLER_CLOCK_GATING;
278}; 281};
279 /* set field name */ 282 /* set field name */
280#define HWS_SF(blk_name, reg_name, field_name, post_fix)\ 283#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -361,7 +364,8 @@ struct dce_hwseq_registers {
361 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\ 364 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
362 HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \ 365 HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
363 HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ 366 HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
364 HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh) 367 HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh), \
368 HWS_SF(, DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
365 369
366#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\ 370#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
367 HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ 371 HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -500,7 +504,8 @@ struct dce_hwseq_registers {
500 type D1VGA_MODE_ENABLE; \ 504 type D1VGA_MODE_ENABLE; \
501 type D2VGA_MODE_ENABLE; \ 505 type D2VGA_MODE_ENABLE; \
502 type D3VGA_MODE_ENABLE; \ 506 type D3VGA_MODE_ENABLE; \
503 type D4VGA_MODE_ENABLE; 507 type D4VGA_MODE_ENABLE; \
508 type AZALIA_AUDIO_DTO_MODULE;
504 509
505struct dce_hwseq_shift { 510struct dce_hwseq_shift {
506 HWSEQ_REG_FIELD_LIST(uint8_t) 511 HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index dbe3b26b6d9e..60e3c6a73d37 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -646,6 +646,9 @@ static bool dce110_link_encoder_validate_hdmi_output(
646 if (!enc110->base.features.flags.bits.HDMI_6GB_EN && 646 if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
647 adjusted_pix_clk_khz >= 300000) 647 adjusted_pix_clk_khz >= 300000)
648 return false; 648 return false;
649 if (enc110->base.ctx->dc->debug.hdmi20_disable &&
650 crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
651 return false;
649 return true; 652 return true;
650} 653}
651 654
@@ -773,6 +776,9 @@ void dce110_link_encoder_construct(
773 __func__, 776 __func__,
774 result); 777 result);
775 } 778 }
779 if (enc110->base.ctx->dc->debug.hdmi20_disable) {
780 enc110->base.features.flags.bits.HDMI_6GB_EN = 0;
781 }
776} 782}
777 783
778bool dce110_link_encoder_validate_output_with_stream( 784bool dce110_link_encoder_validate_output_with_stream(
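The two hunks above wire a new hdmi20_disable debug flag into HDMI validation and construction: when set, HDMI 2.0 (6 GB/s) support is masked off and YCbCr 4:2:0 timings are rejected. A minimal sketch of the combined check, using the field names from the diff over simplified stand-in parameters rather than the real dc link encoder structures:

#include <stdbool.h>

/* Sketch: HDMI output validation with the new hdmi20_disable debug flag. */
static bool validate_hdmi_output_sketch(bool hdmi_6gb_en, bool hdmi20_disable,
					int adjusted_pix_clk_khz,
					bool is_ycbcr420)
{
	/* Pixel clocks of 300 MHz and above need the 6 GB/s HDMI 2.0 PHY. */
	if (!hdmi_6gb_en && adjusted_pix_clk_khz >= 300000)
		return false;

	/* YCbCr 4:2:0 is an HDMI 2.0 feature, so it is rejected whenever
	 * HDMI 2.0 support is disabled for debugging.
	 */
	if (hdmi20_disable && is_ycbcr420)
		return false;

	return true;
}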
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index b235a75355b8..85686d917636 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -729,7 +729,7 @@ static bool dce_mi_program_surface_flip_and_addr(
729 return true; 729 return true;
730} 730}
731 731
732static struct mem_input_funcs dce_mi_funcs = { 732static const struct mem_input_funcs dce_mi_funcs = {
733 .mem_input_program_display_marks = dce_mi_program_display_marks, 733 .mem_input_program_display_marks = dce_mi_program_display_marks,
734 .allocate_mem_input = dce_mi_allocate_dmif, 734 .allocate_mem_input = dce_mi_allocate_dmif,
735 .free_mem_input = dce_mi_free_dmif, 735 .free_mem_input = dce_mi_free_dmif,
@@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = {
741 .mem_input_is_flip_pending = dce_mi_is_flip_pending 741 .mem_input_is_flip_pending = dce_mi_is_flip_pending
742}; 742};
743 743
744static const struct mem_input_funcs dce112_mi_funcs = {
745 .mem_input_program_display_marks = dce112_mi_program_display_marks,
746 .allocate_mem_input = dce_mi_allocate_dmif,
747 .free_mem_input = dce_mi_free_dmif,
748 .mem_input_program_surface_flip_and_addr =
749 dce_mi_program_surface_flip_and_addr,
750 .mem_input_program_pte_vm = dce_mi_program_pte_vm,
751 .mem_input_program_surface_config =
752 dce_mi_program_surface_config,
753 .mem_input_is_flip_pending = dce_mi_is_flip_pending
754};
755
756static const struct mem_input_funcs dce120_mi_funcs = {
757 .mem_input_program_display_marks = dce120_mi_program_display_marks,
758 .allocate_mem_input = dce_mi_allocate_dmif,
759 .free_mem_input = dce_mi_free_dmif,
760 .mem_input_program_surface_flip_and_addr =
761 dce_mi_program_surface_flip_and_addr,
762 .mem_input_program_pte_vm = dce_mi_program_pte_vm,
763 .mem_input_program_surface_config =
764 dce_mi_program_surface_config,
765 .mem_input_is_flip_pending = dce_mi_is_flip_pending
766};
744 767
745void dce_mem_input_construct( 768void dce_mem_input_construct(
746 struct dce_mem_input *dce_mi, 769 struct dce_mem_input *dce_mi,
@@ -769,7 +792,7 @@ void dce112_mem_input_construct(
769 const struct dce_mem_input_mask *mi_mask) 792 const struct dce_mem_input_mask *mi_mask)
770{ 793{
771 dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); 794 dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
772 dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks; 795 dce_mi->base.funcs = &dce112_mi_funcs;
773} 796}
774 797
775void dce120_mem_input_construct( 798void dce120_mem_input_construct(
@@ -781,5 +804,5 @@ void dce120_mem_input_construct(
781 const struct dce_mem_input_mask *mi_mask) 804 const struct dce_mem_input_mask *mi_mask)
782{ 805{
783 dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); 806 dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
784 dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks; 807 dce_mi->base.funcs = &dce120_mi_funcs;
785} 808}
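This change stops the dce112/dce120 constructors from patching the shared, writable dce_mi_funcs table at runtime and instead points each variant at its own const table. Patching the shared table mutated behaviour for every mem_input that referenced it and blocked moving the table into read-only data. A small sketch of the pattern, with trimmed stand-in types rather than the real dc structures:

/* Sketch: per-variant const vtables instead of runtime patching. */
struct mi_funcs_sketch {
	void (*program_display_marks)(void);
};

static void dce_marks(void) { /* base implementation */ }
static void dce112_marks(void) { /* DCE 11.2 implementation */ }

static const struct mi_funcs_sketch dce_mi_funcs_sketch = {
	.program_display_marks = dce_marks,
};

static const struct mi_funcs_sketch dce112_mi_funcs_sketch = {
	.program_display_marks = dce112_marks,
};

struct mi_sketch { const struct mi_funcs_sketch *funcs; };

static void dce112_construct_sketch(struct mi_sketch *mi)
{
	mi->funcs = &dce112_mi_funcs_sketch;	/* pointer swap, no mutation */
}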
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 91642e684858..b139b4017820 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -135,7 +135,7 @@ static void dce110_update_generic_info_packet(
135 AFMT_GENERIC0_UPDATE, (packet_index == 0), 135 AFMT_GENERIC0_UPDATE, (packet_index == 0),
136 AFMT_GENERIC2_UPDATE, (packet_index == 2)); 136 AFMT_GENERIC2_UPDATE, (packet_index == 2));
137 } 137 }
138#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 138#ifdef CONFIG_X86
139 if (REG(AFMT_VBI_PACKET_CONTROL1)) { 139 if (REG(AFMT_VBI_PACKET_CONTROL1)) {
140 switch (packet_index) { 140 switch (packet_index) {
141 case 0: 141 case 0:
@@ -229,7 +229,7 @@ static void dce110_update_hdmi_info_packet(
229 HDMI_GENERIC1_SEND, send, 229 HDMI_GENERIC1_SEND, send,
230 HDMI_GENERIC1_LINE, line); 230 HDMI_GENERIC1_LINE, line);
231 break; 231 break;
232#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 232#ifdef CONFIG_X86
233 case 4: 233 case 4:
234 if (REG(HDMI_GENERIC_PACKET_CONTROL2)) 234 if (REG(HDMI_GENERIC_PACKET_CONTROL2))
235 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2, 235 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
@@ -274,7 +274,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
274 struct dc_crtc_timing *crtc_timing, 274 struct dc_crtc_timing *crtc_timing,
275 enum dc_color_space output_color_space) 275 enum dc_color_space output_color_space)
276{ 276{
277#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 277#ifdef CONFIG_X86
278 uint32_t h_active_start; 278 uint32_t h_active_start;
279 uint32_t v_active_start; 279 uint32_t v_active_start;
280 uint32_t misc0 = 0; 280 uint32_t misc0 = 0;
@@ -317,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
317 if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN) 317 if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
318 REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1); 318 REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
319 319
320#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 320#ifdef CONFIG_X86
321 if (enc110->se_mask->DP_VID_N_MUL) 321 if (enc110->se_mask->DP_VID_N_MUL)
322 REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1); 322 REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
323#endif 323#endif
@@ -328,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
328 break; 328 break;
329 } 329 }
330 330
331#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 331#ifdef CONFIG_X86
332 if (REG(DP_MSA_MISC)) 332 if (REG(DP_MSA_MISC))
333 misc1 = REG_READ(DP_MSA_MISC); 333 misc1 = REG_READ(DP_MSA_MISC);
334#endif 334#endif
@@ -362,7 +362,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
362 /* set dynamic range and YCbCr range */ 362 /* set dynamic range and YCbCr range */
363 363
364 364
365#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 365#ifdef CONFIG_X86
366 switch (crtc_timing->display_color_depth) { 366 switch (crtc_timing->display_color_depth) {
367 case COLOR_DEPTH_666: 367 case COLOR_DEPTH_666:
368 colorimetry_bpc = 0; 368 colorimetry_bpc = 0;
@@ -441,7 +441,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
441 DP_DYN_RANGE, dynamic_range_rgb, 441 DP_DYN_RANGE, dynamic_range_rgb,
442 DP_YCBCR_RANGE, dynamic_range_ycbcr); 442 DP_YCBCR_RANGE, dynamic_range_ycbcr);
443 443
444#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 444#ifdef CONFIG_X86
445 if (REG(DP_MSA_COLORIMETRY)) 445 if (REG(DP_MSA_COLORIMETRY))
446 REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0); 446 REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
447 447
@@ -476,7 +476,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
476 crtc_timing->v_front_porch; 476 crtc_timing->v_front_porch;
477 477
478 478
479#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 479#ifdef CONFIG_X86
 480 /* start at beginning of left border */ 480 /* start at beginning of left border */
481 if (REG(DP_MSA_TIMING_PARAM2)) 481 if (REG(DP_MSA_TIMING_PARAM2))
482 REG_SET_2(DP_MSA_TIMING_PARAM2, 0, 482 REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
@@ -751,7 +751,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
751 dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd); 751 dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
752 } 752 }
753 753
754#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 754#ifdef CONFIG_X86
755 if (enc110->se_mask->HDMI_DB_DISABLE) { 755 if (enc110->se_mask->HDMI_DB_DISABLE) {
756 /* for bring up, disable dp double TODO */ 756 /* for bring up, disable dp double TODO */
757 if (REG(HDMI_DB_CONTROL)) 757 if (REG(HDMI_DB_CONTROL))
@@ -789,7 +789,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
789 HDMI_GENERIC1_LINE, 0, 789 HDMI_GENERIC1_LINE, 0,
790 HDMI_GENERIC1_SEND, 0); 790 HDMI_GENERIC1_SEND, 0);
791 791
792#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 792#ifdef CONFIG_X86
793 /* stop generic packets 2 & 3 on HDMI */ 793 /* stop generic packets 2 & 3 on HDMI */
794 if (REG(HDMI_GENERIC_PACKET_CONTROL2)) 794 if (REG(HDMI_GENERIC_PACKET_CONTROL2))
795 REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0, 795 REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index a02e719d7794..ab63d0d0304c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -155,7 +155,7 @@ static void program_overscan(
155 int overscan_bottom = data->v_active 155 int overscan_bottom = data->v_active
156 - data->recout.y - data->recout.height; 156 - data->recout.y - data->recout.height;
157 157
158 if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) { 158 if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
159 overscan_bottom += 2; 159 overscan_bottom += 2;
160 overscan_right += 2; 160 overscan_right += 2;
161 } 161 }
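Here the old surface_visual_confirm boolean becomes a visual_confirm enum, so the overscan border is drawn for any confirmation mode rather than only the surface one. A sketch of the assumed enumerators; only the names VISUAL_CONFIRM_DISABLE, VISUAL_CONFIRM_SURFACE, and VISUAL_CONFIRM_HDR appear in this diff, and the numeric values below are illustrative:

/* Sketch of the visual_confirm debug option as used in these hunks. */
enum visual_confirm_sketch {
	VISUAL_CONFIRM_DISABLE = 0,	/* no confirmation coloring */
	VISUAL_CONFIRM_SURFACE = 1,	/* per-surface confirm color */
	VISUAL_CONFIRM_HDR = 2,		/* HDR format confirm color */
};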
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 9cbd5036db07..33a14e163f88 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -864,17 +864,22 @@ void hwss_edp_power_control(
864 if (power_up) { 864 if (power_up) {
865 unsigned long long current_ts = dm_get_timestamp(ctx); 865 unsigned long long current_ts = dm_get_timestamp(ctx);
866 unsigned long long duration_in_ms = 866 unsigned long long duration_in_ms =
867 dm_get_elapse_time_in_ns( 867 div64_u64(dm_get_elapse_time_in_ns(
868 ctx, 868 ctx,
869 current_ts, 869 current_ts,
870 div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000)); 870 link->link_trace.time_stamp.edp_poweroff), 1000000);
871 unsigned long long wait_time_ms = 0; 871 unsigned long long wait_time_ms = 0;
872 872
873 /* max 500ms from LCDVDD off to on */ 873 /* max 500ms from LCDVDD off to on */
874 unsigned long long edp_poweroff_time_ms = 500;
875
876 if (link->local_sink != NULL)
877 edp_poweroff_time_ms =
878 500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
874 if (link->link_trace.time_stamp.edp_poweroff == 0) 879 if (link->link_trace.time_stamp.edp_poweroff == 0)
875 wait_time_ms = 500; 880 wait_time_ms = edp_poweroff_time_ms;
876 else if (duration_in_ms < 500) 881 else if (duration_in_ms < edp_poweroff_time_ms)
877 wait_time_ms = 500 - duration_in_ms; 882 wait_time_ms = edp_poweroff_time_ms - duration_in_ms;
878 883
879 if (wait_time_ms) { 884 if (wait_time_ms) {
880 msleep(wait_time_ms); 885 msleep(wait_time_ms);
@@ -1245,13 +1250,13 @@ static void program_scaler(const struct dc *dc,
1245{ 1250{
1246 struct tg_color color = {0}; 1251 struct tg_color color = {0};
1247 1252
1248#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 1253#ifdef CONFIG_X86
1249 /* TOFPGA */ 1254 /* TOFPGA */
1250 if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL) 1255 if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
1251 return; 1256 return;
1252#endif 1257#endif
1253 1258
1254 if (dc->debug.surface_visual_confirm) 1259 if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
1255 get_surface_visual_confirm_color(pipe_ctx, &color); 1260 get_surface_visual_confirm_color(pipe_ctx, &color);
1256 else 1261 else
1257 color_space_to_black_color(dc, 1262 color_space_to_black_color(dc,
@@ -2801,9 +2806,11 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
2801 struct dc_cursor_mi_param param = { 2806 struct dc_cursor_mi_param param = {
2802 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz, 2807 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
2803 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz, 2808 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
2804 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x, 2809 .viewport = pipe_ctx->plane_res.scl_data.viewport,
2805 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width, 2810 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
2806 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz 2811 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
2812 .rotation = pipe_ctx->plane_state->rotation,
2813 .mirror = pipe_ctx->plane_state->horizontal_mirror
2807 }; 2814 };
2808 2815
2809 if (pipe_ctx->plane_state->address.type 2816 if (pipe_ctx->plane_state->address.type
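The hwss_edp_power_control hunk above fixes a unit error: the old code divided the poweroff timestamp by 1000000 before passing it to dm_get_elapse_time_in_ns(), while the new code divides the elapsed nanoseconds, and the fixed 500 ms T12 floor is now extended by a per-panel extra_t12_ms patch value. A minimal sketch of the corrected wait arithmetic, restated as a standalone helper; the kernel uses div64_u64() where this sketch uses plain division:

#include <stdbool.h>

/* Sketch of the corrected LCDVDD off->on wait from the hunk above. */
static unsigned long long edp_wait_ms_sketch(unsigned long long elapsed_ns,
					     unsigned long long extra_t12_ms,
					     bool never_powered_off)
{
	unsigned long long elapsed_ms = elapsed_ns / 1000000ULL;
	unsigned long long required_ms = 500 + extra_t12_ms;

	if (never_powered_off)
		return required_ms;		 /* no timestamp: wait in full */
	if (elapsed_ms < required_ms)
		return required_ms - elapsed_ms; /* wait out the remainder */
	return 0;				 /* T12 already satisfied */
}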
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 0564c8e31252..9b9fc3d96c07 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -1011,7 +1011,7 @@ void dce110_free_mem_input_v(
1011{ 1011{
1012} 1012}
1013 1013
1014static struct mem_input_funcs dce110_mem_input_v_funcs = { 1014static const struct mem_input_funcs dce110_mem_input_v_funcs = {
1015 .mem_input_program_display_marks = 1015 .mem_input_program_display_marks =
1016 dce_mem_input_v_program_display_marks, 1016 dce_mem_input_v_program_display_marks,
1017 .mem_input_program_chroma_display_marks = 1017 .mem_input_program_chroma_display_marks =
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 3edaa006bd57..1c902e49a712 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -794,43 +794,38 @@ static bool dce110_validate_bandwidth(
794 794
795 if (memcmp(&dc->current_state->bw.dce, 795 if (memcmp(&dc->current_state->bw.dce,
796 &context->bw.dce, sizeof(context->bw.dce))) { 796 &context->bw.dce, sizeof(context->bw.dce))) {
797 struct log_entry log_entry; 797
798 dm_logger_open( 798 DC_LOG_BANDWIDTH_CALCS(
799 dc->ctx->logger, 799 "%s: finish,\n"
800 &log_entry, 800 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
801 LOG_BANDWIDTH_CALCS); 801 "stutMark_b: %d stutMark_a: %d\n"
802 dm_logger_append(&log_entry, "%s: finish,\n"
803 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" 802 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
804 "stutMark_b: %d stutMark_a: %d\n", 803 "stutMark_b: %d stutMark_a: %d\n"
804 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
805 "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
806 "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
807 "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
808 ,
805 __func__, 809 __func__,
806 context->bw.dce.nbp_state_change_wm_ns[0].b_mark, 810 context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
807 context->bw.dce.nbp_state_change_wm_ns[0].a_mark, 811 context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
808 context->bw.dce.urgent_wm_ns[0].b_mark, 812 context->bw.dce.urgent_wm_ns[0].b_mark,
809 context->bw.dce.urgent_wm_ns[0].a_mark, 813 context->bw.dce.urgent_wm_ns[0].a_mark,
810 context->bw.dce.stutter_exit_wm_ns[0].b_mark, 814 context->bw.dce.stutter_exit_wm_ns[0].b_mark,
811 context->bw.dce.stutter_exit_wm_ns[0].a_mark); 815 context->bw.dce.stutter_exit_wm_ns[0].a_mark,
812 dm_logger_append(&log_entry,
813 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
814 "stutMark_b: %d stutMark_a: %d\n",
815 context->bw.dce.nbp_state_change_wm_ns[1].b_mark, 816 context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
816 context->bw.dce.nbp_state_change_wm_ns[1].a_mark, 817 context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
817 context->bw.dce.urgent_wm_ns[1].b_mark, 818 context->bw.dce.urgent_wm_ns[1].b_mark,
818 context->bw.dce.urgent_wm_ns[1].a_mark, 819 context->bw.dce.urgent_wm_ns[1].a_mark,
819 context->bw.dce.stutter_exit_wm_ns[1].b_mark, 820 context->bw.dce.stutter_exit_wm_ns[1].b_mark,
820 context->bw.dce.stutter_exit_wm_ns[1].a_mark); 821 context->bw.dce.stutter_exit_wm_ns[1].a_mark,
821 dm_logger_append(&log_entry,
822 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
823 "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
824 context->bw.dce.nbp_state_change_wm_ns[2].b_mark, 822 context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
825 context->bw.dce.nbp_state_change_wm_ns[2].a_mark, 823 context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
826 context->bw.dce.urgent_wm_ns[2].b_mark, 824 context->bw.dce.urgent_wm_ns[2].b_mark,
827 context->bw.dce.urgent_wm_ns[2].a_mark, 825 context->bw.dce.urgent_wm_ns[2].a_mark,
828 context->bw.dce.stutter_exit_wm_ns[2].b_mark, 826 context->bw.dce.stutter_exit_wm_ns[2].b_mark,
829 context->bw.dce.stutter_exit_wm_ns[2].a_mark, 827 context->bw.dce.stutter_exit_wm_ns[2].a_mark,
830 context->bw.dce.stutter_mode_enable); 828 context->bw.dce.stutter_mode_enable,
831 dm_logger_append(&log_entry,
832 "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
833 "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
834 context->bw.dce.cpuc_state_change_enable, 829 context->bw.dce.cpuc_state_change_enable,
835 context->bw.dce.cpup_state_change_enable, 830 context->bw.dce.cpup_state_change_enable,
836 context->bw.dce.nbp_state_change_enable, 831 context->bw.dce.nbp_state_change_enable,
@@ -840,7 +835,6 @@ static bool dce110_validate_bandwidth(
840 context->bw.dce.sclk_deep_sleep_khz, 835 context->bw.dce.sclk_deep_sleep_khz,
841 context->bw.dce.yclk_khz, 836 context->bw.dce.yclk_khz,
842 context->bw.dce.blackout_recovery_time_us); 837 context->bw.dce.blackout_recovery_time_us);
843 dm_logger_close(&log_entry);
844 } 838 }
845 return result; 839 return result;
846} 840}
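This hunk (and the identical one for dce112 below) collapses a dm_logger_open/dm_logger_append/dm_logger_close sequence into a single DC_LOG_BANDWIDTH_CALCS() call with one long format string. A sketch of the pattern; the macro name matches the diff, but the expansion to fprintf() here is an illustrative assumption, not the real dc logging layer:

#include <stdio.h>

#define DC_LOG_BANDWIDTH_CALCS(fmt, ...) \
	fprintf(stderr, "[BANDWIDTH_CALCS] " fmt, ##__VA_ARGS__)

static void log_marks_sketch(int b_mark, int a_mark)
{
	/* One call site, one format string, arguments in display order. */
	DC_LOG_BANDWIDTH_CALCS("%s: finish,\nnbpMark_b: %d nbpMark_a: %d\n",
			       __func__, b_mark, a_mark);
}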
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index a7dce060204f..aa8d6b10d2c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -235,7 +235,7 @@ static void program_overscan(
235 int overscan_right = data->h_active - data->recout.x - data->recout.width; 235 int overscan_right = data->h_active - data->recout.x - data->recout.width;
236 int overscan_bottom = data->v_active - data->recout.y - data->recout.height; 236 int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
237 237
238 if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) { 238 if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
239 overscan_bottom += 2; 239 overscan_bottom += 2;
240 overscan_right += 2; 240 overscan_right += 2;
241 } 241 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 9e1afb11e6ad..30d5b32892d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -744,43 +744,38 @@ bool dce112_validate_bandwidth(
744 744
745 if (memcmp(&dc->current_state->bw.dce, 745 if (memcmp(&dc->current_state->bw.dce,
746 &context->bw.dce, sizeof(context->bw.dce))) { 746 &context->bw.dce, sizeof(context->bw.dce))) {
747 struct log_entry log_entry; 747
748 dm_logger_open( 748 DC_LOG_BANDWIDTH_CALCS(
749 dc->ctx->logger, 749 "%s: finish,\n"
750 &log_entry, 750 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
751 LOG_BANDWIDTH_CALCS); 751 "stutMark_b: %d stutMark_a: %d\n"
752 dm_logger_append(&log_entry, "%s: finish,\n"
753 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" 752 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
754 "stutMark_b: %d stutMark_a: %d\n", 753 "stutMark_b: %d stutMark_a: %d\n"
754 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
755 "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
756 "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
757 "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
758 ,
755 __func__, 759 __func__,
756 context->bw.dce.nbp_state_change_wm_ns[0].b_mark, 760 context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
757 context->bw.dce.nbp_state_change_wm_ns[0].a_mark, 761 context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
758 context->bw.dce.urgent_wm_ns[0].b_mark, 762 context->bw.dce.urgent_wm_ns[0].b_mark,
759 context->bw.dce.urgent_wm_ns[0].a_mark, 763 context->bw.dce.urgent_wm_ns[0].a_mark,
760 context->bw.dce.stutter_exit_wm_ns[0].b_mark, 764 context->bw.dce.stutter_exit_wm_ns[0].b_mark,
761 context->bw.dce.stutter_exit_wm_ns[0].a_mark); 765 context->bw.dce.stutter_exit_wm_ns[0].a_mark,
762 dm_logger_append(&log_entry,
763 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
764 "stutMark_b: %d stutMark_a: %d\n",
765 context->bw.dce.nbp_state_change_wm_ns[1].b_mark, 766 context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
766 context->bw.dce.nbp_state_change_wm_ns[1].a_mark, 767 context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
767 context->bw.dce.urgent_wm_ns[1].b_mark, 768 context->bw.dce.urgent_wm_ns[1].b_mark,
768 context->bw.dce.urgent_wm_ns[1].a_mark, 769 context->bw.dce.urgent_wm_ns[1].a_mark,
769 context->bw.dce.stutter_exit_wm_ns[1].b_mark, 770 context->bw.dce.stutter_exit_wm_ns[1].b_mark,
770 context->bw.dce.stutter_exit_wm_ns[1].a_mark); 771 context->bw.dce.stutter_exit_wm_ns[1].a_mark,
771 dm_logger_append(&log_entry,
772 "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
773 "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
774 context->bw.dce.nbp_state_change_wm_ns[2].b_mark, 772 context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
775 context->bw.dce.nbp_state_change_wm_ns[2].a_mark, 773 context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
776 context->bw.dce.urgent_wm_ns[2].b_mark, 774 context->bw.dce.urgent_wm_ns[2].b_mark,
777 context->bw.dce.urgent_wm_ns[2].a_mark, 775 context->bw.dce.urgent_wm_ns[2].a_mark,
778 context->bw.dce.stutter_exit_wm_ns[2].b_mark, 776 context->bw.dce.stutter_exit_wm_ns[2].b_mark,
779 context->bw.dce.stutter_exit_wm_ns[2].a_mark, 777 context->bw.dce.stutter_exit_wm_ns[2].a_mark,
780 context->bw.dce.stutter_mode_enable); 778 context->bw.dce.stutter_mode_enable,
781 dm_logger_append(&log_entry,
782 "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
783 "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
784 context->bw.dce.cpuc_state_change_enable, 779 context->bw.dce.cpuc_state_change_enable,
785 context->bw.dce.cpup_state_change_enable, 780 context->bw.dce.cpup_state_change_enable,
786 context->bw.dce.nbp_state_change_enable, 781 context->bw.dce.nbp_state_change_enable,
@@ -790,7 +785,6 @@ bool dce112_validate_bandwidth(
790 context->bw.dce.sclk_deep_sleep_khz, 785 context->bw.dce.sclk_deep_sleep_khz,
791 context->bw.dce.yclk_khz, 786 context->bw.dce.yclk_khz,
792 context->bw.dce.blackout_recovery_time_us); 787 context->bw.dce.blackout_recovery_time_us);
793 dm_logger_close(&log_entry);
794 } 788 }
795 return result; 789 return result;
796} 790}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 742fd497ed00..bf8b68f8db4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -445,10 +445,10 @@ void dpp1_set_cursor_position(
445 uint32_t width) 445 uint32_t width)
446{ 446{
447 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 447 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
448 int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start; 448 int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
449 uint32_t cur_en = pos->enable ? 1 : 0; 449 uint32_t cur_en = pos->enable ? 1 : 0;
450 450
451 if (src_x_offset >= (int)param->viewport_width) 451 if (src_x_offset >= (int)param->viewport.width)
452 cur_en = 0; /* not visible beyond right edge*/ 452 cur_en = 0; /* not visible beyond right edge*/
453 453
454 if (src_x_offset + (int)width <= 0) 454 if (src_x_offset + (int)width <= 0)
@@ -459,6 +459,18 @@ void dpp1_set_cursor_position(
459 459
460} 460}
461 461
462void dpp1_cnv_set_optional_cursor_attributes(
463 struct dpp *dpp_base,
464 struct dpp_cursor_attributes *attr)
465{
466 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
467
468 if (attr) {
469 REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
470 REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
471 }
472}
473
462void dpp1_dppclk_control( 474void dpp1_dppclk_control(
463 struct dpp *dpp_base, 475 struct dpp *dpp_base,
464 bool dppclk_div, 476 bool dppclk_div,
@@ -499,6 +511,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
499 .dpp_full_bypass = dpp1_full_bypass, 511 .dpp_full_bypass = dpp1_full_bypass,
500 .set_cursor_attributes = dpp1_set_cursor_attributes, 512 .set_cursor_attributes = dpp1_set_cursor_attributes,
501 .set_cursor_position = dpp1_set_cursor_position, 513 .set_cursor_position = dpp1_set_cursor_position,
514 .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
502 .dpp_dppclk_control = dpp1_dppclk_control, 515 .dpp_dppclk_control = dpp1_dppclk_control,
503 .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, 516 .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
504}; 517};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index e862cafa6501..e2889e61b18c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -119,6 +119,7 @@
119 SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ 119 SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
120 SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ 120 SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
121 SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ 121 SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
122 SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
122 SRI(DPP_CONTROL, DPP_TOP, id), \ 123 SRI(DPP_CONTROL, DPP_TOP, id), \
123 SRI(CM_HDR_MULT_COEF, CM, id) 124 SRI(CM_HDR_MULT_COEF, CM, id)
124 125
@@ -324,6 +325,8 @@
324 TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ 325 TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
325 TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ 326 TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
326 TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ 327 TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
328 TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \
329 TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \
327 TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \ 330 TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
328 TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh) 331 TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
329 332
@@ -1076,7 +1079,9 @@
1076 type CUR0_COLOR1; \ 1079 type CUR0_COLOR1; \
1077 type DPPCLK_RATE_CONTROL; \ 1080 type DPPCLK_RATE_CONTROL; \
1078 type DPP_CLOCK_ENABLE; \ 1081 type DPP_CLOCK_ENABLE; \
1079 type CM_HDR_MULT_COEF; 1082 type CM_HDR_MULT_COEF; \
1083 type CUR0_FP_BIAS; \
1084 type CUR0_FP_SCALE;
1080 1085
1081struct dcn_dpp_shift { 1086struct dcn_dpp_shift {
1082 TF_REG_FIELD_LIST(uint8_t) 1087 TF_REG_FIELD_LIST(uint8_t)
@@ -1329,7 +1334,8 @@ struct dcn_dpp_mask {
1329 uint32_t CURSOR0_COLOR0; \ 1334 uint32_t CURSOR0_COLOR0; \
1330 uint32_t CURSOR0_COLOR1; \ 1335 uint32_t CURSOR0_COLOR1; \
1331 uint32_t DPP_CONTROL; \ 1336 uint32_t DPP_CONTROL; \
1332 uint32_t CM_HDR_MULT_COEF; 1337 uint32_t CM_HDR_MULT_COEF; \
1338 uint32_t CURSOR0_FP_SCALE_BIAS;
1333 1339
1334struct dcn_dpp_registers { 1340struct dcn_dpp_registers {
1335 DPP_COMMON_REG_VARIABLE_LIST 1341 DPP_COMMON_REG_VARIABLE_LIST
@@ -1370,6 +1376,10 @@ void dpp1_set_cursor_position(
1370 const struct dc_cursor_mi_param *param, 1376 const struct dc_cursor_mi_param *param,
1371 uint32_t width); 1377 uint32_t width);
1372 1378
1379void dpp1_cnv_set_optional_cursor_attributes(
1380 struct dpp *dpp_base,
1381 struct dpp_cursor_attributes *attr);
1382
1373bool dpp1_dscl_is_lb_conf_valid( 1383bool dpp1_dscl_is_lb_conf_valid(
1374 int ceil_vratio, 1384 int ceil_vratio,
1375 int num_partitions, 1385 int num_partitions,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index f862fd148cca..4a863a5dab41 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -621,6 +621,10 @@ static void dpp1_dscl_set_manual_ratio_init(
621static void dpp1_dscl_set_recout( 621static void dpp1_dscl_set_recout(
622 struct dcn10_dpp *dpp, const struct rect *recout) 622 struct dcn10_dpp *dpp, const struct rect *recout)
623{ 623{
624 int visual_confirm_on = 0;
625 if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
626 visual_confirm_on = 1;
627
624 REG_SET_2(RECOUT_START, 0, 628 REG_SET_2(RECOUT_START, 0,
625 /* First pixel of RECOUT */ 629 /* First pixel of RECOUT */
626 RECOUT_START_X, recout->x, 630 RECOUT_START_X, recout->x,
@@ -632,8 +636,7 @@ static void dpp1_dscl_set_recout(
632 RECOUT_WIDTH, recout->width, 636 RECOUT_WIDTH, recout->width,
633 /* Number of RECOUT vertical lines */ 637 /* Number of RECOUT vertical lines */
634 RECOUT_HEIGHT, recout->height 638 RECOUT_HEIGHT, recout->height
635 - dpp->base.ctx->dc->debug.surface_visual_confirm * 4 * 639 - visual_confirm_on * 4 * (dpp->base.inst + 1));
636 (dpp->base.inst + 1));
637} 640}
638 641
639/* Main function to program scaler and line buffer in manual scaling mode */ 642/* Main function to program scaler and line buffer in manual scaling mode */
@@ -655,6 +658,12 @@ void dpp1_dscl_set_scaler_manual_scale(
655 658
656 dpp->scl_data = *scl_data; 659 dpp->scl_data = *scl_data;
657 660
661 /* Autocal off */
662 REG_SET_3(DSCL_AUTOCAL, 0,
663 AUTOCAL_MODE, AUTOCAL_MODE_OFF,
664 AUTOCAL_NUM_PIPE, 0,
665 AUTOCAL_PIPE_ID, 0);
666
658 /* Recout */ 667 /* Recout */
659 dpp1_dscl_set_recout(dpp, &scl_data->recout); 668 dpp1_dscl_set_recout(dpp, &scl_data->recout);
660 669
@@ -678,12 +687,6 @@ void dpp1_dscl_set_scaler_manual_scale(
678 if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) 687 if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
679 return; 688 return;
680 689
681 /* Autocal off */
682 REG_SET_3(DSCL_AUTOCAL, 0,
683 AUTOCAL_MODE, AUTOCAL_MODE_OFF,
684 AUTOCAL_NUM_PIPE, 0,
685 AUTOCAL_PIPE_ID, 0);
686
687 /* Black offsets */ 690 /* Black offsets */
688 if (ycbcr) 691 if (ycbcr)
689 REG_SET_2(SCL_BLACK_OFFSET, 0, 692 REG_SET_2(SCL_BLACK_OFFSET, 0,
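Two changes in dpp1_dscl above: the recout shrink for visual confirmation now derives a 0/1 flag from the enum instead of multiplying by the old bool field, and the autocal-off programming is hoisted above the 444-bypass early return so autocal is always disabled. The shrink itself is unchanged; restated from the diff as a helper:

/* Sketch of the per-pipe visual-confirm shrink kept by the hunk above:
 * each pipe's recout loses 4 * (inst + 1) lines so stacked pipes stay
 * individually visible on screen.
 */
static int recout_height_sketch(int height, int pipe_inst, int visual_confirm_on)
{
	return height - visual_confirm_on * 4 * (pipe_inst + 1);
}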
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 93f52c58bc69..332354ca6529 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -152,16 +152,14 @@ void hubp1_program_tiling(
152 PIPE_ALIGNED, info->gfx9.pipe_aligned); 152 PIPE_ALIGNED, info->gfx9.pipe_aligned);
153} 153}
154 154
155void hubp1_program_size_and_rotation( 155void hubp1_program_size(
156 struct hubp *hubp, 156 struct hubp *hubp,
157 enum dc_rotation_angle rotation,
158 enum surface_pixel_format format, 157 enum surface_pixel_format format,
159 const union plane_size *plane_size, 158 const union plane_size *plane_size,
160 struct dc_plane_dcc_param *dcc, 159 struct dc_plane_dcc_param *dcc)
161 bool horizontal_mirror)
162{ 160{
163 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); 161 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
164 uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror; 162 uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
165 163
166 /* Program data and meta surface pitch (calculation from addrlib) 164 /* Program data and meta surface pitch (calculation from addrlib)
167 * 444 or 420 luma 165 * 444 or 420 luma
@@ -192,13 +190,22 @@ void hubp1_program_size_and_rotation(
192 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 190 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
193 REG_UPDATE_2(DCSURF_SURFACE_PITCH_C, 191 REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
194 PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c); 192 PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
193}
194
195void hubp1_program_rotation(
196 struct hubp *hubp,
197 enum dc_rotation_angle rotation,
198 bool horizontal_mirror)
199{
200 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
201 uint32_t mirror;
202
195 203
196 if (horizontal_mirror) 204 if (horizontal_mirror)
197 mirror = 1; 205 mirror = 1;
198 else 206 else
199 mirror = 0; 207 mirror = 0;
200 208
201
202 /* Program rotation angle and horz mirror - no mirror */ 209 /* Program rotation angle and horz mirror - no mirror */
203 if (rotation == ROTATION_ANGLE_0) 210 if (rotation == ROTATION_ANGLE_0)
204 REG_UPDATE_2(DCSURF_SURFACE_CONFIG, 211 REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
@@ -450,9 +457,6 @@ bool hubp1_program_surface_flip_and_addr(
450 457
451 hubp->request_address = *address; 458 hubp->request_address = *address;
452 459
453 if (flip_immediate)
454 hubp->current_address = *address;
455
456 return true; 460 return true;
457} 461}
458 462
@@ -481,8 +485,8 @@ void hubp1_program_surface_config(
481{ 485{
482 hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks); 486 hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
483 hubp1_program_tiling(hubp, tiling_info, format); 487 hubp1_program_tiling(hubp, tiling_info, format);
484 hubp1_program_size_and_rotation( 488 hubp1_program_size(hubp, format, plane_size, dcc);
485 hubp, rotation, format, plane_size, dcc, horizontal_mirror); 489 hubp1_program_rotation(hubp, rotation, horizontal_mirror);
486 hubp1_program_pixel_format(hubp, format); 490 hubp1_program_pixel_format(hubp, format);
487} 491}
488 492
@@ -688,7 +692,6 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
688 if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part) 692 if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
689 return true; 693 return true;
690 694
691 hubp->current_address = hubp->request_address;
692 return false; 695 return false;
693} 696}
694 697
@@ -1061,9 +1064,11 @@ void hubp1_cursor_set_position(
1061 const struct dc_cursor_mi_param *param) 1064 const struct dc_cursor_mi_param *param)
1062{ 1065{
1063 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); 1066 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
1064 int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start; 1067 int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
1068 int x_hotspot = pos->x_hotspot;
1069 int y_hotspot = pos->y_hotspot;
1070 uint32_t dst_x_offset;
1065 uint32_t cur_en = pos->enable ? 1 : 0; 1071 uint32_t cur_en = pos->enable ? 1 : 0;
1066 uint32_t dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
1067 1072
1068 /* 1073 /*
1069 * Guard against cursor_set_position() being called with invalid 1074 * Guard against cursor_set_position() being called with invalid
@@ -1075,6 +1080,18 @@ void hubp1_cursor_set_position(
1075 if (hubp->curs_attr.address.quad_part == 0) 1080 if (hubp->curs_attr.address.quad_part == 0)
1076 return; 1081 return;
1077 1082
1083 if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
1084 src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
1085 y_hotspot = pos->x_hotspot;
1086 x_hotspot = pos->y_hotspot;
1087 }
1088
1089 if (param->mirror) {
1090 x_hotspot = param->viewport.width - x_hotspot;
1091 src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
1092 }
1093
1094 dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
1078 dst_x_offset *= param->ref_clk_khz; 1095 dst_x_offset *= param->ref_clk_khz;
1079 dst_x_offset /= param->pixel_clk_khz; 1096 dst_x_offset /= param->pixel_clk_khz;
1080 1097
@@ -1085,7 +1102,7 @@ void hubp1_cursor_set_position(
1085 dc_fixpt_from_int(dst_x_offset), 1102 dc_fixpt_from_int(dst_x_offset),
1086 param->h_scale_ratio)); 1103 param->h_scale_ratio));
1087 1104
1088 if (src_x_offset >= (int)param->viewport_width) 1105 if (src_x_offset >= (int)param->viewport.width)
1089 cur_en = 0; /* not visible beyond right edge*/ 1106 cur_en = 0; /* not visible beyond right edge*/
1090 1107
1091 if (src_x_offset + (int)hubp->curs_attr.width <= 0) 1108 if (src_x_offset + (int)hubp->curs_attr.width <= 0)
@@ -1102,8 +1119,8 @@ void hubp1_cursor_set_position(
1102 CURSOR_Y_POSITION, pos->y); 1119 CURSOR_Y_POSITION, pos->y);
1103 1120
1104 REG_SET_2(CURSOR_HOT_SPOT, 0, 1121 REG_SET_2(CURSOR_HOT_SPOT, 0,
1105 CURSOR_HOT_SPOT_X, pos->x_hotspot, 1122 CURSOR_HOT_SPOT_X, x_hotspot,
1106 CURSOR_HOT_SPOT_Y, pos->y_hotspot); 1123 CURSOR_HOT_SPOT_Y, y_hotspot);
1107 1124
1108 REG_SET(CURSOR_DST_OFFSET, 0, 1125 REG_SET(CURSOR_DST_OFFSET, 0,
1109 CURSOR_DST_X_OFFSET, dst_x_offset); 1126 CURSOR_DST_X_OFFSET, dst_x_offset);
@@ -1125,7 +1142,7 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
1125 REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst); 1142 REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
1126} 1143}
1127 1144
1128static struct hubp_funcs dcn10_hubp_funcs = { 1145static const struct hubp_funcs dcn10_hubp_funcs = {
1129 .hubp_program_surface_flip_and_addr = 1146 .hubp_program_surface_flip_and_addr =
1130 hubp1_program_surface_flip_and_addr, 1147 hubp1_program_surface_flip_and_addr,
1131 .hubp_program_surface_config = 1148 .hubp_program_surface_config =
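hubp1_cursor_set_position now compensates for plane rotation and horizontal mirror before clamping the cursor offset: under 90/270 rotation the x/y hotspots swap and the offset is taken from the y position, and mirroring reflects both across the viewport. A sketch of that adjustment; the struct is a trimmed stand-in for dc_cursor_mi_param, and the arithmetic restates the diff:

struct cursor_param_sketch {
	int viewport_x, viewport_width;
	int rotated_90_or_270;
	int mirror;
};

static void cursor_adjust_sketch(const struct cursor_param_sketch *p,
				 int pos_x, int pos_y,
				 int *x_hot, int *y_hot, int *src_x_offset)
{
	*src_x_offset = pos_x - *x_hot - p->viewport_x;

	if (p->rotated_90_or_270) {
		/* Offset comes from y, using the original y hotspot ... */
		int tmp = *x_hot;

		*src_x_offset = pos_y - *y_hot - p->viewport_x;
		*x_hot = *y_hot;	/* ... then the hotspots swap. */
		*y_hot = tmp;
	}

	if (p->mirror) {
		/* Reflect hotspot and offset across the viewport. */
		*x_hot = p->viewport_width - *x_hot;
		*src_x_offset = p->viewport_x + p->viewport_width
				- *src_x_offset;
	}
}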
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index d901d5092969..f689feace82d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -268,8 +268,6 @@
268 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\ 268 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
269 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\ 269 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
270 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\ 270 HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
271 HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
272 HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
273 HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\ 271 HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
274 HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\ 272 HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
275 HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\ 273 HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
@@ -388,6 +386,8 @@
388#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\ 386#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
389 HUBP_MASK_SH_LIST_DCN(mask_sh),\ 387 HUBP_MASK_SH_LIST_DCN(mask_sh),\
390 HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\ 388 HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
389 HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
390 HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
391 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\ 391 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
392 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\ 392 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
393 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\ 393 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
@@ -679,12 +679,15 @@ void hubp1_program_pixel_format(
679 struct hubp *hubp, 679 struct hubp *hubp,
680 enum surface_pixel_format format); 680 enum surface_pixel_format format);
681 681
682void hubp1_program_size_and_rotation( 682void hubp1_program_size(
683 struct hubp *hubp, 683 struct hubp *hubp,
684 enum dc_rotation_angle rotation,
685 enum surface_pixel_format format, 684 enum surface_pixel_format format,
686 const union plane_size *plane_size, 685 const union plane_size *plane_size,
687 struct dc_plane_dcc_param *dcc, 686 struct dc_plane_dcc_param *dcc);
687
688void hubp1_program_rotation(
689 struct hubp *hubp,
690 enum dc_rotation_angle rotation,
688 bool horizontal_mirror); 691 bool horizontal_mirror);
689 692
690void hubp1_program_tiling( 693void hubp1_program_tiling(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 3b2cb2d3b8a6..c87f6e603055 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -834,7 +834,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
834} 834}
835 835
836 836
837static void dcn10_verify_allow_pstate_change_high(struct dc *dc) 837void dcn10_verify_allow_pstate_change_high(struct dc *dc)
838{ 838{
839 static bool should_log_hw_state; /* prevent hw state log by default */ 839 static bool should_log_hw_state; /* prevent hw state log by default */
840 840
@@ -1157,12 +1157,19 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
1157 1157
1158 if (plane_state == NULL) 1158 if (plane_state == NULL)
1159 return; 1159 return;
1160
1160 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr); 1161 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1162
1161 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr( 1163 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1162 pipe_ctx->plane_res.hubp, 1164 pipe_ctx->plane_res.hubp,
1163 &plane_state->address, 1165 &plane_state->address,
1164 plane_state->flip_immediate); 1166 plane_state->flip_immediate);
1167
1165 plane_state->status.requested_address = plane_state->address; 1168 plane_state->status.requested_address = plane_state->address;
1169
1170 if (plane_state->flip_immediate)
1171 plane_state->status.current_address = plane_state->address;
1172
1166 if (addr_patched) 1173 if (addr_patched)
1167 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr; 1174 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1168} 1175}
@@ -1768,6 +1775,43 @@ static void dcn10_get_surface_visual_confirm_color(
1768 } 1775 }
1769} 1776}
1770 1777
1778static void dcn10_get_hdr_visual_confirm_color(
1779 struct pipe_ctx *pipe_ctx,
1780 struct tg_color *color)
1781{
1782 uint32_t color_value = MAX_TG_COLOR_VALUE;
1783
1784 // Determine the overscan color based on the top-most (desktop) plane's context
1785 struct pipe_ctx *top_pipe_ctx = pipe_ctx;
1786
1787 while (top_pipe_ctx->top_pipe != NULL)
1788 top_pipe_ctx = top_pipe_ctx->top_pipe;
1789
1790 switch (top_pipe_ctx->plane_res.scl_data.format) {
1791 case PIXEL_FORMAT_ARGB2101010:
1792 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
1793 /* HDR10, ARGB2101010 - set boarder color to red */
1794 color->color_r_cr = color_value;
1795 }
1796 break;
1797 case PIXEL_FORMAT_FP16:
1798 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
1799 /* HDR10, FP16 - set border color to blue */
1800 color->color_b_cb = color_value;
1801 } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
1802 /* FreeSync 2 HDR - set border color to green */
1803 color->color_g_y = color_value;
1804 }
1805 break;
1806 default:
1808 /* SDR - set border color to gray */
1808 color->color_r_cr = color_value/2;
1809 color->color_b_cb = color_value/2;
1810 color->color_g_y = color_value/2;
1811 break;
1812 }
1813}
1814
1771static uint16_t fixed_point_to_int_frac( 1815static uint16_t fixed_point_to_int_frac(
1772 struct fixed31_32 arg, 1816 struct fixed31_32 arg,
1773 uint8_t integer_bits, 1817 uint8_t integer_bits,
@@ -1848,11 +1892,10 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
1848 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); 1892 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
1849} 1893}
1850 1894
1851 1895static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
1852static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
1853{ 1896{
1854 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1897 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1855 struct mpcc_blnd_cfg blnd_cfg; 1898 struct mpcc_blnd_cfg blnd_cfg = {0};
1856 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; 1899 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
1857 int mpcc_id; 1900 int mpcc_id;
1858 struct mpcc *new_mpcc; 1901 struct mpcc *new_mpcc;
@@ -1863,13 +1906,17 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
1863 1906
1864 /* TODO: proper fix once fpga works */ 1907 /* TODO: proper fix once fpga works */
1865 1908
1866 if (dc->debug.surface_visual_confirm) 1909 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
1910 dcn10_get_hdr_visual_confirm_color(
1911 pipe_ctx, &blnd_cfg.black_color);
1912 } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
1867 dcn10_get_surface_visual_confirm_color( 1913 dcn10_get_surface_visual_confirm_color(
1868 pipe_ctx, &blnd_cfg.black_color); 1914 pipe_ctx, &blnd_cfg.black_color);
1869 else 1915 } else {
1870 color_space_to_black_color( 1916 color_space_to_black_color(
1871 dc, pipe_ctx->stream->output_color_space, 1917 dc, pipe_ctx->stream->output_color_space,
1872 &blnd_cfg.black_color); 1918 &blnd_cfg.black_color);
1919 }
1873 1920
1874 if (per_pixel_alpha) 1921 if (per_pixel_alpha)
1875 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; 1922 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
@@ -1994,7 +2041,7 @@ static void update_dchubp_dpp(
1994 2041
1995 if (plane_state->update_flags.bits.full_update || 2042 if (plane_state->update_flags.bits.full_update ||
1996 plane_state->update_flags.bits.per_pixel_alpha_change) 2043 plane_state->update_flags.bits.per_pixel_alpha_change)
1997 update_mpcc(dc, pipe_ctx); 2044 dc->hwss.update_mpcc(dc, pipe_ctx);
1998 2045
1999 if (plane_state->update_flags.bits.full_update || 2046 if (plane_state->update_flags.bits.full_update ||
2000 plane_state->update_flags.bits.per_pixel_alpha_change || 2047 plane_state->update_flags.bits.per_pixel_alpha_change ||
@@ -2104,6 +2151,33 @@ static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2104 pipe_ctx->plane_res.dpp, hw_mult); 2151 pipe_ctx->plane_res.dpp, hw_mult);
2105} 2152}
2106 2153
2154void dcn10_program_pipe(
2155 struct dc *dc,
2156 struct pipe_ctx *pipe_ctx,
2157 struct dc_state *context)
2158{
2159 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2160 dcn10_enable_plane(dc, pipe_ctx, context);
2161
2162 update_dchubp_dpp(dc, pipe_ctx, context);
2163
2164 set_hdr_multiplier(pipe_ctx);
2165
2166 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2167 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2168 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2169 dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
2170
2171 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2172 * only do gamma programming for full update.
2173 * TODO: This can be further optimized/cleaned up
2174 * Always call this for now since it does memcmp inside before
2175 * doing heavy calculation and programming
2176 */
2177 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2178 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
2179}
2180
2107static void program_all_pipe_in_tree( 2181static void program_all_pipe_in_tree(
2108 struct dc *dc, 2182 struct dc *dc,
2109 struct pipe_ctx *pipe_ctx, 2183 struct pipe_ctx *pipe_ctx,
@@ -2122,29 +2196,11 @@ static void program_all_pipe_in_tree(
2122 pipe_ctx->stream_res.tg); 2196 pipe_ctx->stream_res.tg);
2123 2197
2124 dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); 2198 dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
2199
2125 } 2200 }
2126 2201
2127 if (pipe_ctx->plane_state != NULL) { 2202 if (pipe_ctx->plane_state != NULL) {
2128 if (pipe_ctx->plane_state->update_flags.bits.full_update) 2203 dcn10_program_pipe(dc, pipe_ctx, context);
2129 dcn10_enable_plane(dc, pipe_ctx, context);
2130
2131 update_dchubp_dpp(dc, pipe_ctx, context);
2132
2133 set_hdr_multiplier(pipe_ctx);
2134
2135 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2136 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2137 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2138 dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
2139
2140 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2141 * only do gamma programming for full update.
2142 * TODO: This can be further optimized/cleaned up
2143 * Always call this for now since it does memcmp inside before
2144 * doing heavy calculation and programming
2145 */
2146 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2147 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
2148 } 2204 }
2149 2205
2150 if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) { 2206 if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) {
@@ -2269,7 +2325,7 @@ static void dcn10_apply_ctx_for_surface(
2269 old_pipe_ctx->plane_state && 2325 old_pipe_ctx->plane_state &&
2270 old_pipe_ctx->stream_res.tg == tg) { 2326 old_pipe_ctx->stream_res.tg == tg) {
2271 2327
2272 hwss1_plane_atomic_disconnect(dc, old_pipe_ctx); 2328 dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
2273 removed_pipe[i] = true; 2329 removed_pipe[i] = true;
2274 2330
2275 DC_LOG_DC("Reset mpcc for pipe %d\n", 2331 DC_LOG_DC("Reset mpcc for pipe %d\n",
@@ -2484,16 +2540,20 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2484{ 2540{
2485 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2541 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2486 struct timing_generator *tg = pipe_ctx->stream_res.tg; 2542 struct timing_generator *tg = pipe_ctx->stream_res.tg;
2543 bool flip_pending;
2487 2544
2488 if (plane_state == NULL) 2545 if (plane_state == NULL)
2489 return; 2546 return;
2490 2547
2491 plane_state->status.is_flip_pending = 2548 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
2492 pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
2493 pipe_ctx->plane_res.hubp); 2549 pipe_ctx->plane_res.hubp);
2494 2550
2495 plane_state->status.current_address = pipe_ctx->plane_res.hubp->current_address; 2551 plane_state->status.is_flip_pending = flip_pending;
2496 if (pipe_ctx->plane_res.hubp->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO && 2552
2553 if (!flip_pending)
2554 plane_state->status.current_address = plane_state->status.requested_address;
2555
2556 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
2497 tg->funcs->is_stereo_left_eye) { 2557 tg->funcs->is_stereo_left_eye) {
2498 plane_state->status.is_right_eye = 2558 plane_state->status.is_right_eye =
2499 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg); 2559 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
@@ -2520,9 +2580,11 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2520 struct dc_cursor_mi_param param = { 2580 struct dc_cursor_mi_param param = {
2521 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz, 2581 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
2522 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz, 2582 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
2523 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x, 2583 .viewport = pipe_ctx->plane_res.scl_data.viewport,
2524 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width, 2584 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
2525 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz 2585 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
2586 .rotation = pipe_ctx->plane_state->rotation,
2587 .mirror = pipe_ctx->plane_state->horizontal_mirror
2526 }; 2588 };
2527 2589
2528 if (pipe_ctx->plane_state->address.type 2590 if (pipe_ctx->plane_state->address.type
@@ -2546,6 +2608,33 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
2546 pipe_ctx->plane_res.dpp, attributes->color_format); 2608 pipe_ctx->plane_res.dpp, attributes->color_format);
2547} 2609}
2548 2610
2611static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
2612{
2613 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
2614 struct fixed31_32 multiplier;
2615 struct dpp_cursor_attributes opt_attr = { 0 };
2616 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
2617 struct custom_float_format fmt;
2618
2619 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
2620 return;
2621
2622 fmt.exponenta_bits = 5;
2623 fmt.mantissa_bits = 10;
2624 fmt.sign = true;
2625
2626 if (sdr_white_level > 80) {
2627 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
2628 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
2629 }
2630
2631 opt_attr.scale = hw_scale;
2632 opt_attr.bias = 0;
2633
2634 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
2635 pipe_ctx->plane_res.dpp, &opt_attr);
2636}
2637
2549static const struct hw_sequencer_funcs dcn10_funcs = { 2638static const struct hw_sequencer_funcs dcn10_funcs = {
2550 .program_gamut_remap = program_gamut_remap, 2639 .program_gamut_remap = program_gamut_remap,
2551 .program_csc_matrix = program_csc_matrix, 2640 .program_csc_matrix = program_csc_matrix,
@@ -2553,7 +2642,9 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2553 .apply_ctx_to_hw = dce110_apply_ctx_to_hw, 2642 .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
2554 .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, 2643 .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
2555 .update_plane_addr = dcn10_update_plane_addr, 2644 .update_plane_addr = dcn10_update_plane_addr,
2645 .plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
2556 .update_dchub = dcn10_update_dchub, 2646 .update_dchub = dcn10_update_dchub,
2647 .update_mpcc = dcn10_update_mpcc,
2557 .update_pending_status = dcn10_update_pending_status, 2648 .update_pending_status = dcn10_update_pending_status,
2558 .set_input_transfer_func = dcn10_set_input_transfer_func, 2649 .set_input_transfer_func = dcn10_set_input_transfer_func,
2559 .set_output_transfer_func = dcn10_set_output_transfer_func, 2650 .set_output_transfer_func = dcn10_set_output_transfer_func,
@@ -2591,7 +2682,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2591 .edp_power_control = hwss_edp_power_control, 2682 .edp_power_control = hwss_edp_power_control,
2592 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, 2683 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
2593 .set_cursor_position = dcn10_set_cursor_position, 2684 .set_cursor_position = dcn10_set_cursor_position,
2594 .set_cursor_attribute = dcn10_set_cursor_attribute 2685 .set_cursor_attribute = dcn10_set_cursor_attribute,
2686 .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level
2595}; 2687};
2596 2688
2597 2689
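The new dcn10_set_cursor_sdr_white_level above scales the cursor when SDR content is boosted past the assumed 80-nit reference white, converting the ratio into the FP16-style custom float (5 exponent bits, 10 mantissa bits, signed; 0x3c00 encodes 1.0) that CURSOR0_FP_SCALE_BIAS expects. The ratio itself, restated in plain arithmetic:

/* Sketch of the SDR white-level ratio programmed above; e.g. 160 nits
 * programs roughly 2.0 into CUR0_FP_SCALE after float conversion.
 */
static double cursor_scale_sketch(unsigned int sdr_white_level_nits)
{
	if (sdr_white_level_nits <= 80)
		return 1.0;	/* keep the default 1.0 multiplier */
	return (double)sdr_white_level_nits / 80.0;
}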
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 44f734b73f9e..7139fb73e966 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -39,4 +39,11 @@ bool is_rgb_cspace(enum dc_color_space output_color_space);
39 39
40void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); 40void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
41 41
42void dcn10_verify_allow_pstate_change_high(struct dc *dc);
43
44void dcn10_program_pipe(
45 struct dc *dc,
46 struct pipe_ctx *pipe_ctx,
47 struct dc_state *context);
48
42#endif /* __DC_HWSS_DCN10_H__ */ 49#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index fd9dc70190a8..6f675206a136 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -65,11 +65,6 @@ enum {
 	DP_MST_UPDATE_MAX_RETRY = 50
 };
 
-
-
-static void aux_initialize(struct dcn10_link_encoder *enc10);
-
-
 static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
 	.validate_output_with_stream =
 		dcn10_link_encoder_validate_output_with_stream,
@@ -445,12 +440,11 @@ static uint8_t get_frontend_source(
 	}
 }
 
-static void configure_encoder(
+void configure_encoder(
 	struct dcn10_link_encoder *enc10,
 	const struct dc_link_settings *link_settings)
 {
 	/* set number of lanes */
-
 	REG_SET(DP_CONFIG, 0,
 		DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
 
@@ -602,6 +596,9 @@ static bool dcn10_link_encoder_validate_hdmi_output(
 	if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
 		adjusted_pix_clk_khz >= 300000)
 		return false;
+	if (enc10->base.ctx->dc->debug.hdmi20_disable &&
+		crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+		return false;
 	return true;
 }
 
@@ -734,6 +731,9 @@ void dcn10_link_encoder_construct(
 			__func__,
 			result);
 	}
+	if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+		enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+	}
 }
 
 bool dcn10_link_encoder_validate_output_with_stream(
@@ -812,7 +812,7 @@ void dcn10_link_encoder_hw_init(
 	ASSERT(result == BP_RESULT_OK);
 
 	}
-	aux_initialize(enc10);
+	dcn10_aux_initialize(enc10);
 
 	/* reinitialize HPD.
 	 * hpd_initialize() will pass DIG_FE id to HW context.
@@ -1349,8 +1349,7 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
 		FN(reg, f1), v1,\
 		FN(reg, f2), v2)
 
-static void aux_initialize(
-	struct dcn10_link_encoder *enc10)
+void dcn10_aux_initialize(struct dcn10_link_encoder *enc10)
 {
 	enum hpd_source_id hpd_source = enc10->base.hpd_source;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 2a97cdb2cfbb..49ead12b2532 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -42,6 +42,7 @@
 #define LE_DCN_COMMON_REG_LIST(id) \
 	SRI(DIG_BE_CNTL, DIG, id), \
 	SRI(DIG_BE_EN_CNTL, DIG, id), \
+	SRI(TMDS_CTL_BITS, DIG, id), \
 	SRI(DP_CONFIG, DP, id), \
 	SRI(DP_DPHY_CNTL, DP, id), \
 	SRI(DP_DPHY_PRBS_CNTL, DP, id), \
@@ -64,6 +65,7 @@
 	SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
 	SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
 
+
 #define LE_DCN10_REG_LIST(id)\
 	LE_DCN_COMMON_REG_LIST(id)
 
@@ -100,6 +102,7 @@ struct dcn10_link_enc_registers {
 	uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
 	uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
 	uint32_t DP_SEC_CNTL1;
+	uint32_t TMDS_CTL_BITS;
 };
 
 #define LE_SF(reg_name, field_name, post_fix)\
@@ -110,6 +113,7 @@ struct dcn10_link_enc_registers {
 	LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
 	LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
 	LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
+	LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
 	LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
 	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
 	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\
@@ -198,10 +202,11 @@ struct dcn10_link_enc_registers {
 	type DP_MSE_SAT_SLOT_COUNT3;\
 	type DP_MSE_SAT_UPDATE;\
 	type DP_MSE_16_MTP_KEEPOUT;\
+	type DC_HPD_EN;\
+	type TMDS_CTL0;\
 	type AUX_HPD_SEL;\
 	type AUX_LS_READ_EN;\
-	type AUX_RX_RECEIVE_WINDOW;\
-	type DC_HPD_EN
+	type AUX_RX_RECEIVE_WINDOW
 
 struct dcn10_link_enc_shift {
 	DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
@@ -266,6 +271,10 @@ void dcn10_link_encoder_setup(
 	struct link_encoder *enc,
 	enum signal_type signal);
 
+void configure_encoder(
+	struct dcn10_link_encoder *enc10,
+	const struct dc_link_settings *link_settings);
+
 /* enables TMDS PHY output */
 /* TODO: still need depth or just pass in adjusted pixel clock? */
 void dcn10_link_encoder_enable_tmds_output(
@@ -327,4 +336,6 @@ void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
 
 bool dcn10_is_dig_enabled(struct link_encoder *enc);
 
+void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
+
 #endif /* __DC_LINK_ENCODER__DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 9ca51ae46de7..958994edf2c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -428,7 +428,7 @@ void mpc1_read_mpcc_state(
 		MPCC_BUSY, &s->busy);
 }
 
-const struct mpc_funcs dcn10_mpc_funcs = {
+static const struct mpc_funcs dcn10_mpc_funcs = {
 	.read_mpcc_state = mpc1_read_mpcc_state,
 	.insert_plane = mpc1_insert_plane,
 	.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 77a1a9d541a4..ab958cff3b76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -385,7 +385,7 @@ void opp1_destroy(struct output_pixel_processor **opp)
 	*opp = NULL;
 }
 
-static struct opp_funcs dcn10_opp_funcs = {
+static const struct opp_funcs dcn10_opp_funcs = {
 	.opp_set_dyn_expansion = opp1_set_dyn_expansion,
 	.opp_program_fmt = opp1_program_fmt,
 	.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e6a3ade154b9..411f89218e01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -1324,6 +1324,72 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
 	return (underflow_occurred == 1);
 }
 
+bool optc1_configure_crc(struct timing_generator *optc,
+			  const struct crc_params *params)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	/* Cannot configure crc on a CRTC that is disabled */
+	if (!optc1_is_tg_enabled(optc))
+		return false;
+
+	REG_WRITE(OTG_CRC_CNTL, 0);
+
+	if (!params->enable)
+		return true;
+
+	/* Program frame boundaries */
+	/* Window A x axis start and end. */
+	REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+			OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+			OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+	/* Window A y axis start and end. */
+	REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+			OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+			OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+	/* Window B x axis start and end. */
+	REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+			OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+			OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+	/* Window B y axis start and end. */
+	REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+			OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+			OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+	/* Set crc mode and selection, and enable. Only using CRC0 */
+	REG_UPDATE_3(OTG_CRC_CNTL,
+			OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+			OTG_CRC0_SELECT, params->selection,
+			OTG_CRC_EN, 1);
+
+	return true;
+}
+
+bool optc1_get_crc(struct timing_generator *optc,
+		    uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+{
+	uint32_t field = 0;
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field);
+
+	/* Early return if CRC is not enabled for this CRTC */
+	if (!field)
+		return false;
+
+	REG_GET_2(OTG_CRC0_DATA_RG,
+			CRC0_R_CR, r_cr,
+			CRC0_G_Y, g_y);
+
+	REG_GET(OTG_CRC0_DATA_B,
+			CRC0_B_CB, b_cb);
+
+	return true;
+}
+
 static const struct timing_generator_funcs dcn10_tg_funcs = {
 	.validate_timing = optc1_validate_timing,
 	.program_timing = optc1_program_timing,
@@ -1360,6 +1426,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
 	.is_tg_enabled = optc1_is_tg_enabled,
 	.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
 	.clear_optc_underflow = optc1_clear_optc_underflow,
+	.get_crc = optc1_get_crc,
+	.configure_crc = optc1_configure_crc,
 };
 
 void dcn10_timing_generator_init(struct optc *optc1)
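optc1_configure_crc() and optc1_get_crc() expose CRC capture through the generic timing_generator_funcs interface, which is what lets the DM layer back the DRM CRC debugfs machinery. A hedged usage sketch, assuming tg->funcs was populated by dcn10_timing_generator_init() and that zeroed windows bound the capture for the selected source:

	struct crc_params params = {0};
	uint32_t r_cr, g_y, b_cb;

	params.enable = true;
	params.continuous_mode = true;	/* latch a fresh CRC every frame */

	/* configure_crc() refuses a disabled CRTC and get_crc() fails until
	 * OTG_CRC_EN is actually set, so both returns must be checked. */
	if (tg->funcs->configure_crc(tg, &params) &&
	    tg->funcs->get_crc(tg, &r_cr, &g_y, &b_cb)) {
		/* r_cr/g_y/b_cb now hold the per-component CRCs from CRC0 */
	}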
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 59ed272e0c49..c1b114209fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -75,7 +75,14 @@
 	SRI(CONTROL, VTG, inst),\
 	SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
 	SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\
-	SRI(OTG_GSL_CONTROL, OTG, inst)
+	SRI(OTG_GSL_CONTROL, OTG, inst),\
+	SRI(OTG_CRC_CNTL, OTG, inst),\
+	SRI(OTG_CRC0_DATA_RG, OTG, inst),\
+	SRI(OTG_CRC0_DATA_B, OTG, inst),\
+	SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
+	SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
+	SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
+	SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst)
 
 #define TG_COMMON_REG_LIST_DCN1_0(inst) \
 	TG_COMMON_REG_LIST_DCN(inst),\
@@ -138,6 +145,13 @@ struct dcn_optc_registers {
 	uint32_t OTG_GSL_WINDOW_X;
 	uint32_t OTG_GSL_WINDOW_Y;
 	uint32_t OTG_VUPDATE_KEEPOUT;
+	uint32_t OTG_CRC_CNTL;
+	uint32_t OTG_CRC0_DATA_RG;
+	uint32_t OTG_CRC0_DATA_B;
+	uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
+	uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
+	uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
+	uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
 };
 
 #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -232,7 +246,21 @@ struct dcn_optc_registers {
 	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
 	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
 	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
-	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh)
+	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
+	SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
+	SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+	SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
+	SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
+	SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
+	SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\
+	SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\
+	SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\
+	SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\
+	SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\
+	SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
+	SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
+	SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
+	SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh)
 
 
 #define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
@@ -363,7 +391,22 @@ struct dcn_optc_registers {
 	type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
 	type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
 	type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
-	type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
+	type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\
+	type OTG_CRC_CONT_EN;\
+	type OTG_CRC0_SELECT;\
+	type OTG_CRC_EN;\
+	type CRC0_R_CR;\
+	type CRC0_G_Y;\
+	type CRC0_B_CB;\
+	type OTG_CRC0_WINDOWA_X_START;\
+	type OTG_CRC0_WINDOWA_X_END;\
+	type OTG_CRC0_WINDOWA_Y_START;\
+	type OTG_CRC0_WINDOWA_Y_END;\
+	type OTG_CRC0_WINDOWB_X_START;\
+	type OTG_CRC0_WINDOWB_X_END;\
+	type OTG_CRC0_WINDOWB_Y_START;\
+	type OTG_CRC0_WINDOWB_Y_END;
+
 
 #define TG_REG_FIELD_LIST(type) \
 	TG_REG_FIELD_LIST_DCN1_0(type)
@@ -511,4 +554,15 @@ bool optc1_get_otg_active_size(struct timing_generator *optc,
 	uint32_t *otg_active_width,
 	uint32_t *otg_active_height);
 
+void optc1_enable_crtc_reset(
+		struct timing_generator *optc,
+		int source_tg_inst,
+		struct crtc_trigger_info *crtc_tp);
+
+bool optc1_configure_crc(struct timing_generator *optc,
+			  const struct crc_params *params);
+
+bool optc1_get_crc(struct timing_generator *optc,
+		    uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+
 #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 771e0cf29bba..84581b3c392b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1035,11 +1035,11 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
 	return DC_OK;
 }
 
-static struct dc_cap_funcs cap_funcs = {
+static const struct dc_cap_funcs cap_funcs = {
 	.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
 };
 
-static struct resource_funcs dcn10_res_pool_funcs = {
+static const struct resource_funcs dcn10_res_pool_funcs = {
 	.destroy = dcn10_destroy_resource_pool,
 	.link_enc_create = dcn10_link_encoder_create,
 	.validate_bandwidth = dcn_validate_bandwidth,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 034369fbb9e2..5d4527d03045 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -40,6 +40,14 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 	const struct dc_edid *edid,
 	struct dc_edid_caps *edid_caps);
 
+
+/*
+ * Update DP branch info
+ */
+void dm_helpers_dp_update_branch_info(
+		struct dc_context *ctx,
+		const struct dc_link *link);
+
 /*
  * Writes payload allocation table in immediate downstream device.
  */
@@ -103,6 +111,9 @@ bool dm_helpers_submit_i2c(
 	const struct dc_link *link,
 	struct i2c_command *cmd);
 
+bool dm_helpers_is_dp_sink_present(
+		struct dc_link *link);
+
 enum dc_edid_status dm_helpers_read_local_edid(
 	struct dc_context *ctx,
 	struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 6943801c5fd3..cbafce649e33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -111,6 +111,8 @@ struct _vcs_dpi_soc_bounding_box_st {
 	double xfc_bus_transport_time_us;
 	double xfc_xbuf_latency_tolerance_us;
 	int use_urgent_burst_bw;
+	double max_hscl_ratio;
+	double max_vscl_ratio;
 	struct _vcs_dpi_voltage_scaling_st clock_limits[7];
 };
 
@@ -303,6 +305,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
 	unsigned char otg_inst;
 	unsigned char odm_split_cnt;
 	unsigned char odm_combine;
+	unsigned char use_maximum_vstartup;
 };
 
 struct _vcs_dpi_display_pipe_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index 562ee189d780..b9d9930a4974 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -61,7 +61,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
 ###############################################################################
 # DCN 1x
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
 GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
 
 AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index 9c4a56c738c0..bf40725f982f 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -82,13 +82,16 @@
 	DDC_GPIO_I2C_REG_LIST(cd),\
 	.ddc_setup = 0
 
-#define DDC_MASK_SH_LIST(mask_sh) \
+#define DDC_MASK_SH_LIST_COMMON(mask_sh) \
 	SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
 	SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
 	SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_MODE, mask_sh),\
 	SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1DATA_PD_EN, mask_sh),\
 	SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1CLK_PD_EN, mask_sh),\
-	SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh),\
+	SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh)
+
+#define DDC_MASK_SH_LIST(mask_sh) \
+	DDC_MASK_SH_LIST_COMMON(mask_sh),\
 	SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
 	SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
 
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index ab5483c0c502..f20161c5706d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -375,6 +375,7 @@ struct gpio *dal_gpio_create_irq(
 	case GPIO_ID_GPIO_PAD:
 		break;
 	default:
+		id = GPIO_ID_HPD;
 		ASSERT_CRITICAL(false);
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 0caee3523017..83df779984e5 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -43,7 +43,7 @@
 #include "dce80/hw_factory_dce80.h"
 #include "dce110/hw_factory_dce110.h"
 #include "dce120/hw_factory_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "dcn10/hw_factory_dcn10.h"
 #endif
 
@@ -81,7 +81,7 @@ bool dal_hw_factory_init(
 	case DCE_VERSION_12_0:
 		dal_hw_factory_dce120_init(factory);
 		return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 	case DCN_VERSION_1_0:
 		dal_hw_factory_dcn10_init(factory);
 		return true;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 55c707488541..e7541310480b 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -43,7 +43,7 @@
 #include "dce80/hw_translate_dce80.h"
 #include "dce110/hw_translate_dce110.h"
 #include "dce120/hw_translate_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "dcn10/hw_translate_dcn10.h"
 #endif
 
@@ -78,7 +78,7 @@ bool dal_hw_translate_init(
 	case DCE_VERSION_12_0:
 		dal_hw_translate_dce120_init(translate);
 		return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 	case DCN_VERSION_1_0:
 		dal_hw_translate_dcn10_init(translate);
 		return true;
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
index 352885cb4d07..a851d07f0190 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
@@ -71,7 +71,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
 ###############################################################################
 # DCN 1.0 family
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
 I2CAUX_DCN1 = i2caux_dcn10.o
 
 AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1))
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
index 1d7309611978..0afd2fa57bbe 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
@@ -128,8 +128,20 @@ static void process_read_reply(
 		ctx->status =
 			I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
 		ctx->operation_succeeded = false;
+	} else if (ctx->returned_byte < ctx->current_read_length) {
+		ctx->current_read_length -= ctx->returned_byte;
+
+		ctx->offset += ctx->returned_byte;
+
+		++ctx->invalid_reply_retry_aux_on_ack;
+
+		if (ctx->invalid_reply_retry_aux_on_ack >
+			AUX_INVALID_REPLY_RETRY_COUNTER) {
+			ctx->status =
+				I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+			ctx->operation_succeeded = false;
+		}
 	} else {
-		ctx->current_read_length = ctx->returned_byte;
 		ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
 		ctx->transaction_complete = true;
 		ctx->operation_succeeded = true;
@@ -290,7 +302,6 @@ static bool read_command(
 			ctx.operation_succeeded);
 	}
 
-	request->payload.length = ctx.reply.length;
 	return ctx.operation_succeeded;
 }
 
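The new else-if branch above turns a short ACK into a resumable read: the sink acknowledged but returned fewer bytes than requested, so the engine shrinks current_read_length, advances offset, and lets the surrounding state machine reissue the transaction, giving up once invalid_reply_retry_aux_on_ack passes AUX_INVALID_REPLY_RETRY_COUNTER. The bookkeeping is a plain sliding window; an illustrative standalone loop (the values are invented):

	uint32_t remaining = 16;	/* bytes still wanted */
	uint32_t offset = 0;		/* next offset to read from */
	uint32_t retries = 0;

	while (remaining > 0 && retries <= AUX_INVALID_REPLY_RETRY_COUNTER) {
		uint32_t returned = 4;	/* pretend each ACK yields 4 bytes */

		remaining -= returned;
		offset += returned;
		++retries;
	}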
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
index b01488f710d5..c33a2898d967 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
@@ -44,6 +44,12 @@ struct aux_engine_funcs {
 	void (*process_channel_reply)(
 		struct aux_engine *engine,
 		struct aux_reply_transaction_data *reply);
+	int (*read_channel_reply)(
+		struct aux_engine *engine,
+		uint32_t size,
+		uint8_t *buffer,
+		uint8_t *reply_result,
+		uint32_t *sw_status);
 	enum aux_channel_operation_result (*get_channel_status)(
 		struct aux_engine *engine,
 		uint8_t *returned_bytes);
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
index e8d3781deaed..8b704ab0471c 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
@@ -97,6 +97,7 @@ struct i2caux *dal_i2caux_dce100_create(
 
 	dal_i2caux_dce110_construct(i2caux_dce110,
 		ctx,
+		ARRAY_SIZE(dce100_aux_regs),
 		dce100_aux_regs,
 		dce100_hw_engine_regs,
 		&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
index 2b927f25937b..ae5caa97caca 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -275,61 +275,92 @@ static void submit_channel_request(
 	REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
 }
 
-static void process_channel_reply(
-	struct aux_engine *engine,
-	struct aux_reply_transaction_data *reply)
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+			      uint8_t *buffer, uint8_t *reply_result,
+			      uint32_t *sw_status)
 {
 	struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+	uint32_t bytes_replied;
+	uint32_t reply_result_32;
 
-	/* Need to do a read to get the number of bytes to process
-	 * Alternatively, this information can be passed -
-	 * but that causes coupling which isn't good either. */
+	*sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+			     &bytes_replied);
 
-	uint32_t bytes_replied;
-	uint32_t value;
+	/* In case HPD is LOW, exit AUX transaction */
+	if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+		return -1;
 
-	value = REG_GET(AUX_SW_STATUS,
-		AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
+	/* Need at least the status byte */
+	if (!bytes_replied)
+		return -1;
 
-	/* in case HPD is LOW, exit AUX transaction */
-	if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
-		reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
-		return;
-	}
+	REG_UPDATE_1BY1_3(AUX_SW_DATA,
+			  AUX_SW_INDEX, 0,
+			  AUX_SW_AUTOINCREMENT_DISABLE, 1,
+			  AUX_SW_DATA_RW, 1);
 
-	if (bytes_replied) {
-		uint32_t reply_result;
+	REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+	reply_result_32 = reply_result_32 >> 4;
+	*reply_result = (uint8_t)reply_result_32;
 
-		REG_UPDATE_1BY1_3(AUX_SW_DATA,
-			AUX_SW_INDEX, 0,
-			AUX_SW_AUTOINCREMENT_DISABLE, 1,
-			AUX_SW_DATA_RW, 1);
+	if (reply_result_32 == 0) { /* ACK */
+		uint32_t i = 0;
 
-		REG_GET(AUX_SW_DATA,
-			AUX_SW_DATA, &reply_result);
+		/* First byte was already used to get the command status */
+		--bytes_replied;
 
-		reply_result = reply_result >> 4;
+		/* Do not overflow buffer */
+		if (bytes_replied > size)
+			return -1;
 
-		switch (reply_result) {
-		case 0: /* ACK */ {
-			uint32_t i = 0;
+		while (i < bytes_replied) {
+			uint32_t aux_sw_data_val;
 
-			/* first byte was already used
-			 * to get the command status */
-			--bytes_replied;
+			REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+			buffer[i] = aux_sw_data_val;
+			++i;
+		}
+
+		return i;
+	}
+
+	return 0;
+}
+
+static void process_channel_reply(
+	struct aux_engine *engine,
+	struct aux_reply_transaction_data *reply)
+{
+	int bytes_replied;
+	uint8_t reply_result;
+	uint32_t sw_status;
 
-			while (i < bytes_replied) {
-				uint32_t aux_sw_data_val;
+	bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+					   &reply_result, &sw_status);
 
-				REG_GET(AUX_SW_DATA,
-					AUX_SW_DATA, &aux_sw_data_val);
+	/* in case HPD is LOW, exit AUX transaction */
+	if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+		reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
+		return;
+	}
 
-				reply->data[i] = aux_sw_data_val;
-				++i;
-			}
+	if (bytes_replied < 0) {
+		/* Need to handle an error case...
+		 * Hopefully, upper layer function won't call this function if
+		 * the number of bytes in the reply was 0, because there was
+		 * surely an error that was asserted that should have been
+		 * handled for hot plug case, this could happen
+		 */
+		if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+			reply->status = AUX_TRANSACTION_REPLY_INVALID;
+			ASSERT_CRITICAL(false);
+			return;
+		}
+	} else {
 
+		switch (reply_result) {
+		case 0: /* ACK */
 			reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
-		}
 			break;
 		case 1: /* NACK */
 			reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
@@ -346,17 +377,6 @@ static void process_channel_reply(
 		default:
 			reply->status = AUX_TRANSACTION_REPLY_INVALID;
 		}
-	} else {
-		/* Need to handle an error case...
-		 * hopefully, upper layer function won't call this function
-		 * if the number of bytes in the reply was 0
-		 * because there was surely an error that was asserted
-		 * that should have been handled
-		 * for hot plug case, this could happens*/
-		if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
-			reply->status = AUX_TRANSACTION_REPLY_INVALID;
-			ASSERT_CRITICAL(false);
-		}
 	}
 }
 
@@ -427,6 +447,7 @@ static const struct aux_engine_funcs aux_engine_funcs = {
 	.acquire_engine = acquire_engine,
 	.submit_channel_request = submit_channel_request,
 	.process_channel_reply = process_channel_reply,
+	.read_channel_reply = read_channel_reply,
 	.get_channel_status = get_channel_status,
 	.is_engine_available = is_engine_available,
 };
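read_channel_reply() separates the reply status from the payload drain with a three-way return: a negative value for engine-level failures (HPD low, empty reply, buffer overflow), 0 for a non-ACK reply whose 4-bit code lands in *reply_result, and the payload byte count on ACK. A sketch of a caller honoring that contract, invoked through the funcs table above:

	uint8_t buf[16];
	uint8_t reply_result;
	uint32_t sw_status;
	int n;

	n = engine->funcs->read_channel_reply(engine, sizeof(buf), buf,
					      &reply_result, &sw_status);
	if (n < 0) {
		/* HPD disconnect, zero-length reply, or overflow */
	} else if (reply_result == 0) {
		/* AUX ACK: buf[0..n-1] holds the payload */
	} else {
		/* NACK/DEFER: reply_result carries the reply code */
	}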
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
index b7256f595052..9cbe1a7a6bcb 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -62,12 +62,7 @@ enum dc_i2c_arbitration {
 	DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
 };
 
-enum {
-	/* No timeout in HW
-	 * (timeout implemented in SW by querying status) */
-	I2C_SETUP_TIME_LIMIT = 255,
-	I2C_HW_BUFFER_SIZE = 538
-};
+
 
 /*
  * @brief
@@ -152,6 +147,11 @@ static bool setup_engine(
 	struct i2c_engine *i2c_engine)
 {
 	struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+	uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
+	uint32_t reset_length = 0;
+
+	if (hw_engine->base.base.setup_limit != 0)
+		i2c_setup_limit = hw_engine->base.base.setup_limit;
 
 	/* Program pin select */
 	REG_UPDATE_6(
@@ -164,11 +164,15 @@ static bool setup_engine(
 		DC_I2C_DDC_SELECT, hw_engine->engine_id);
 
 	/* Program time limit */
-	REG_UPDATE_N(
-		SETUP, 2,
-		FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), I2C_SETUP_TIME_LIMIT,
-		FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-
+	if (hw_engine->base.base.send_reset_length == 0) {
+		/* pre-DCN */
+		REG_UPDATE_N(
+			SETUP, 2,
+			FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
+			FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
+	} else {
+		reset_length = hw_engine->base.base.send_reset_length;
+	}
 	/* Program HW priority
 	 * set to High - interrupt software I2C at any time
 	 * Enable restart of SW I2C that was interrupted by HW
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
index 5bb04085f670..fea2946906ed 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
@@ -192,6 +192,7 @@ struct i2c_hw_engine_dce110 {
 	/* number of pending transactions (before GO) */
 	uint32_t transaction_count;
 	uint32_t engine_keep_power_up_count;
+	uint32_t i2_setup_time_limit;
 };
 
 struct i2c_hw_engine_dce110_create_arg {
@@ -207,4 +208,11 @@ struct i2c_hw_engine_dce110_create_arg {
 struct i2c_engine *dal_i2c_hw_engine_dce110_create(
 	const struct i2c_hw_engine_dce110_create_arg *arg);
 
+enum {
+	I2C_SETUP_TIME_LIMIT_DCE = 255,
+	I2C_SETUP_TIME_LIMIT_DCN = 3,
+	I2C_HW_BUFFER_SIZE = 538,
+	I2C_SEND_RESET_LENGTH_9 = 9,
+	I2C_SEND_RESET_LENGTH_10 = 10,
+};
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
index 2a047f8ca0e9..1d748ac1d6d6 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
@@ -43,6 +43,9 @@
 #include "i2c_sw_engine_dce110.h"
 #include "i2c_hw_engine_dce110.h"
 #include "aux_engine_dce110.h"
+#include "../../dc.h"
+#include "dc_types.h"
+
 
 /*
  * Post-requisites: headers required by this unit
@@ -199,6 +202,7 @@ static const struct dce110_i2c_hw_engine_mask i2c_mask = {
 void dal_i2caux_dce110_construct(
 	struct i2caux_dce110 *i2caux_dce110,
 	struct dc_context *ctx,
+	unsigned int num_i2caux_inst,
 	const struct dce110_aux_registers aux_regs[],
 	const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
 	const struct dce110_i2c_hw_engine_shift *i2c_shift,
@@ -249,9 +253,22 @@ void dal_i2caux_dce110_construct(
 
 		base->i2c_hw_engines[line_id] =
 			dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
-
+		if (base->i2c_hw_engines[line_id] != NULL) {
+			switch (ctx->dce_version) {
+			case DCN_VERSION_1_0:
+				base->i2c_hw_engines[line_id]->setup_limit =
+					I2C_SETUP_TIME_LIMIT_DCN;
+				base->i2c_hw_engines[line_id]->send_reset_length = 0;
+				break;
+			default:
+				base->i2c_hw_engines[line_id]->setup_limit =
+					I2C_SETUP_TIME_LIMIT_DCE;
+				base->i2c_hw_engines[line_id]->send_reset_length = 0;
+				break;
+			}
+		}
 		++i;
-	} while (i < ARRAY_SIZE(hw_ddc_lines));
+	} while (i < num_i2caux_inst);
 
 	/* Create AUX engines for all lines which has assisted HW AUX
 	 * 'i' (loop counter) used as DDC/AUX engine_id */
@@ -272,7 +289,7 @@ void dal_i2caux_dce110_construct(
 			dal_aux_engine_dce110_create(&aux_init_data);
 
 		++i;
-	} while (i < ARRAY_SIZE(hw_aux_lines));
+	} while (i < num_i2caux_inst);
 
 	/*TODO Generic I2C SW and HW*/
 }
@@ -303,6 +320,7 @@ struct i2caux *dal_i2caux_dce110_create(
 
 	dal_i2caux_dce110_construct(i2caux_dce110,
 		ctx,
+		ARRAY_SIZE(dce110_aux_regs),
 		dce110_aux_regs,
 		i2c_hw_engine_regs,
 		&i2c_shift,
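The construct loop above stamps every HW I2C engine with a per-family setup limit: I2C_SETUP_TIME_LIMIT_DCN (3) on DCN 1.0 and I2C_SETUP_TIME_LIMIT_DCE (255) everywhere else, which setup_engine() later programs into DC_I2C_DDC1_TIME_LIMIT; send_reset_length is left 0 in both arms for now, so the pre-DCN programming path is still taken. The version split in isolation (pick_i2c_setup_limit is a hypothetical helper, not part of this patch):

	static uint32_t pick_i2c_setup_limit(enum dce_version version)
	{
		if (version == DCN_VERSION_1_0)
			return I2C_SETUP_TIME_LIMIT_DCN;

		return I2C_SETUP_TIME_LIMIT_DCE;
	}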
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
index 1b1f71c60ac9..d3d8cc58666a 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
@@ -45,6 +45,7 @@ struct i2caux *dal_i2caux_dce110_create(
 void dal_i2caux_dce110_construct(
 	struct i2caux_dce110 *i2caux_dce110,
 	struct dc_context *ctx,
+	unsigned int num_i2caux_inst,
 	const struct dce110_aux_registers *aux_regs,
 	const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
 	const struct dce110_i2c_hw_engine_shift *i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
index dafc1a727f7f..a9db04738724 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
@@ -93,6 +93,7 @@ static void construct(
 {
 	dal_i2caux_dce110_construct(i2caux_dce110,
 		ctx,
+		ARRAY_SIZE(dce112_aux_regs),
 		dce112_aux_regs,
 		dce112_hw_engine_regs,
 		&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
index 0e7b18260027..6a4f344c1db4 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dce120_create(
 
 	dal_i2caux_dce110_construct(i2caux_dce110,
 		ctx,
+		ARRAY_SIZE(dce120_aux_regs),
 		dce120_aux_regs,
 		dce120_hw_engine_regs,
 		&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
index e44a8901f38b..a59c1f50c1e8 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dcn10_create(
 
 	dal_i2caux_dce110_construct(i2caux_dce110,
 		ctx,
+		ARRAY_SIZE(dcn10_aux_regs),
 		dcn10_aux_regs,
 		dcn10_hw_engine_regs,
 		&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
index 58fc0f25eceb..ded6ea34b714 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
@@ -86,6 +86,8 @@ struct i2c_engine {
 	struct engine base;
 	const struct i2c_engine_funcs *funcs;
 	uint32_t timeout_delay;
+	uint32_t setup_limit;
+	uint32_t send_reset_length;
 };
 
 void dal_i2c_engine_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index 14dc8c94d862..f7ed355fc84f 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -59,7 +59,7 @@
 
 #include "dce120/i2caux_dce120.h"
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "dcn10/i2caux_dcn10.h"
 #endif
 
@@ -91,7 +91,7 @@ struct i2caux *dal_i2caux_create(
 		return dal_i2caux_dce100_create(ctx);
 	case DCE_VERSION_12_0:
 		return dal_i2caux_dce120_create(ctx);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 	case DCN_VERSION_1_0:
 		return dal_i2caux_dcn10_create(ctx);
 #endif
@@ -254,7 +254,6 @@ bool dal_i2caux_submit_aux_command(
 			break;
 		}
 
-		cmd->payloads->length = request.payload.length;
 		++index_of_payload;
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 00d728e629fa..4446652a9a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -33,7 +33,7 @@
 #include "dc_bios_types.h"
 #include "mem_input.h"
 #include "hubp.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "mpc.h"
 #endif
 
@@ -148,7 +148,6 @@ struct resource_pool {
 	unsigned int underlay_pipe_index;
 	unsigned int stream_enc_count;
 	unsigned int ref_clock_inKhz;
-	unsigned int dentist_vco_freq_khz;
 	unsigned int timing_generator_count;
 
 	/*
@@ -222,7 +221,7 @@ struct pipe_ctx {
 	struct pipe_ctx *top_pipe;
 	struct pipe_ctx *bottom_pipe;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
 	struct _vcs_dpi_display_dlg_regs_st dlg_regs;
 	struct _vcs_dpi_display_ttu_regs_st ttu_regs;
 	struct _vcs_dpi_display_rq_regs_st rq_regs;
@@ -277,7 +276,7 @@ struct dc_state {
 
 	/* Note: these are big structures, do *not* put on stack! */
 	struct dm_pp_display_configuration pp_display_cfg;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
 	struct dcn_bw_internal_vars dcn_bw_vars;
 #endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 30b3a08b91be..538b83303b86 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,22 +102,13 @@ bool dal_ddc_service_query_ddc_data(
 	uint8_t *read_buf,
 	uint32_t read_size);
 
-enum ddc_result dal_ddc_service_read_dpcd_data(
-	struct ddc_service *ddc,
-	bool i2c,
-	enum i2c_mot_mode mot,
-	uint32_t address,
-	uint8_t *data,
-	uint32_t len,
-	uint32_t *read);
-
-enum ddc_result dal_ddc_service_write_dpcd_data(
-	struct ddc_service *ddc,
-	bool i2c,
-	enum i2c_mot_mode mot,
-	uint32_t address,
-	const uint8_t *data,
-	uint32_t len);
+int dc_link_aux_transfer(struct ddc_service *ddc,
+		unsigned int address,
+		uint8_t *reply,
+		void *buffer,
+		unsigned int size,
+		enum aux_transaction_type type,
+		enum i2caux_transaction_action action);
 
 void dal_ddc_service_write_scdc_data(
 	struct ddc_service *ddc_service,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 582458f028f8..74ad94b0e4f0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -151,6 +151,9 @@ struct dpp_funcs {
 	void (*dpp_set_hdr_multiplier)(
 			struct dpp *dpp_base,
 			uint32_t multiplier);
+	void (*set_optional_cursor_attributes)(
+			struct dpp *dpp_base,
+			struct dpp_cursor_attributes *attr);
 
 	void (*dpp_dppclk_control)(
 			struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 97df82cddf82..4f3f9e68ccfa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -43,10 +43,9 @@ enum cursor_lines_per_chunk {
 };
 
 struct hubp {
-	struct hubp_funcs *funcs;
+	const struct hubp_funcs *funcs;
 	struct dc_context *ctx;
 	struct dc_plane_address request_address;
-	struct dc_plane_address current_address;
 	int inst;
 
 	/* run time states */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 47f1dc5a43b7..da89c2edb07c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -64,7 +64,7 @@ struct stutter_modes {
 };
 
 struct mem_input {
-	struct mem_input_funcs *funcs;
+	const struct mem_input_funcs *funcs;
 	struct dc_context *ctx;
 	struct dc_plane_address request_address;
 	struct dc_plane_address current_address;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 2506601120af..a14ce4de80b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -102,10 +102,18 @@ struct hw_sequencer_funcs {
 		const struct dc *dc,
 		struct pipe_ctx *pipe_ctx);
 
+	void (*plane_atomic_disconnect)(
+		struct dc *dc,
+		struct pipe_ctx *pipe_ctx);
+
 	void (*update_dchub)(
 		struct dce_hwseq *hws,
 		struct dchub_init_data *dh_data);
 
+	void (*update_mpcc)(
+		struct dc *dc,
+		struct pipe_ctx *pipe_ctx);
+
 	void (*update_pending_status)(
 		struct pipe_ctx *pipe_ctx);
 
@@ -215,6 +223,7 @@ struct hw_sequencer_funcs {
 
 	void (*set_cursor_position)(struct pipe_ctx *pipe);
 	void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+	void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
 
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 3306e7b0b3e3..cf5a84b9e27c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -445,4 +445,50 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
 	uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
 	uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
 	uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
+
+
+/* indirect register access */
+
+#define IX_REG_SET_N(index_reg_name, data_reg_name, index, n, initial_val, ...) \
+		generic_indirect_reg_update_ex(CTX, \
+				REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
+				initial_val, \
+				n, __VA_ARGS__)
+
+#define IX_REG_SET_2(index_reg_name, data_reg_name, index, init_value, f1, v1, f2, v2) \
+		IX_REG_SET_N(index_reg_name, data_reg_name, index, 2, init_value, \
+				FN(reg, f1), v1,\
+				FN(reg, f2), v2)
+
+
+#define IX_REG_READ(index_reg_name, data_reg_name, index) \
+		generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index))
+
+
+
+#define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) \
+		generic_indirect_reg_update_ex(CTX, \
+				REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
+				IX_REG_READ(index_reg_name, data_reg_name, index), \
+				n, __VA_ARGS__)
+
+#define IX_REG_UPDATE_2(index_reg_name, data_reg_name, index, f1, v1, f2, v2) \
+		IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, 2,\
+				FN(reg, f1), v1,\
+				FN(reg, f2), v2)
+
+void generic_write_indirect_reg(const struct dc_context *ctx,
+		uint32_t addr_index, uint32_t addr_data,
+		uint32_t index, uint32_t data);
+
+uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
+		uint32_t addr_index, uint32_t addr_data,
+		uint32_t index);
+
+uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
+		uint32_t addr_index, uint32_t addr_data,
+		uint32_t index, uint32_t reg_val, int n,
+		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+		...);
+
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
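The IX_REG_* family mirrors the direct REG_SET/REG_UPDATE helpers for banked registers reached through an index/data pair: the index register selects the internal register, the data register carries its value, and the *_UPDATE_* variants seed a read-modify-write with IX_REG_READ. A hedged usage sketch with invented register and field names:

	/* Assume CURSOR_INDEX/CURSOR_DATA form the index/data window and
	 * CURSOR_SETTINGS is an internal register with two fields. */
	IX_REG_UPDATE_2(CURSOR_INDEX, CURSOR_DATA, CURSOR_SETTINGS,
			CURSOR_HOT_SPOT_X, 4,
			CURSOR_HOT_SPOT_Y, 4);

This expands to a generic_read_indirect_reg() of CURSOR_SETTINGS followed by generic_indirect_reg_update_ex() writing back the two modified fields.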
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 498515aad4a5..a76ee600ecee 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -60,7 +60,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
 ###############################################################################
 # DCN 1x
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
 IRQ_DCN1 = irq_service_dcn10.o
 
 AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 604bea01fc13..ae3fd0a235ba 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -36,7 +36,7 @@
 #include "dce120/irq_service_dce120.h"
 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "dcn10/irq_service_dcn10.h"
 #endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index a407892905af..c9fce9066ad8 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -48,7 +48,7 @@
 
 #define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include <asm/fpu/api.h>
 #endif
 
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index d8e52e3b8e3c..1c66166d0a94 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -27,6 +27,9 @@
27#define __DAL_DPCD_DEFS_H__ 27#define __DAL_DPCD_DEFS_H__
28 28
29#include <drm/drm_dp_helper.h> 29#include <drm/drm_dp_helper.h>
30#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
31#define DP_SINK_HW_REVISION_START 0x409
32#endif
30 33
31enum dpcd_revision { 34enum dpcd_revision {
32 DPCD_REV_10 = 0x10, 35 DPCD_REV_10 = 0x10,
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 0f10ed710e0d..e3c79616682d 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,49 +40,7 @@ struct dc_state;
40 * 40 *
41 */ 41 */
42 42
43struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask); 43void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
44
45uint32_t dal_logger_destroy(struct dal_logger **logger);
46
47void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn);
48
49void dm_logger_write(
50 struct dal_logger *logger,
51 enum dc_log_type log_type,
52 const char *msg,
53 ...);
54
55void dm_logger_append(
56 struct log_entry *entry,
57 const char *msg,
58 ...);
59
60void dm_logger_append_va(
61 struct log_entry *entry,
62 const char *msg,
63 va_list args);
64
65void dm_logger_append_heading(struct log_entry *entry);
66
67void dm_logger_open(
68 struct dal_logger *logger,
69 struct log_entry *entry,
70 enum dc_log_type log_type);
71
72void dm_logger_close(struct log_entry *entry);
73
74void dc_conn_log(struct dc_context *ctx,
75 const struct dc_link *link,
76 uint8_t *hex_data,
77 int hex_data_count,
78 enum dc_log_type event,
79 const char *msg,
80 ...);
81
82void logger_write(struct dal_logger *logger,
83 enum dc_log_type log_type,
84 const char *msg,
85 void *paralist);
86 44
87void pre_surface_trace( 45void pre_surface_trace(
88 struct dc *dc, 46 struct dc *dc,
@@ -108,28 +66,31 @@ void context_clock_trace(
108 * marked by this macro. 66 * marked by this macro.
109 * Note that the message will be printed exactly once for every function 67 * Note that the message will be printed exactly once for every function
110 * it is used in order to avoid repeating of the same message. */ 68 * it is used in order to avoid repeating of the same message. */
69
111#define DAL_LOGGER_NOT_IMPL(fmt, ...) \ 70#define DAL_LOGGER_NOT_IMPL(fmt, ...) \
112{ \ 71 do { \
113 static bool print_not_impl = true; \ 72 static bool print_not_impl = true; \
114\ 73 if (print_not_impl == true) { \
115 if (print_not_impl == true) { \ 74 print_not_impl = false; \
116 print_not_impl = false; \ 75 DRM_WARN("DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
117 dm_logger_write(ctx->logger, LOG_WARNING, \ 76 } \
118 "DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \ 77 } while (0)
119 } \
120}
121 78
122/****************************************************************************** 79/******************************************************************************
123 * Convenience macros to save on typing. 80 * Convenience macros to save on typing.
124 *****************************************************************************/ 81 *****************************************************************************/
125 82
126#define DC_ERROR(...) \ 83#define DC_ERROR(...) \
127 dm_logger_write(dc_ctx->logger, LOG_ERROR, \ 84 do { \
128 __VA_ARGS__) 85 (void)(dc_ctx); \
86 DC_LOG_ERROR(__VA_ARGS__); \
87 } while (0)
129 88
130#define DC_SYNC_INFO(...) \ 89#define DC_SYNC_INFO(...) \
131 dm_logger_write(dc_ctx->logger, LOG_SYNC, \ 90 do { \
132 __VA_ARGS__) 91 (void)(dc_ctx); \
92 DC_LOG_SYNC(__VA_ARGS__); \
93 } while (0)
133 94
134/* Connectivity log format: 95/* Connectivity log format:
135 * [time stamp] [drm] [Major_minor] [connector name] message..... 96 * [time stamp] [drm] [Major_minor] [connector name] message.....
@@ -139,20 +100,30 @@ void context_clock_trace(
139 */ 100 */
140 101
141#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \ 102#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
142 dc_conn_log(link->ctx, link, hex_data, hex_len, \ 103 do { \
143 LOG_EVENT_DETECTION, ##__VA_ARGS__) 104 (void)(link); \
105 dc_conn_log_hex_linux(hex_data, hex_len); \
106 DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
107 } while (0)
144 108
145#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \ 109#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
146 dc_conn_log(link->ctx, link, hex_data, hex_len, \ 110 do { \
147 LOG_EVENT_LINK_LOSS, ##__VA_ARGS__) 111 (void)(link); \
112 dc_conn_log_hex_linux(hex_data, hex_len); \
113 DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
114 } while (0)
148 115
149#define CONN_MSG_LT(link, ...) \ 116#define CONN_MSG_LT(link, ...) \
150 dc_conn_log(link->ctx, link, NULL, 0, \ 117 do { \
151 LOG_EVENT_LINK_TRAINING, ##__VA_ARGS__) 118 (void)(link); \
119 DC_LOG_EVENT_LINK_TRAINING(__VA_ARGS__); \
120 } while (0)
152 121
153#define CONN_MSG_MODE(link, ...) \ 122#define CONN_MSG_MODE(link, ...) \
154 dc_conn_log(link->ctx, link, NULL, 0, \ 123 do { \
155 LOG_EVENT_MODE_SET, ##__VA_ARGS__) 124 (void)(link); \
125 DC_LOG_EVENT_MODE_SET(__VA_ARGS__); \
126 } while (0)
156 127
157/* 128/*
158 * Display Test Next logging 129 * Display Test Next logging
@@ -167,38 +138,21 @@ void context_clock_trace(
167 dm_dtn_log_end(dc_ctx) 138 dm_dtn_log_end(dc_ctx)
168 139
169#define PERFORMANCE_TRACE_START() \ 140#define PERFORMANCE_TRACE_START() \
170 unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx); \ 141 unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx)
171 unsigned long long perf_trc_start_log_msk = dc->ctx->logger->mask; \ 142
172 unsigned int perf_trc_start_log_flags = dc->ctx->logger->flags.value; \ 143#define PERFORMANCE_TRACE_END() \
173 if (dc->debug.performance_trace) {\ 144 do { \
174 dm_logger_flush_buffer(dc->ctx->logger, false);\ 145 unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx); \
175 dc->ctx->logger->mask = 1<<LOG_PERF_TRACE;\ 146 if (dc->debug.performance_trace) { \
176 dc->ctx->logger->flags.bits.ENABLE_CONSOLE = 0;\ 147 DC_LOG_PERF_TRACE("%s duration: %lld ticks\n", __func__, \
177 dc->ctx->logger->flags.bits.ENABLE_BUFFER = 1;\
178 }
179
180#define PERFORMANCE_TRACE_END() do {\
181 unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx);\
182 if (dc->debug.performance_trace) {\
183 dm_logger_write(dc->ctx->logger, \
184 LOG_PERF_TRACE, \
185 "%s duration: %d ticks\n", __func__,\
186 perf_trc_end_stmp - perf_trc_start_stmp); \ 148 perf_trc_end_stmp - perf_trc_start_stmp); \
187 if (perf_trc_start_log_msk != 1<<LOG_PERF_TRACE) {\
188 dc->ctx->logger->mask = perf_trc_start_log_msk;\
189 dc->ctx->logger->flags.value = perf_trc_start_log_flags;\
190 dm_logger_flush_buffer(dc->ctx->logger, false);\
191 } \ 149 } \
192 } \ 150 } while (0)
193} while (0)
194 151
195#define DISPLAY_STATS_BEGIN(entry) \ 152#define DISPLAY_STATS_BEGIN(entry) (void)(entry)
196 dm_logger_open(dc->ctx->logger, &entry, LOG_DISPLAYSTATS)
197 153
198#define DISPLAY_STATS(msg, ...) \ 154#define DISPLAY_STATS(msg, ...) DC_LOG_PERF_TRACE(msg, __VA_ARGS__)
199 dm_logger_append(&log_entry, msg, ##__VA_ARGS__)
200 155
201#define DISPLAY_STATS_END(entry) \ 156#define DISPLAY_STATS_END(entry) (void)(entry)
202 dm_logger_close(&entry)
203 157
204#endif /* __DAL_LOGGER_INTERFACE_H__ */ 158#endif /* __DAL_LOGGER_INTERFACE_H__ */
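
The conversion above from bare-block macros to do { ... } while (0) is the standard statement-macro hygiene fix: a bare block followed by the caller's semicolon breaks if/else chaining, and the added (void)(x) casts keep now-unused parameters referenced. A minimal self-contained sketch, using a hypothetical LOG_ONCE() macro rather than any DC API:

	#include <stdio.h>

	/* Prints its message at most once per call site; the do/while(0)
	 * wrapper makes the macro behave like a single statement. */
	#define LOG_ONCE(fmt, ...)				\
		do {						\
			static int printed;			\
			if (!printed) {				\
				printed = 1;			\
				printf(fmt, ##__VA_ARGS__);	\
			}					\
		} while (0)

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			if (i == 0)
				LOG_ONCE("seen i == 0 (printed once)\n");
			else
				printf("i = %d\n", i); /* else binds correctly */
		return 0;
	}

With a bare { ... } body instead, the semicolon after LOG_ONCE(...) would terminate the if statement and the else would fail to compile.
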
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 0a540b9897a6..ad3695e67b76 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -138,63 +138,4 @@ enum dc_log_type {
138 (1 << LOG_HW_AUDIO)| \ 138 (1 << LOG_HW_AUDIO)| \
139 (1 << LOG_BANDWIDTH_CALCS)*/ 139 (1 << LOG_BANDWIDTH_CALCS)*/
140 140
141union logger_flags {
142 struct {
143 uint32_t ENABLE_CONSOLE:1; /* Print to console */
144 uint32_t ENABLE_BUFFER:1; /* Print to buffer */
145 uint32_t RESERVED:30;
146 } bits;
147 uint32_t value;
148};
149
150struct log_entry {
151 struct dal_logger *logger;
152 enum dc_log_type type;
153
154 char *buf;
155 uint32_t buf_offset;
156 uint32_t max_buf_bytes;
157};
158
159/**
160* Structure for enumerating log types
161*/
162struct dc_log_type_info {
163 enum dc_log_type type;
164 char name[MAX_NAME_LEN];
165};
166
167/* Structure for keeping track of offsets, buffer, etc */
168
169#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
170
171/*Connectivity log needs to output EDID, which needs at lease 256x3 bytes,
172 * change log line size to 896 to meet the request.
173 */
174#define LOG_MAX_LINE_SIZE 896
175
176struct dal_logger {
177
178 /* How far into the circular buffer has been read by dsat
179 * Read offset should never cross write offset. Write \0's to
180 * read data just to be sure?
181 */
182 uint32_t buffer_read_offset;
183
184 /* How far into the circular buffer we have written
185 * Write offset should never cross read offset
186 */
187 uint32_t buffer_write_offset;
188
189 uint32_t open_count;
190
191 char *log_buffer; /* Pointer to malloc'ed buffer */
192 uint32_t log_buffer_size; /* Size of circular buffer */
193
194 uint32_t mask; /*array of masks for major elements*/
195
196 union logger_flags flags;
197 struct dc_context *ctx;
198};
199
200#endif /* __DAL_LOGGER_TYPES_H__ */ 141#endif /* __DAL_LOGGER_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/modules/color/luts_1d.h b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
new file mode 100644
index 000000000000..66b1fad572ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#ifndef LUTS_1D_H
26#define LUTS_1D_H
27
28#include "hw_shared.h"
29
30struct point_config {
31 uint32_t custom_float_x;
32 uint32_t custom_float_y;
33 uint32_t custom_float_slope;
34};
35
36struct lut_point {
37 uint32_t red;
38 uint32_t green;
39 uint32_t blue;
40 uint32_t delta_red;
41 uint32_t delta_green;
42 uint32_t delta_blue;
43};
44
45struct pwl_1dlut_parameter {
46 struct gamma_curve arr_curve_points[34];
47 struct point_config arr_points[2];
48 struct lut_point rgb_resulted[256];
49 uint32_t hw_points_num;
50};
51#endif // LUTS_1D_H
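
A sketch of how the structure above might be populated, assuming hw_points_num counts entries of rgb_resulted and an illustrative 8.8 fixed-point encoding; the real encodings and deltas come from the custom-float/gamma code, which is not part of this patch:

	#include <stdint.h>
	#include "luts_1d.h"

	/* Illustrative identity ramp; real callers derive the per-channel
	 * values and deltas from the target transfer function. */
	static void fill_identity_lut(struct pwl_1dlut_parameter *p)
	{
		uint32_t i;

		p->hw_points_num = 256;
		for (i = 0; i < 256; i++) {
			uint32_t v = i << 8;	/* assumed 8.8 fixed point */

			p->rgb_resulted[i].red = v;
			p->rgb_resulted[i].green = v;
			p->rgb_resulted[i].blue = v;
			p->rgb_resulted[i].delta_red = 1 << 8;
			p->rgb_resulted[i].delta_green = 1 << 8;
			p->rgb_resulted[i].delta_blue = 1 << 8;
		}
	}
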
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
new file mode 100644
index 000000000000..36306c57a2b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __IRQSRCS_GFX_9_0_H__
27#define __IRQSRCS_GFX_9_0_H__
28
29
30#define GFX_9_0__SRCID__CP_RB_INTERRUPT_PKT 176 /* B0 CP_INTERRUPT pkt in RB */
31#define GFX_9_0__SRCID__CP_IB1_INTERRUPT_PKT 177 /* B1 CP_INTERRUPT pkt in IB1 */
32#define GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT 178 /* B2 CP_INTERRUPT pkt in IB2 */
33#define GFX_9_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 /* B4 PM4 Pkt Rsvd Bits Error */
34#define GFX_9_0__SRCID__CP_EOP_INTERRUPT 181 /* B5 End-of-Pipe Interrupt */
35#define GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR 183 /* B7 Bad Opcode Error */
36#define GFX_9_0__SRCID__CP_PRIV_REG_FAULT 184 /* B8 Privileged Register Fault */
37#define GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT 185 /* B9 Privileged Instr Fault */
38#define GFX_9_0__SRCID__CP_WAIT_MEM_SEM_FAULT 186 /* BA Wait Memory Semaphore Fault (Synchronization Object Fault) */
39#define GFX_9_0__SRCID__CP_CTX_EMPTY_INTERRUPT 187 /* BB Context Empty Interrupt */
40#define GFX_9_0__SRCID__CP_CTX_BUSY_INTERRUPT 188 /* BC Context Busy Interrupt */
41#define GFX_9_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT 192 /* C0 CP.ME Wait_Reg_Mem Poll Timeout */
42#define GFX_9_0__SRCID__CP_SIG_INCOMPLETE 193 /* C1 "Surface Probe Fault Signal Incomplete" */
43#define GFX_9_0__SRCID__CP_PREEMPT_ACK		        194		/* C2 Preemption Acknowledge */
44#define GFX_9_0__SRCID__CP_GPF 195 /* C3 General Protection Fault (GPF) */
45#define GFX_9_0__SRCID__CP_GDS_ALLOC_ERROR 196 /* C4 GDS Alloc Error */
46#define GFX_9_0__SRCID__CP_ECC_ERROR 197 /* C5 ECC Error */
47#define GFX_9_0__SRCID__CP_COMPUTE_QUERY_STATUS 199 /* C7 Compute query status */
48#define GFX_9_0__SRCID__CP_VM_DOORBELL 200 /* C8 Unattached VM Doorbell Received */
49#define GFX_9_0__SRCID__CP_FUE_ERROR 201 /* C9 ECC FUE Error */
50#define GFX_9_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT 202 /* CA Streaming Perf Monitor Interrupt */
51#define GFX_9_0__SRCID__GRBM_RD_TIMEOUT_ERROR		        232		/* E8 GRBM read timeout error */
52#define GFX_9_0__SRCID__GRBM_REG_GUI_IDLE 233 /* E9 Register GUI Idle */
53#define GFX_9_0__SRCID__SQ_INTERRUPT_ID 239 /* EF SQ Interrupt (ttrace wrap, errors) */
54
55#endif /* __IRQSRCS_GFX_9_0_H__ */
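
These constants exist to replace magic numbers at IRQ registration sites. A condensed sketch of the consuming pattern, mirroring the smu7/smu_helper hunks later in this patch; the SOC15_IH_CLIENTID_GRBM_CP client ID is assumed here and the caller is presumed to own the amdgpu_irq_src:

	/* was: amdgpu_irq_add_id(adev, client, 181, eop_irq); */
	static int example_register_eop_irq(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *eop_irq)
	{
		return amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
					 GFX_9_0__SRCID__CP_EOP_INTERRUPT,
					 eop_irq);
	}
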
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
index c6b6f97de9de..aaed7f59e0e2 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
@@ -198,4 +198,102 @@
198#define VISLANDS30_IV_SRCID_HPD_RX_F 42 // 0x2a 198#define VISLANDS30_IV_SRCID_HPD_RX_F 42 // 0x2a
199#define VISLANDS30_IV_EXTID_HPD_RX_F 11 199#define VISLANDS30_IV_EXTID_HPD_RX_F 11
200 200
201#define VISLANDS30_IV_SRCID_GPIO_19 0x00000053 /* 83 */
202
203#define VISLANDS30_IV_SRCID_SRBM_READ_TIMEOUT_ERR 0x00000060 /* 96 */
204#define VISLANDS30_IV_SRCID_SRBM_CTX_SWITCH 0x00000061 /* 97 */
205
206#define VISLANDS30_IV_SRBM_REG_ACCESS_ERROR 0x00000062 /* 98 */
207
208
209#define VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP 0x00000077 /* 119 */
210#define VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE 0x0000007c /* 124 */
211
212#define VISLANDS30_IV_SRCID_BIF_PF_VF_MSGBUF_VALID 0x00000087 /* 135 */
213
214#define VISLANDS30_IV_SRCID_BIF_VF_PF_MSGBUF_ACK 0x0000008a /* 138 */
215
216#define VISLANDS30_IV_SRCID_SYS_PAGE_INV_FAULT 0x0000008c /* 140 */
217#define VISLANDS30_IV_SRCID_SYS_MEM_PROT_FAULT 0x0000008d /* 141 */
218
219#define VISLANDS30_IV_SRCID_SEM_PAGE_INV_FAULT 0x00000090 /* 144 */
220#define VISLANDS30_IV_SRCID_SEM_MEM_PROT_FAULT 0x00000091 /* 145 */
221
222#define VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT 0x00000092 /* 146 */
223#define VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT 0x00000093 /* 147 */
224
225#define VISLANDS30_IV_SRCID_ACP 0x000000a2 /* 162 */
226
227#define VISLANDS30_IV_SRCID_VCE_TRAP 0x000000a7 /* 167 */
228#define VISLANDS30_IV_EXTID_VCE_TRAP_GENERAL_PURPOSE 0
229#define VISLANDS30_IV_EXTID_VCE_TRAP_LOW_LATENCY 1
230#define VISLANDS30_IV_EXTID_VCE_TRAP_REAL_TIME 2
231
232#define VISLANDS30_IV_SRCID_CP_INT_RB 0x000000b0 /* 176 */
233#define VISLANDS30_IV_SRCID_CP_INT_IB1 0x000000b1 /* 177 */
234#define VISLANDS30_IV_SRCID_CP_INT_IB2 0x000000b2 /* 178 */
235#define VISLANDS30_IV_SRCID_CP_PM4_RES_BITS_ERR 0x000000b4 /* 180 */
236#define VISLANDS30_IV_SRCID_CP_END_OF_PIPE 0x000000b5 /* 181 */
237#define VISLANDS30_IV_SRCID_CP_BAD_OPCODE 0x000000b7 /* 183 */
238#define VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT 0x000000b8 /* 184 */
239#define VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT 0x000000b9 /* 185 */
240#define VISLANDS30_IV_SRCID_CP_WAIT_MEM_SEM_FAULT 0x000000ba /* 186 */
241#define VISLANDS30_IV_SRCID_CP_GUI_IDLE 0x000000bb /* 187 */
242#define VISLANDS30_IV_SRCID_CP_GUI_BUSY 0x000000bc /* 188 */
243
244#define VISLANDS30_IV_SRCID_CP_COMPUTE_QUERY_STATUS 0x000000bf /* 191 */
245#define VISLANDS30_IV_SRCID_CP_ECC_ERROR 0x000000c5 /* 197 */
246
247#define CARRIZO_IV_SRCID_CP_COMPUTE_QUERY_STATUS 0x000000c7 /* 199 */
248
249#define VISLANDS30_IV_SRCID_CP_WAIT_REG_MEM_POLL_TIMEOUT 0x000000c0 /* 192 */
250#define VISLANDS30_IV_SRCID_CP_SEM_SIG_INCOMPL 0x000000c1 /* 193 */
251#define VISLANDS30_IV_SRCID_CP_PREEMPT_ACK 0x000000c2 /* 194 */
252#define VISLANDS30_IV_SRCID_CP_GENERAL_PROT_FAULT 0x000000c3 /* 195 */
253#define VISLANDS30_IV_SRCID_CP_GDS_ALLOC_ERROR 0x000000c4 /* 196 */
254#define VISLANDS30_IV_SRCID_CP_ECC_ERROR 0x000000c5 /* 197 */
255
256#define VISLANDS30_IV_SRCID_RLC_STRM_PERF_MONITOR 0x000000ca /* 202 */
257
258#define VISLANDS30_IV_SDMA_ATOMIC_SRC_ID 0x000000da /* 218 */
259
260#define VISLANDS30_IV_SRCID_SDMA_ECC_ERROR 0x000000dc /* 220 */
261
262#define VISLANDS30_IV_SRCID_SDMA_TRAP 0x000000e0 /* 224 */
263#define VISLANDS30_IV_SRCID_SDMA_SEM_INCOMPLETE 0x000000e1 /* 225 */
264#define VISLANDS30_IV_SRCID_SDMA_SEM_WAIT 0x000000e2 /* 226 */
265
266
267#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER 0x000000e5 /* 229 */
268
269#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH 0x000000e6 /* 230 */
270#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW 0x000000e7 /* 231 */
271
272#define VISLANDS30_IV_SRCID_GRBM_READ_TIMEOUT_ERR 0x000000e8 /* 232 */
273#define VISLANDS30_IV_SRCID_GRBM_REG_GUI_IDLE 0x000000e9 /* 233 */
274
275#define VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG 0x000000ef /* 239 */
276
277#define VISLANDS30_IV_SRCID_SDMA_PREEMPT 0x000000f0 /* 240 */
278#define VISLANDS30_IV_SRCID_SDMA_VM_HOLE 0x000000f2 /* 242 */
279#define VISLANDS30_IV_SRCID_SDMA_CTXEMPTY 0x000000f3 /* 243 */
280#define VISLANDS30_IV_SRCID_SDMA_DOORBELL_INVALID 0x000000f4 /* 244 */
281#define VISLANDS30_IV_SRCID_SDMA_FROZEN 0x000000f5 /* 245 */
282#define VISLANDS30_IV_SRCID_SDMA_POLL_TIMEOUT 0x000000f6 /* 246 */
283#define VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE 0x000000f7 /* 247 */
284
285#define VISLANDS30_IV_SRCID_CG_THERMAL_TRIG 0x000000f8 /* 248 */
286
287#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER_TRIGGER 0x000000fd /* 253 */
288
289/* These are not "real" source ids defined by HW */
290#define VISLANDS30_IV_SRCID_VM_CONTEXT_ALL 0x00000100 /* 256 */
291#define VISLANDS30_IV_EXTID_VM_CONTEXT0_ALL 0
292#define VISLANDS30_IV_EXTID_VM_CONTEXT1_ALL 1
293
294
295/* IV Extended IDs */
296#define VISLANDS30_IV_EXTID_NONE 0x00000000
297#define VISLANDS30_IV_EXTID_INVALID 0xffffffff
298
201#endif // _IVSRCID_VISLANDS30_H_ 299#endif // _IVSRCID_VISLANDS30_H_
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
new file mode 100644
index 000000000000..802413832fe8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __IRQSRCS_SDMA0_4_0_H__
27#define __IRQSRCS_SDMA0_4_0_H__
28
29#define SDMA0_4_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 /* 0xD9 SDMA atomic*_rtn ops complete */
30#define SDMA0_4_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 /* 0xDA SDMA atomic CMPSWAP loop timeout */
31#define SDMA0_4_0__SRCID__SDMA_IB_PREEMPT 219 /* 0xDB sdma mid-command buffer preempt interrupt */
32#define SDMA0_4_0__SRCID__SDMA_ECC 220 /* 0xDC ECC Error */
33#define SDMA0_4_0__SRCID__SDMA_PAGE_FAULT 221 /* 0xDD Page Fault Error from UTCL2 when nack=3 */
34#define SDMA0_4_0__SRCID__SDMA_PAGE_NULL 222 /* 0xDE Page Null from UTCL2 when nack=2 */
35#define SDMA0_4_0__SRCID__SDMA_XNACK 223 /* 0xDF Page retry timeout after UTCL2 return nack=1 */
36#define SDMA0_4_0__SRCID__SDMA_TRAP 224 /* 0xE0 Trap */
37#define SDMA0_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT	225		/* 0xE1 GPF (Sem incomplete timeout) */
38#define SDMA0_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 226 /* 0xE2 Semaphore wait fail timeout */
39#define SDMA0_4_0__SRCID__SDMA_SRAM_ECC 228 /* 0xE4 SRAM ECC Error */
40#define SDMA0_4_0__SRCID__SDMA_PREEMPT 240 /* 0xF0 SDMA New Run List */
41#define SDMA0_4_0__SRCID__SDMA_VM_HOLE 242 /* 0xF2 MC or SEM address in VM hole */
42#define SDMA0_4_0__SRCID__SDMA_CTXEMPTY 243 /* 0xF3 Context Empty */
43#define SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID 244 /* 0xF4 Doorbell BE invalid */
44#define SDMA0_4_0__SRCID__SDMA_FROZEN 245 /* 0xF5 SDMA Frozen */
45#define SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT 246 /* 0xF6 SRBM read poll timeout */
46#define SDMA0_4_0__SRCID__SDMA_SRBMWRITE 247 /* 0xF7 SRBM write Protection */
47
48#endif /* __IRQSRCS_SDMA0_4_0_H__ */
49
50
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
new file mode 100644
index 000000000000..d12a35619f9a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __IRQSRCS_SDMA1_4_0_H__
27#define __IRQSRCS_SDMA1_4_0_H__
28
29#define SDMA1_4_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 /* 0xD9 SDMA atomic*_rtn ops complete */
30#define SDMA1_4_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 /* 0xDA SDMA atomic CMPSWAP loop timeout */
31#define SDMA1_4_0__SRCID__SDMA_IB_PREEMPT 219 /* 0xDB sdma mid-command buffer preempt interrupt */
32#define SDMA1_4_0__SRCID__SDMA_ECC 220 /* 0xDC ECC Error */
33#define SDMA1_4_0__SRCID__SDMA_PAGE_FAULT 221 /* 0xDD Page Fault Error from UTCL2 when nack=3 */
34#define SDMA1_4_0__SRCID__SDMA_PAGE_NULL 222 /* 0xDE Page Null from UTCL2 when nack=2 */
35#define SDMA1_4_0__SRCID__SDMA_XNACK 223 /* 0xDF Page retry timeout after UTCL2 return nack=1 */
36#define SDMA1_4_0__SRCID__SDMA_TRAP 224 /* 0xE0 Trap */
37#define SDMA1_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT	225		/* 0xE1 GPF (Sem incomplete timeout) */
38#define SDMA1_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 226 /* 0xE2 Semaphore wait fail timeout */
39#define SDMA1_4_0__SRCID__SDMA_SRAM_ECC 228 /* 0xE4 SRAM ECC Error */
40#define SDMA1_4_0__SRCID__SDMA_PREEMPT 240 /* 0xF0 SDMA New Run List */
41#define SDMA1_4_0__SRCID__SDMA_VM_HOLE 242 /* 0xF2 MC or SEM address in VM hole */
42#define SDMA1_4_0__SRCID__SDMA_CTXEMPTY 243 /* 0xF3 Context Empty */
43#define SDMA1_4_0__SRCID__SDMA_DOORBELL_INVALID 244 /* 0xF4 Doorbell BE invalid */
44#define SDMA1_4_0__SRCID__SDMA_FROZEN 245 /* 0xF5 SDMA Frozen */
45#define SDMA1_4_0__SRCID__SDMA_POLL_TIMEOUT 246 /* 0xF6 SRBM read poll timeout */
46#define SDMA1_4_0__SRCID__SDMA_SRBMWRITE 247 /* 0xF7 SRBM write Protection */
47
48#endif /* __IRQSRCS_SDMA1_4_0_H__ */
49
50
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
new file mode 100644
index 000000000000..02bab4673cd4
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __IRQSRCS_SMUIO_9_0_H__
27#define __IRQSRCS_SMUIO_9_0_H__
28
29#define SMUIO_9_0__SRCID__SMUIO_GPIO19 83 /* GPIO19 interrupt */
30
31#endif /* __IRQSRCS_SMUIO_9_0_H__ */
32
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
new file mode 100644
index 000000000000..5218bc53fb2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __IRQSRCS_THM_9_0_H__
27#define __IRQSRCS_THM_9_0_H__
28
29#define THM_9_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
30#define THM_9_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
31
32#endif /* __IRQSRCS_THM_9_0_H__ */
33
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
new file mode 100644
index 000000000000..fb041aee6c66
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __IRQSRCS_UVD_7_0_H__
27#define __IRQSRCS_UVD_7_0_H__
28
29#define UVD_7_0__SRCID__UVD_ENC_GEN_PURP 119
30#define UVD_7_0__SRCID__UVD_ENC_LOW_LATENCY 120
31#define UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 /* UVD system message interrupt */
32
33#endif /* __IRQSRCS_UVD_7_0_H__ */
34
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
new file mode 100644
index 000000000000..3440bab565af
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __IRQSRCS_VCE_4_0_H__
27#define __IRQSRCS_VCE_4_0_H__
28
29#define VCE_4_0__CTXID__VCE_TRAP_GENERAL_PURPOSE 0
30#define VCE_4_0__CTXID__VCE_TRAP_LOW_LATENCY 1
31#define VCE_4_0__CTXID__VCE_TRAP_REAL_TIME 2
32
33#endif /* __IRQSRCS_VCE_4_0_H__ */
34
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
new file mode 100644
index 000000000000..e5951709bfc3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __IRQSRCS_VCN_1_0_H__
27#define __IRQSRCS_VCN_1_0_H__
28
29#define VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE 119 /* 0x77 Encoder General Purpose */
30#define VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY 120 /* 0x78 Encoder Low Latency */
31#define VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 /* 0x7c UVD system message interrupt */
32
33#endif /* __IRQSRCS_VCN_1_0_H__ */
34
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
new file mode 100644
index 000000000000..d130936c9989
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __IRQSRCS_VMC_1_0_H__
27#define __IRQSRCS_VMC_1_0_H__
28
29
30#define VMC_1_0__SRCID__VM_FAULT 0
31#define VMC_1_0__SRCID__VM_CONTEXT0_ALL 256
32#define VMC_1_0__SRCID__VM_CONTEXT1_ALL 257
33
34#define UTCL2_1_0__SRCID__FAULT 0 /* UTC L2 has encountered a fault or retry scenario */
35
36
37#endif /* __IRQSRCS_VMC_1_0_H__ */
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 145e5c403bea..75c208283e5f 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1206,7 +1206,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
1206 struct pp_hwmgr *hwmgr = handle; 1206 struct pp_hwmgr *hwmgr = handle;
1207 1207
1208 if (!hwmgr || !hwmgr->pm_en) 1208 if (!hwmgr || !hwmgr->pm_en)
1209 return -EINVAL;; 1209 return -EINVAL;
1210 1210
1211 if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) { 1211 if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1212 pr_info("%s was not implemented.\n", __func__); 1212 pr_info("%s was not implemented.\n", __func__);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 9b675d9bd162..8994aa5c8cf8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -147,10 +147,10 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
147 smu7_init_function_pointers(hwmgr); 147 smu7_init_function_pointers(hwmgr);
148 break; 148 break;
149 case AMDGPU_FAMILY_AI: 149 case AMDGPU_FAMILY_AI:
150 hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
151 switch (hwmgr->chip_id) { 150 switch (hwmgr->chip_id) {
152 case CHIP_VEGA10: 151 case CHIP_VEGA10:
153 case CHIP_VEGA20: 152 case CHIP_VEGA20:
153 hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
154 hwmgr->smumgr_funcs = &vega10_smu_funcs; 154 hwmgr->smumgr_funcs = &vega10_smu_funcs;
155 vega10_hwmgr_init(hwmgr); 155 vega10_hwmgr_init(hwmgr);
156 break; 156 break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 077b79938528..052e60dfaf9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,8 @@
48#include "processpptables.h" 48#include "processpptables.h"
49#include "pp_thermal.h" 49#include "pp_thermal.h"
50 50
51#include "ivsrcid/ivsrcid_vislands30.h"
52
51#define MC_CG_ARB_FREQ_F0 0x0a 53#define MC_CG_ARB_FREQ_F0 0x0a
52#define MC_CG_ARB_FREQ_F1 0x0b 54#define MC_CG_ARB_FREQ_F1 0x0b
53#define MC_CG_ARB_FREQ_F2 0x0c 55#define MC_CG_ARB_FREQ_F2 0x0c
@@ -4105,17 +4107,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
4105 4107
4106 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4108 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4107 AMDGPU_IH_CLIENTID_LEGACY, 4109 AMDGPU_IH_CLIENTID_LEGACY,
4108 230, 4110 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
4109 source); 4111 source);
4110 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4112 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4111 AMDGPU_IH_CLIENTID_LEGACY, 4113 AMDGPU_IH_CLIENTID_LEGACY,
4112 231, 4114 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
4113 source); 4115 source);
4114 4116
4115 /* Register CTF(GPIO_19) interrupt */ 4117 /* Register CTF(GPIO_19) interrupt */
4116 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4118 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4117 AMDGPU_IH_CLIENTID_LEGACY, 4119 AMDGPU_IH_CLIENTID_LEGACY,
4118 83, 4120 VISLANDS30_IV_SRCID_GPIO_19,
4119 source); 4121 source);
4120 4122
4121 return 0; 4123 return 0;
@@ -4610,12 +4612,12 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4610 return -EINVAL; 4612 return -EINVAL;
4611 dep_sclk_table = table_info->vdd_dep_on_sclk; 4613 dep_sclk_table = table_info->vdd_dep_on_sclk;
4612 for (i = 0; i < dep_sclk_table->count; i++) 4614 for (i = 0; i < dep_sclk_table->count; i++)
4613 clocks->clock[i] = dep_sclk_table->entries[i].clk; 4615 clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
4614 clocks->count = dep_sclk_table->count; 4616 clocks->count = dep_sclk_table->count;
4615 } else if (hwmgr->pp_table_version == PP_TABLE_V0) { 4617 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4616 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; 4618 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
4617 for (i = 0; i < sclk_table->count; i++) 4619 for (i = 0; i < sclk_table->count; i++)
4618 clocks->clock[i] = sclk_table->entries[i].clk; 4620 clocks->clock[i] = sclk_table->entries[i].clk * 10;
4619 clocks->count = sclk_table->count; 4621 clocks->count = sclk_table->count;
4620 } 4622 }
4621 4623
@@ -4647,7 +4649,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4647 return -EINVAL; 4649 return -EINVAL;
4648 dep_mclk_table = table_info->vdd_dep_on_mclk; 4650 dep_mclk_table = table_info->vdd_dep_on_mclk;
4649 for (i = 0; i < dep_mclk_table->count; i++) { 4651 for (i = 0; i < dep_mclk_table->count; i++) {
4650 clocks->clock[i] = dep_mclk_table->entries[i].clk; 4652 clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
4651 clocks->latency[i] = smu7_get_mem_latency(hwmgr, 4653 clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4652 dep_mclk_table->entries[i].clk); 4654 dep_mclk_table->entries[i].clk);
4653 } 4655 }
@@ -4655,7 +4657,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4655 } else if (hwmgr->pp_table_version == PP_TABLE_V0) { 4657 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4656 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; 4658 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
4657 for (i = 0; i < mclk_table->count; i++) 4659 for (i = 0; i < mclk_table->count; i++)
4658 clocks->clock[i] = mclk_table->entries[i].clk; 4660 clocks->clock[i] = mclk_table->entries[i].clk * 10;
4659 clocks->count = mclk_table->count; 4661 clocks->count = mclk_table->count;
4660 } 4662 }
4661 return 0; 4663 return 0;
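
The repeated '* 10' above suggests the powerplay tables keep clocks in 10 kHz units while the amd_pp_clocks interface now reports kHz; a hedged helper naming that inferred assumption:

	/* Assumption inferred from this hunk: pptable clock entries are in
	 * 10 kHz units, and callers of smu7_get_sclks()/smu7_get_mclks()
	 * expect kHz. */
	static inline uint32_t pptable_clk_to_khz(uint32_t clk_10khz)
	{
		return clk_10khz * 10;
	}
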
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 50690c72b2ea..288802f209dd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -1604,17 +1604,17 @@ static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
1604 switch (type) { 1604 switch (type) {
1605 case amd_pp_disp_clock: 1605 case amd_pp_disp_clock:
1606 for (i = 0; i < clocks->count; i++) 1606 for (i = 0; i < clocks->count; i++)
1607 clocks->clock[i] = data->sys_info.display_clock[i]; 1607 clocks->clock[i] = data->sys_info.display_clock[i] * 10;
1608 break; 1608 break;
1609 case amd_pp_sys_clock: 1609 case amd_pp_sys_clock:
1610 table = hwmgr->dyn_state.vddc_dependency_on_sclk; 1610 table = hwmgr->dyn_state.vddc_dependency_on_sclk;
1611 for (i = 0; i < clocks->count; i++) 1611 for (i = 0; i < clocks->count; i++)
1612 clocks->clock[i] = table->entries[i].clk; 1612 clocks->clock[i] = table->entries[i].clk * 10;
1613 break; 1613 break;
1614 case amd_pp_mem_clock: 1614 case amd_pp_mem_clock:
1615 clocks->count = SMU8_NUM_NBPMEMORYCLOCK; 1615 clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
1616 for (i = 0; i < clocks->count; i++) 1616 for (i = 0; i < clocks->count; i++)
1617 clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i]; 1617 clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
1618 break; 1618 break;
1619 default: 1619 default:
1620 return -1; 1620 return -1;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 3effb5583d1f..2aab1b475945 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -25,6 +25,9 @@
25#include "ppatomctrl.h" 25#include "ppatomctrl.h"
26#include "ppsmc.h" 26#include "ppsmc.h"
27#include "atom.h" 27#include "atom.h"
28#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
29#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
30#include "ivsrcid/ivsrcid_vislands30.h"
28 31
29uint8_t convert_to_vid(uint16_t vddc) 32uint8_t convert_to_vid(uint16_t vddc)
30{ 33{
@@ -543,17 +546,17 @@ int phm_irq_process(struct amdgpu_device *adev,
543 uint32_t src_id = entry->src_id; 546 uint32_t src_id = entry->src_id;
544 547
545 if (client_id == AMDGPU_IH_CLIENTID_LEGACY) { 548 if (client_id == AMDGPU_IH_CLIENTID_LEGACY) {
546 if (src_id == 230) 549 if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
547 pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n", 550 pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
548 PCI_BUS_NUM(adev->pdev->devfn), 551 PCI_BUS_NUM(adev->pdev->devfn),
549 PCI_SLOT(adev->pdev->devfn), 552 PCI_SLOT(adev->pdev->devfn),
550 PCI_FUNC(adev->pdev->devfn)); 553 PCI_FUNC(adev->pdev->devfn));
551 else if (src_id == 231) 554 else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
552 pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n", 555 pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
553 PCI_BUS_NUM(adev->pdev->devfn), 556 PCI_BUS_NUM(adev->pdev->devfn),
554 PCI_SLOT(adev->pdev->devfn), 557 PCI_SLOT(adev->pdev->devfn),
555 PCI_FUNC(adev->pdev->devfn)); 558 PCI_FUNC(adev->pdev->devfn));
556 else if (src_id == 83) 559 else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
557 pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n", 560 pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
558 PCI_BUS_NUM(adev->pdev->devfn), 561 PCI_BUS_NUM(adev->pdev->devfn),
559 PCI_SLOT(adev->pdev->devfn), 562 PCI_SLOT(adev->pdev->devfn),
@@ -594,17 +597,17 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
594 597
595 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 598 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
596 SOC15_IH_CLIENTID_THM, 599 SOC15_IH_CLIENTID_THM,
597 0, 600 THM_9_0__SRCID__THM_DIG_THERM_L2H,
598 source); 601 source);
599 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 602 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
600 SOC15_IH_CLIENTID_THM, 603 SOC15_IH_CLIENTID_THM,
601 1, 604 THM_9_0__SRCID__THM_DIG_THERM_H2L,
602 source); 605 source);
603 606
604 /* Register CTF(GPIO_19) interrupt */ 607 /* Register CTF(GPIO_19) interrupt */
605 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 608 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
606 SOC15_IH_CLIENTID_ROM_SMUIO, 609 SOC15_IH_CLIENTID_ROM_SMUIO,
607 83, 610 SMUIO_9_0__SRCID__SMUIO_GPIO19,
608 source); 611 source);
609 612
610 return 0; 613 return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 5e771bc119d6..1a0dccb3fac1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2896,11 +2896,6 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2896 2896
2897 vega10_enable_disable_PCC_limit_feature(hwmgr, true); 2897 vega10_enable_disable_PCC_limit_feature(hwmgr, true);
2898 2898
2899 if ((hwmgr->smu_version == 0x001c2c00) ||
2900 (hwmgr->smu_version == 0x001c2d00))
2901 smum_send_msg_to_smc_with_parameter(hwmgr,
2902 PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
2903
2904 smum_send_msg_to_smc_with_parameter(hwmgr, 2899 smum_send_msg_to_smc_with_parameter(hwmgr,
2905 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); 2900 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2906 2901
@@ -3801,7 +3796,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
3801 3796
3802 if (i < dpm_table->count) { 3797 if (i < dpm_table->count) {
3803 clock_req.clock_type = amd_pp_dcef_clock; 3798 clock_req.clock_type = amd_pp_dcef_clock;
3804 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value; 3799 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
3805 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) { 3800 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
3806 smum_send_msg_to_smc_with_parameter( 3801 smum_send_msg_to_smc_with_parameter(
3807 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, 3802 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 57492878874f..4ed218dd8ba7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -423,6 +423,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
423 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit * 423 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
424 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100; 424 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
425 425
426 if (hwmgr->feature_mask & PP_GFXOFF_MASK)
427 data->gfxoff_controlled_by_driver = true;
428 else
429 data->gfxoff_controlled_by_driver = false;
430
426 return result; 431 return result;
427} 432}
428 433
@@ -472,7 +477,7 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
472 "[GetNumOfDpmLevel] failed to get dpm levels!", 477 "[GetNumOfDpmLevel] failed to get dpm levels!",
473 return ret); 478 return ret);
474 479
475 vega12_read_arg_from_smc(hwmgr, num_of_levels); 480 *num_of_levels = smum_get_argument(hwmgr);
476 PP_ASSERT_WITH_CODE(*num_of_levels > 0, 481 PP_ASSERT_WITH_CODE(*num_of_levels > 0,
477 "[GetNumOfDpmLevel] number of clk levels is invalid!", 482 "[GetNumOfDpmLevel] number of clk levels is invalid!",
478 return -EINVAL); 483 return -EINVAL);
@@ -483,7 +488,7 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
483static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, 488static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
484 PPCLK_e clkID, uint32_t index, uint32_t *clock) 489 PPCLK_e clkID, uint32_t index, uint32_t *clock)
485{ 490{
486 int result; 491 int result = 0;
487 492
488 /* 493 /*
489 *SMU expects the Clock ID to be in the top 16 bits. 494 *SMU expects the Clock ID to be in the top 16 bits.
@@ -494,11 +499,7 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
494 "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!", 499 "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
495 return -EINVAL); 500 return -EINVAL);
496 501
497 result = vega12_read_arg_from_smc(hwmgr, clock); 502 *clock = smum_get_argument(hwmgr);
498
499 PP_ASSERT_WITH_CODE(*clock != 0,
500 "[GetDPMFrequencyByIndex] Failed to get dpm frequency by index.!",
501 return -EINVAL);
502 503
503 return result; 504 return result;
504} 505}
@@ -879,21 +880,21 @@ static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
879 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0, 880 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
880 "[GetClockRanges] Failed to get max ac clock from SMC!", 881 "[GetClockRanges] Failed to get max ac clock from SMC!",
881 return -EINVAL); 882 return -EINVAL);
882 vega12_read_arg_from_smc(hwmgr, &(clock->ACMax)); 883 clock->ACMax = smum_get_argument(hwmgr);
883 884
884 /* AC Min */ 885 /* AC Min */
885 PP_ASSERT_WITH_CODE( 886 PP_ASSERT_WITH_CODE(
886 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0, 887 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
887 "[GetClockRanges] Failed to get min ac clock from SMC!", 888 "[GetClockRanges] Failed to get min ac clock from SMC!",
888 return -EINVAL); 889 return -EINVAL);
889 vega12_read_arg_from_smc(hwmgr, &(clock->ACMin)); 890 clock->ACMin = smum_get_argument(hwmgr);
890 891
891 /* DC Max */ 892 /* DC Max */
892 PP_ASSERT_WITH_CODE( 893 PP_ASSERT_WITH_CODE(
893 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0, 894 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
894 "[GetClockRanges] Failed to get max dc clock from SMC!", 895 "[GetClockRanges] Failed to get max dc clock from SMC!",
895 return -EINVAL); 896 return -EINVAL);
896 vega12_read_arg_from_smc(hwmgr, &(clock->DCMax)); 897 clock->DCMax = smum_get_argument(hwmgr);
897 898
898 return 0; 899 return 0;
899} 900}
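
A condensed sketch of the request/response pattern this file converges on after the conversion: send the message with the clock ID in the upper 16 bits, then fetch the SMU's reply via smum_get_argument() instead of the removed vega12_read_arg_from_smc():

	static int get_max_dpm_freq(struct pp_hwmgr *hwmgr, PPCLK_e clkid,
				    uint32_t *freq)
	{
		/* clock ID goes in the top 16 bits of the parameter */
		if (smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_GetMaxDpmFreq, clkid << 16))
			return -EINVAL;

		*freq = smum_get_argument(hwmgr);
		return 0;
	}
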
@@ -1214,7 +1215,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
1214 "Failed to get current package power!", 1215 "Failed to get current package power!",
1215 return -EINVAL); 1216 return -EINVAL);
1216 1217
1217 vega12_read_arg_from_smc(hwmgr, &value); 1218 value = smum_get_argument(hwmgr);
1218 /* power value is an integer */ 1219 /* power value is an integer */
1219 *query = value << 8; 1220 *query = value << 8;
1220#endif 1221#endif
@@ -1230,11 +1231,8 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
1230 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 1231 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
1231 PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0, 1232 PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
1232 "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!", 1233 "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
1233 return -1); 1234 return -EINVAL);
1234 PP_ASSERT_WITH_CODE( 1235 gfx_clk = smum_get_argument(hwmgr);
1235 vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0,
1236 "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed",
1237 return -1);
1238 1236
1239 *gfx_freq = gfx_clk * 100; 1237 *gfx_freq = gfx_clk * 100;
1240 1238
@@ -1250,11 +1248,8 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
1250 PP_ASSERT_WITH_CODE( 1248 PP_ASSERT_WITH_CODE(
1251 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0, 1249 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
1252 "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!", 1250 "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
1253 return -1); 1251 return -EINVAL);
1254 PP_ASSERT_WITH_CODE( 1252 mem_clk = smum_get_argument(hwmgr);
1255 vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0,
1256 "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed",
1257 return -1);
1258 1253
1259 *mclk_freq = mem_clk * 100; 1254 *mclk_freq = mem_clk * 100;
1260 1255
@@ -1271,16 +1266,12 @@ static int vega12_get_current_activity_percent(
1271#if 0 1266#if 0
1272 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0); 1267 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
1273 if (!ret) { 1268 if (!ret) {
1274 ret = vega12_read_arg_from_smc(hwmgr, &current_activity); 1269 current_activity = smum_get_argument(hwmgr);
1275 if (!ret) { 1270 if (current_activity > 100) {
1276 if (current_activity > 100) {
1277 PP_ASSERT(false,
1278 "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
1279 current_activity = 100;
1280 }
1281 } else
1282 PP_ASSERT(false, 1271 PP_ASSERT(false,
1283 "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!"); 1272 "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
1273 current_activity = 100;
1274 }
1284 } else 1275 } else
1285 PP_ASSERT(false, 1276 PP_ASSERT(false,
1286 "[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!"); 1277 "[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
@@ -1361,7 +1352,6 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
1361 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 1352 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1362 switch (clk_type) { 1353 switch (clk_type) {
1363 case amd_pp_dcef_clock: 1354 case amd_pp_dcef_clock:
1364 clk_freq = clock_req->clock_freq_in_khz / 100;
1365 clk_select = PPCLK_DCEFCLK; 1355 clk_select = PPCLK_DCEFCLK;
1366 break; 1356 break;
1367 case amd_pp_disp_clock: 1357 case amd_pp_disp_clock:
@@ -1410,7 +1400,7 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
1410 1400
1411 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { 1401 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
1412 clock_req.clock_type = amd_pp_dcef_clock; 1402 clock_req.clock_type = amd_pp_dcef_clock;
1413 clock_req.clock_freq_in_khz = min_clocks.dcefClock; 1403 clock_req.clock_freq_in_khz = min_clocks.dcefClock/10;
1414 if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) { 1404 if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
1415 if (data->smu_features[GNLD_DS_DCEFCLK].supported) 1405 if (data->smu_features[GNLD_DS_DCEFCLK].supported)
1416 PP_ASSERT_WITH_CODE( 1406 PP_ASSERT_WITH_CODE(
@@ -1877,7 +1867,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
1877 for (i = 0; i < clocks.num_levels; i++) 1867 for (i = 0; i < clocks.num_levels; i++)
1878 size += sprintf(buf + size, "%d: %uMhz %s\n", 1868 size += sprintf(buf + size, "%d: %uMhz %s\n",
1879 i, clocks.data[i].clocks_in_khz / 1000, 1869 i, clocks.data[i].clocks_in_khz / 1000,
1880 (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); 1870 (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
1881 break; 1871 break;
1882 1872
1883 case PP_MCLK: 1873 case PP_MCLK:
@@ -1893,7 +1883,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
1893 for (i = 0; i < clocks.num_levels; i++) 1883 for (i = 0; i < clocks.num_levels; i++)
1894 size += sprintf(buf + size, "%d: %uMhz %s\n", 1884 size += sprintf(buf + size, "%d: %uMhz %s\n",
1895 i, clocks.data[i].clocks_in_khz / 1000, 1885 i, clocks.data[i].clocks_in_khz / 1000,
1896 (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); 1886 (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
1897 break; 1887 break;
1898 1888
1899 case PP_PCIE: 1889 case PP_PCIE:
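The two print-levels hunks fix a unit mismatch rather than the output format: the "now" value produced by the get_current_*_freq helpers above is a clock reading multiplied by 100 (10 kHz units), while the level table stores kHz, so both sides are reduced to MHz before the current-level marker comparison. A one-line sanity check of that arithmetic:

#include <assert.h>

int main(void)
{
	unsigned int level_khz = 1138000;	/* table entry, in kHz */
	unsigned int now = 1138 * 100;		/* reading: MHz * 100 */

	/* Both sides converted to MHz, as in the updated comparison. */
	assert(level_khz / 1000 == now / 100);
	return 0;
}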
@@ -2329,6 +2319,38 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
2329 return 0; 2319 return 0;
2330} 2320}
2331 2321
2322static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
2323{
2324 struct vega12_hwmgr *data =
2325 (struct vega12_hwmgr *)(hwmgr->backend);
2326 int ret = 0;
2327
2328 if (data->gfxoff_controlled_by_driver)
2329 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
2330
2331 return ret;
2332}
2333
2334static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
2335{
2336 struct vega12_hwmgr *data =
2337 (struct vega12_hwmgr *)(hwmgr->backend);
2338 int ret = 0;
2339
2340 if (data->gfxoff_controlled_by_driver)
2341 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
2342
2343 return ret;
2344}
2345
2346static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
2347{
2348 if (enable)
2349 return vega12_enable_gfx_off(hwmgr);
2350 else
2351 return vega12_disable_gfx_off(hwmgr);
2352}
2353
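The three new functions implement a single policy switch: gfxoff messages are only sent to the SMC when the driver, not the firmware, owns the gfxoff decision. A standalone sketch of the shape, with stub message IDs in place of PPSMC_MSG_AllowGfxOff/PPSMC_MSG_DisallowGfxOff:

#include <stdbool.h>
#include <stdio.h>

enum { MSG_ALLOW_GFXOFF, MSG_DISALLOW_GFXOFF };	/* stub message IDs */

struct backend { bool gfxoff_controlled_by_driver; };

static int send_msg_to_smc(int msg)
{
	printf("SMC message %d\n", msg);
	return 0;
}

/* Mirrors vega12_gfx_off_control(): a no-op unless the driver owns
 * the gfxoff policy, otherwise one message per direction. */
static int gfx_off_control(struct backend *data, bool enable)
{
	if (!data->gfxoff_controlled_by_driver)
		return 0;
	return send_msg_to_smc(enable ? MSG_ALLOW_GFXOFF
				      : MSG_DISALLOW_GFXOFF);
}

int main(void)
{
	struct backend data = { .gfxoff_controlled_by_driver = true };

	gfx_off_control(&data, true);	/* allow gfxoff */
	gfx_off_control(&data, false);	/* disallow gfxoff */
	return 0;
}

Wiring the single entry point into pp_hwmgr_func as .powergate_gfx (below) keeps the enable/disable split an internal detail.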
2332static const struct pp_hwmgr_func vega12_hwmgr_funcs = { 2354static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
2333 .backend_init = vega12_hwmgr_backend_init, 2355 .backend_init = vega12_hwmgr_backend_init,
2334 .backend_fini = vega12_hwmgr_backend_fini, 2356 .backend_fini = vega12_hwmgr_backend_fini,
@@ -2378,6 +2400,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
2378 .get_thermal_temperature_range = vega12_get_thermal_temperature_range, 2400 .get_thermal_temperature_range = vega12_get_thermal_temperature_range,
2379 .register_irq_handlers = smu9_register_irq_handlers, 2401 .register_irq_handlers = smu9_register_irq_handlers,
2380 .start_thermal_controller = vega12_start_thermal_controller, 2402 .start_thermal_controller = vega12_start_thermal_controller,
2403 .powergate_gfx = vega12_gfx_off_control,
2381}; 2404};
2382 2405
2383int vega12_hwmgr_init(struct pp_hwmgr *hwmgr) 2406int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index e17237c90eea..b3e424d28994 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -393,6 +393,9 @@ struct vega12_hwmgr {
393 struct vega12_smc_state_table smc_state_table; 393 struct vega12_smc_state_table smc_state_table;
394 394
395 struct vega12_clock_range clk_range[PPCLK_COUNT]; 395 struct vega12_clock_range clk_range[PPCLK_COUNT];
396
397 /* ---- Gfxoff ---- */
398 bool gfxoff_controlled_by_driver;
396}; 399};
397 400
398#define VEGA12_DPM2_NEAR_TDP_DEC 10 401#define VEGA12_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index cfd9e6ccb790..904eb2c9155b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -34,11 +34,9 @@ static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
34 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 34 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
35 PPSMC_MSG_GetCurrentRpm), 35 PPSMC_MSG_GetCurrentRpm),
36 "Attempt to get current RPM from SMC Failed!", 36 "Attempt to get current RPM from SMC Failed!",
37 return -1); 37 return -EINVAL);
38 PP_ASSERT_WITH_CODE(!vega12_read_arg_from_smc(hwmgr, 38 *current_rpm = smum_get_argument(hwmgr);
39 current_rpm), 39
40 "Attempt to read current RPM from SMC Failed!",
41 return -1);
42 return 0; 40 return 0;
43} 41}
44 42
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index b3363f26039a..d3d96260f440 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -194,7 +194,7 @@ struct pp_smumgr_func {
194 int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr); 194 int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr);
195 int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr, 195 int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr,
196 uint32_t firmware); 196 uint32_t firmware);
197 int (*get_argument)(struct pp_hwmgr *hwmgr); 197 uint32_t (*get_argument)(struct pp_hwmgr *hwmgr);
198 int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg); 198 int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg);
199 int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr, 199 int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr,
200 uint16_t msg, uint32_t parameter); 200 uint16_t msg, uint32_t parameter);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 89dfbf53c7e6..82550a8a3a3f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -80,7 +80,7 @@ enum SMU10_TABLE_ID {
80 SMU10_CLOCKTABLE, 80 SMU10_CLOCKTABLE,
81}; 81};
82 82
83extern int smum_get_argument(struct pp_hwmgr *hwmgr); 83extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
84 84
85extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); 85extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
86 86
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 0a200406a1ec..8d557accaef2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -26,7 +26,7 @@
26SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \ 26SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
27 polaris10_smumgr.o iceland_smumgr.o \ 27 polaris10_smumgr.o iceland_smumgr.o \
28 smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \ 28 smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
29 vega12_smumgr.o vegam_smumgr.o 29 vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o
30 30
31AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) 31AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
32 32
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 0a563f6fe9ea..bb07d43f3874 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -68,7 +68,7 @@ static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
68 return 0; 68 return 0;
69} 69}
70 70
71static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr) 71static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
72{ 72{
73 struct amdgpu_device *adev = hwmgr->adev; 73 struct amdgpu_device *adev = hwmgr->adev;
74 74
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index d644a9bb9078..a029e47c2319 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -379,8 +379,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
379{ 379{
380 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); 380 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
381 uint32_t fw_to_load; 381 uint32_t fw_to_load;
382 int result = 0; 382 int r = 0;
383 struct SMU_DRAMData_TOC *toc;
384 383
385 if (!hwmgr->reload_fw) { 384 if (!hwmgr->reload_fw) {
386 pr_info("skip reloading...\n"); 385 pr_info("skip reloading...\n");
@@ -421,49 +420,62 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
421 + UCODE_ID_CP_MEC_JT2_MASK; 420 + UCODE_ID_CP_MEC_JT2_MASK;
422 } 421 }
423 422
424 toc = (struct SMU_DRAMData_TOC *)smu_data->header; 423 if (!smu_data->toc) {
425 toc->num_entries = 0; 424 struct SMU_DRAMData_TOC *toc;
426 toc->structure_version = 1; 425
426 smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL);
427 if (!smu_data->toc)
428 return -ENOMEM;
429 toc = smu_data->toc;
430 toc->num_entries = 0;
431 toc->structure_version = 1;
427 432
428 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, 433 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
429 UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), 434 UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
430 "Failed to Get Firmware Entry.", return -EINVAL); 435 "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
431 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, 436 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
432 UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), 437 UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
433 "Failed to Get Firmware Entry.", return -EINVAL); 438 "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
434 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, 439 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
435 UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), 440 UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
436 "Failed to Get Firmware Entry.", return -EINVAL); 441 "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
437 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, 442 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
438 UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), 443 UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
439 "Failed to Get Firmware Entry.", return -EINVAL); 444 "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
440 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, 445 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
441 UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), 446 UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
442 "Failed to Get Firmware Entry.", return -EINVAL); 447 "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
443 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, 448 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
444 UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), 449 UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
445 "Failed to Get Firmware Entry.", return -EINVAL); 450 "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
446 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, 451 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
447 UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), 452 UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
448 "Failed to Get Firmware Entry.", return -EINVAL); 453 "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
449 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, 454 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
450 UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), 455 UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
451 "Failed to Get Firmware Entry.", return -EINVAL); 456 "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
452 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
453 UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
454 "Failed to Get Firmware Entry.", return -EINVAL);
455 if (!hwmgr->not_vf)
456 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, 457 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
458 UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
459 "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
460 if (!hwmgr->not_vf)
461 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
457 UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), 462 UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
458 "Failed to Get Firmware Entry.", return -EINVAL); 463 "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
459 464 }
465 memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
466 sizeof(struct SMU_DRAMData_TOC));
460 smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr)); 467 smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
461 smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr)); 468 smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
462 469
463 if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load)) 470 if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
464 pr_err("Fail to Request SMU Load uCode"); 471 pr_err("Fail to Request SMU Load uCode");
465 472
466 return result; 473 return r;
474
475failed:
476 kfree(smu_data->toc);
477 smu_data->toc = NULL;
478 return r;
467} 479}
468 480
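The rework above changes smu7_request_smu_load_fw() from rebuilding the firmware TOC in the mapped header buffer on every call to building it once in a kzalloc'd kernel copy, copying that copy into the VRAM buffer with memcpy_toio() on each request, and freeing it both on a populate failure and later in smu7_smu_fini(). A userspace sketch of that build-once/copy-always/cleanup-on-error flow, with stub types:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct toc { unsigned int structure_version; unsigned int num_entries; };

struct smumgr {
	struct toc *toc;		/* cached kernel copy */
	struct toc header_buffer;	/* stand-in for the VRAM mapping */
};

static int populate_entry(struct toc *toc) { toc->num_entries++; return 0; }

static int request_load_fw(struct smumgr *smu)
{
	if (!smu->toc) {			/* build the table only once */
		smu->toc = calloc(1, sizeof(*smu->toc));
		if (!smu->toc)
			return -ENOMEM;
		smu->toc->structure_version = 1;
		if (populate_entry(smu->toc)) {	/* free and reset on failure */
			free(smu->toc);
			smu->toc = NULL;
			return -EINVAL;
		}
	}
	/* Each request re-copies the cached TOC into the shared buffer,
	 * like the memcpy_toio() into header_buffer.kaddr above. */
	memcpy(&smu->header_buffer, smu->toc, sizeof(*smu->toc));
	return 0;
}

int main(void)
{
	struct smumgr smu = { 0 };
	int r = request_load_fw(&smu);		/* builds and copies */

	if (!r)
		r = request_load_fw(&smu);	/* reuses the cached TOC */
	free(smu.toc);
	return r ? 1 : 0;
}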
469/* Check if the FW has been loaded, SMU will not return if loading has not finished. */ 481/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
@@ -570,7 +582,6 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
570int smu7_init(struct pp_hwmgr *hwmgr) 582int smu7_init(struct pp_hwmgr *hwmgr)
571{ 583{
572 struct smu7_smumgr *smu_data; 584 struct smu7_smumgr *smu_data;
573 uint64_t mc_addr = 0;
574 int r; 585 int r;
575 /* Allocate memory for backend private data */ 586 /* Allocate memory for backend private data */
576 smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); 587 smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
@@ -584,15 +595,12 @@ int smu7_init(struct pp_hwmgr *hwmgr)
584 PAGE_SIZE, 595 PAGE_SIZE,
585 AMDGPU_GEM_DOMAIN_VRAM, 596 AMDGPU_GEM_DOMAIN_VRAM,
586 &smu_data->header_buffer.handle, 597 &smu_data->header_buffer.handle,
587 &mc_addr, 598 &smu_data->header_buffer.mc_addr,
588 &smu_data->header_buffer.kaddr); 599 &smu_data->header_buffer.kaddr);
589 600
590 if (r) 601 if (r)
591 return -EINVAL; 602 return -EINVAL;
592 603
593 smu_data->header = smu_data->header_buffer.kaddr;
594 smu_data->header_buffer.mc_addr = mc_addr;
595
596 if (!hwmgr->not_vf) 604 if (!hwmgr->not_vf)
597 return 0; 605 return 0;
598 606
@@ -602,7 +610,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
602 PAGE_SIZE, 610 PAGE_SIZE,
603 AMDGPU_GEM_DOMAIN_VRAM, 611 AMDGPU_GEM_DOMAIN_VRAM,
604 &smu_data->smu_buffer.handle, 612 &smu_data->smu_buffer.handle,
605 &mc_addr, 613 &smu_data->smu_buffer.mc_addr,
606 &smu_data->smu_buffer.kaddr); 614 &smu_data->smu_buffer.kaddr);
607 615
608 if (r) { 616 if (r) {
@@ -611,7 +619,6 @@ int smu7_init(struct pp_hwmgr *hwmgr)
611 &smu_data->header_buffer.kaddr); 619 &smu_data->header_buffer.kaddr);
612 return -EINVAL; 620 return -EINVAL;
613 } 621 }
614 smu_data->smu_buffer.mc_addr = mc_addr;
615 622
616 if (smum_is_hw_avfs_present(hwmgr)) 623 if (smum_is_hw_avfs_present(hwmgr))
617 hwmgr->avfs_supported = true; 624 hwmgr->avfs_supported = true;
@@ -633,6 +640,9 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
633 &smu_data->smu_buffer.mc_addr, 640 &smu_data->smu_buffer.mc_addr,
634 &smu_data->smu_buffer.kaddr); 641 &smu_data->smu_buffer.kaddr);
635 642
643
644 kfree(smu_data->toc);
645 smu_data->toc = NULL;
636 kfree(hwmgr->smu_backend); 646 kfree(hwmgr->smu_backend);
637 hwmgr->smu_backend = NULL; 647 hwmgr->smu_backend = NULL;
638 return 0; 648 return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 39c9bfda0ab4..01f0538fba6b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -37,10 +37,9 @@ struct smu7_buffer_entry {
37}; 37};
38 38
39struct smu7_smumgr { 39struct smu7_smumgr {
40 uint8_t *header;
41 uint8_t *mec_image;
42 struct smu7_buffer_entry smu_buffer; 40 struct smu7_buffer_entry smu_buffer;
43 struct smu7_buffer_entry header_buffer; 41 struct smu7_buffer_entry header_buffer;
42 struct SMU_DRAMData_TOC *toc;
44 43
45 uint32_t soft_regs_start; 44 uint32_t soft_regs_start;
46 uint32_t dpm_table_start; 45 uint32_t dpm_table_start;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index c861d3023474..f7e3bc22bb93 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -52,10 +52,10 @@ static const enum smu8_scratch_entry firmware_list[] = {
52 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, 52 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
53}; 53};
54 54
55static int smu8_get_argument(struct pp_hwmgr *hwmgr) 55static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
56{ 56{
57 if (hwmgr == NULL || hwmgr->device == NULL) 57 if (hwmgr == NULL || hwmgr->device == NULL)
58 return -EINVAL; 58 return 0;
59 59
60 return cgs_read_register(hwmgr->device, 60 return cgs_read_register(hwmgr->device,
61 mmSMU_MP1_SRBM2P_ARG_0); 61 mmSMU_MP1_SRBM2P_ARG_0);
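The return-type change from int to uint32_t (mirrored in pp_smumgr_func and smum_get_argument above) removes a real ambiguity: the mailbox carries an arbitrary 32-bit payload, so an error code multiplexed onto the same int is indistinguishable from a legitimate value. A small demonstration of the collision the signed variant allowed:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int get_argument_int(uint32_t reg) { return (int)reg; }

static uint32_t get_argument_u32(uint32_t reg) { return reg; }

int main(void)
{
	/* A perfectly valid payload that happens to equal -EINVAL. */
	uint32_t reg = (uint32_t)-EINVAL;

	if (get_argument_int(reg) < 0)
		puts("int return: payload misread as an error");

	printf("u32 return: 0x%08x delivered intact\n",
	       get_argument_u32(reg));
	return 0;
}

The trade-off is visible in the NULL check above, which now returns 0 instead of -EINVAL; callers are expected to catch failures on the preceding send rather than on the argument read.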
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
new file mode 100644
index 000000000000..079fc8e8f709
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
@@ -0,0 +1,150 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "smumgr.h"
25#include "vega10_inc.h"
26#include "soc15_common.h"
27#include "pp_debug.h"
28
29
30/* MP Apertures */
31#define MP0_Public 0x03800000
32#define MP0_SRAM 0x03900000
33#define MP1_Public 0x03b00000
34#define MP1_SRAM 0x03c00004
35
36#define smnMP1_FIRMWARE_FLAGS 0x3010028
37
38bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr)
39{
40 struct amdgpu_device *adev = hwmgr->adev;
41 uint32_t mp1_fw_flags;
42
43 WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
44 (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
45
46 mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
47
48 if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
49 return true;
50
51 return false;
52}
53
54/*
55 * Check if SMC has responded to previous message.
56 *
57 * @param hwmgr the address of the powerplay hardware manager.
58 * @return TRUE SMC has responded, FALSE otherwise.
59 */
60static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
61{
62 struct amdgpu_device *adev = hwmgr->adev;
63 uint32_t reg;
64 uint32_t ret;
65
66 reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
67
68 ret = phm_wait_for_register_unequal(hwmgr, reg,
69 0, MP1_C2PMSG_90__CONTENT_MASK);
70
71 if (ret)
72 pr_err("No response from smu\n");
73
74 return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
75}
76
77/*
78 * Send a message to the SMC, and do not wait for its response.
79 * @param hwmgr the address of the powerplay hardware manager.
80 * @param msg the message to send.
81 * @return Always returns 0.
82 */
83static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
84 uint16_t msg)
85{
86 struct amdgpu_device *adev = hwmgr->adev;
87
88 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
89
90 return 0;
91}
92
93/*
94 * Send a message to the SMC, and wait for its response.
95 * @param hwmgr the address of the powerplay hardware manager.
96 * @param msg the message to send.
97 * @return Always returns 0.
98 */
99int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
100{
101 struct amdgpu_device *adev = hwmgr->adev;
102 uint32_t ret;
103
104 smu9_wait_for_response(hwmgr);
105
106 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
107
108 smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
109
110 ret = smu9_wait_for_response(hwmgr);
111 if (ret != 1)
112 pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
113
114 return 0;
115}
116
117/*
118 * Send a message to the SMC with parameter
119 * @param hwmgr: the address of the powerplay hardware manager.
120 * @param msg: the message to send.
121 * @param parameter: the parameter to send
122 * @return Always returns 0.
123 */
124int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
125 uint16_t msg, uint32_t parameter)
126{
127 struct amdgpu_device *adev = hwmgr->adev;
128 uint32_t ret;
129
130 smu9_wait_for_response(hwmgr);
131
132 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
133
134 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
135
136 smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
137
138 ret = smu9_wait_for_response(hwmgr);
139 if (ret != 1)
140 pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
141
142 return 0;
143}
144
145uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr)
146{
147 struct amdgpu_device *adev = hwmgr->adev;
148
149 return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
150}
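The new smu9_smumgr.c hoists the MP1 mailbox protocol that Vega10 and Vega12 previously duplicated: wait for the previous response in C2PMSG_90, clear it, write the parameter to C2PMSG_82, write the message ID to C2PMSG_66, wait again, then read the reply back from C2PMSG_82. A hedged sketch of the caller-side round trip through the shared helpers; query_something and PPSMC_MSG_GetSomething are invented names, and this only compiles in-tree against the headers above:

/* Hypothetical in-tree caller of the shared smu9 helpers. */
#include <linux/errno.h>
#include "smu9_smumgr.h"

static int query_something(struct pp_hwmgr *hwmgr, uint32_t *out)
{
	if (!smu9_is_smc_ram_running(hwmgr))
		return -EINVAL;			/* MP1 firmware not up */

	/* Request/response round trip through the C2PMSG mailbox. */
	if (smu9_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_GetSomething, 0))
		return -EINVAL;

	*out = smu9_get_argument(hwmgr);	/* reply from C2PMSG_82 */
	return 0;
}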
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
new file mode 100644
index 000000000000..1462279ca128
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _SMU9_SMUMANAGER_H_
24#define _SMU9_SMUMANAGER_H_
25
26bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr);
27int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
28int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
29 uint16_t msg, uint32_t parameter);
30uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr);
31
32#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c9837935f0f5..99d5e4f98f49 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -96,7 +96,7 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
96 return 0; 96 return 0;
97} 97}
98 98
99int smum_get_argument(struct pp_hwmgr *hwmgr) 99uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
100{ 100{
101 if (NULL != hwmgr->smumgr_funcs->get_argument) 101 if (NULL != hwmgr->smumgr_funcs->get_argument)
102 return hwmgr->smumgr_funcs->get_argument(hwmgr); 102 return hwmgr->smumgr_funcs->get_argument(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index e84669c448a3..5d19115f410c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -28,142 +28,11 @@
28#include "vega10_hwmgr.h" 28#include "vega10_hwmgr.h"
29#include "vega10_ppsmc.h" 29#include "vega10_ppsmc.h"
30#include "smu9_driver_if.h" 30#include "smu9_driver_if.h"
31#include "smu9_smumgr.h"
31#include "ppatomctrl.h" 32#include "ppatomctrl.h"
32#include "pp_debug.h" 33#include "pp_debug.h"
33 34
34 35
35#define AVFS_EN_MSB 1568
36#define AVFS_EN_LSB 1568
37
38/* Microcode file is stored in this buffer */
39#define BUFFER_SIZE 80000
40#define MAX_STRING_SIZE 15
41#define BUFFER_SIZETWO 131072 /* 128 *1024 */
42
43/* MP Apertures */
44#define MP0_Public 0x03800000
45#define MP0_SRAM 0x03900000
46#define MP1_Public 0x03b00000
47#define MP1_SRAM 0x03c00004
48
49#define smnMP1_FIRMWARE_FLAGS 0x3010028
50#define smnMP0_FW_INTF 0x3010104
51#define smnMP1_PUB_CTRL 0x3010b14
52
53static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
54{
55 struct amdgpu_device *adev = hwmgr->adev;
56 uint32_t mp1_fw_flags;
57
58 WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
59 (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
60
61 mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
62
63 if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
64 return true;
65
66 return false;
67}
68
69/*
70 * Check if SMC has responded to previous message.
71 *
72 * @param smumgr the address of the powerplay hardware manager.
73 * @return TRUE SMC has responded, FALSE otherwise.
74 */
75static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
76{
77 struct amdgpu_device *adev = hwmgr->adev;
78 uint32_t reg;
79 uint32_t ret;
80
81 reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
82
83 ret = phm_wait_for_register_unequal(hwmgr, reg,
84 0, MP1_C2PMSG_90__CONTENT_MASK);
85
86 if (ret)
87 pr_err("No response from smu\n");
88
89 return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
90}
91
92/*
93 * Send a message to the SMC, and do not wait for its response.
94 * @param smumgr the address of the powerplay hardware manager.
95 * @param msg the message to send.
96 * @return Always return 0.
97 */
98static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
99 uint16_t msg)
100{
101 struct amdgpu_device *adev = hwmgr->adev;
102
103 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
104
105 return 0;
106}
107
108/*
109 * Send a message to the SMC, and wait for its response.
110 * @param hwmgr the address of the powerplay hardware manager.
111 * @param msg the message to send.
112 * @return Always return 0.
113 */
114static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
115{
116 struct amdgpu_device *adev = hwmgr->adev;
117 uint32_t ret;
118
119 vega10_wait_for_response(hwmgr);
120
121 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
122
123 vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
124
125 ret = vega10_wait_for_response(hwmgr);
126 if (ret != 1)
127 pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
128
129 return 0;
130}
131
132/*
133 * Send a message to the SMC with parameter
134 * @param hwmgr: the address of the powerplay hardware manager.
135 * @param msg: the message to send.
136 * @param parameter: the parameter to send
137 * @return Always return 0.
138 */
139static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
140 uint16_t msg, uint32_t parameter)
141{
142 struct amdgpu_device *adev = hwmgr->adev;
143 uint32_t ret;
144
145 vega10_wait_for_response(hwmgr);
146
147 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
148
149 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
150
151 vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
152
153 ret = vega10_wait_for_response(hwmgr);
154 if (ret != 1)
155 pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
156
157 return 0;
158}
159
160static int vega10_get_argument(struct pp_hwmgr *hwmgr)
161{
162 struct amdgpu_device *adev = hwmgr->adev;
163
164 return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
165}
166
167static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, 36static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
168 uint8_t *table, int16_t table_id) 37 uint8_t *table, int16_t table_id)
169{ 38{
@@ -175,13 +44,13 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
175 "Invalid SMU Table version!", return -EINVAL); 44 "Invalid SMU Table version!", return -EINVAL);
176 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, 45 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
177 "Invalid SMU Table Length!", return -EINVAL); 46 "Invalid SMU Table Length!", return -EINVAL);
178 vega10_send_msg_to_smc_with_parameter(hwmgr, 47 smu9_send_msg_to_smc_with_parameter(hwmgr,
179 PPSMC_MSG_SetDriverDramAddrHigh, 48 PPSMC_MSG_SetDriverDramAddrHigh,
180 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 49 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
181 vega10_send_msg_to_smc_with_parameter(hwmgr, 50 smu9_send_msg_to_smc_with_parameter(hwmgr,
182 PPSMC_MSG_SetDriverDramAddrLow, 51 PPSMC_MSG_SetDriverDramAddrLow,
183 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 52 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
184 vega10_send_msg_to_smc_with_parameter(hwmgr, 53 smu9_send_msg_to_smc_with_parameter(hwmgr,
185 PPSMC_MSG_TransferTableSmu2Dram, 54 PPSMC_MSG_TransferTableSmu2Dram,
186 priv->smu_tables.entry[table_id].table_id); 55 priv->smu_tables.entry[table_id].table_id);
187 56
@@ -206,13 +75,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
206 memcpy(priv->smu_tables.entry[table_id].table, table, 75 memcpy(priv->smu_tables.entry[table_id].table, table,
207 priv->smu_tables.entry[table_id].size); 76 priv->smu_tables.entry[table_id].size);
208 77
209 vega10_send_msg_to_smc_with_parameter(hwmgr, 78 smu9_send_msg_to_smc_with_parameter(hwmgr,
210 PPSMC_MSG_SetDriverDramAddrHigh, 79 PPSMC_MSG_SetDriverDramAddrHigh,
211 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 80 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
212 vega10_send_msg_to_smc_with_parameter(hwmgr, 81 smu9_send_msg_to_smc_with_parameter(hwmgr,
213 PPSMC_MSG_SetDriverDramAddrLow, 82 PPSMC_MSG_SetDriverDramAddrLow,
214 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 83 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
215 vega10_send_msg_to_smc_with_parameter(hwmgr, 84 smu9_send_msg_to_smc_with_parameter(hwmgr,
216 PPSMC_MSG_TransferTableDram2Smu, 85 PPSMC_MSG_TransferTableDram2Smu,
217 priv->smu_tables.entry[table_id].table_id); 86 priv->smu_tables.entry[table_id].table_id);
218 87
@@ -225,8 +94,8 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
225 if (features_enabled == NULL) 94 if (features_enabled == NULL)
226 return -EINVAL; 95 return -EINVAL;
227 96
228 vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures); 97 smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
229 *features_enabled = vega10_get_argument(hwmgr); 98 *features_enabled = smu9_get_argument(hwmgr);
230 99
231 return 0; 100 return 0;
232} 101}
@@ -248,10 +117,10 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
248 struct vega10_smumgr *priv = hwmgr->smu_backend; 117 struct vega10_smumgr *priv = hwmgr->smu_backend;
249 118
250 if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) { 119 if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
251 vega10_send_msg_to_smc_with_parameter(hwmgr, 120 smu9_send_msg_to_smc_with_parameter(hwmgr,
252 PPSMC_MSG_SetToolsDramAddrHigh, 121 PPSMC_MSG_SetToolsDramAddrHigh,
253 upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); 122 upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
254 vega10_send_msg_to_smc_with_parameter(hwmgr, 123 smu9_send_msg_to_smc_with_parameter(hwmgr,
255 PPSMC_MSG_SetToolsDramAddrLow, 124 PPSMC_MSG_SetToolsDramAddrLow,
256 lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); 125 lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
257 } 126 }
@@ -265,11 +134,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
265 uint32_t dev_id; 134 uint32_t dev_id;
266 uint32_t rev_id; 135 uint32_t rev_id;
267 136
268 PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr, 137 PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
269 PPSMC_MSG_GetDriverIfVersion), 138 PPSMC_MSG_GetDriverIfVersion),
270 "Attempt to get SMC IF Version Number Failed!", 139 "Attempt to get SMC IF Version Number Failed!",
271 return -EINVAL); 140 return -EINVAL);
272 smc_driver_if_version = vega10_get_argument(hwmgr); 141 smc_driver_if_version = smu9_get_argument(hwmgr);
273 142
274 dev_id = adev->pdev->device; 143 dev_id = adev->pdev->device;
275 rev_id = adev->pdev->revision; 144 rev_id = adev->pdev->revision;
@@ -441,7 +310,7 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
441 310
442static int vega10_start_smu(struct pp_hwmgr *hwmgr) 311static int vega10_start_smu(struct pp_hwmgr *hwmgr)
443{ 312{
444 if (!vega10_is_smc_ram_running(hwmgr)) 313 if (!smu9_is_smc_ram_running(hwmgr))
445 return -EINVAL; 314 return -EINVAL;
446 315
447 PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr), 316 PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
@@ -453,7 +322,8 @@ static int vega10_start_smu(struct pp_hwmgr *hwmgr)
453 return 0; 322 return 0;
454} 323}
455 324
456static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw) 325static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
326 uint16_t table_id, bool rw)
457{ 327{
458 int ret; 328 int ret;
459 329
@@ -470,11 +340,11 @@ const struct pp_smumgr_func vega10_smu_funcs = {
470 .smu_fini = &vega10_smu_fini, 340 .smu_fini = &vega10_smu_fini,
471 .start_smu = &vega10_start_smu, 341 .start_smu = &vega10_start_smu,
472 .request_smu_load_specific_fw = NULL, 342 .request_smu_load_specific_fw = NULL,
473 .send_msg_to_smc = &vega10_send_msg_to_smc, 343 .send_msg_to_smc = &smu9_send_msg_to_smc,
474 .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter, 344 .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
475 .download_pptable_settings = NULL, 345 .download_pptable_settings = NULL,
476 .upload_pptable_settings = NULL, 346 .upload_pptable_settings = NULL,
477 .is_dpm_running = vega10_is_dpm_running, 347 .is_dpm_running = vega10_is_dpm_running,
478 .get_argument = vega10_get_argument, 348 .get_argument = smu9_get_argument,
479 .smc_table_manager = vega10_smc_table_manager, 349 .smc_table_manager = vega10_smc_table_manager,
480}; 350};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 7d9b40e8b1bf..7f0e2109f40d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -24,157 +24,14 @@
24#include "smumgr.h" 24#include "smumgr.h"
25#include "vega12_inc.h" 25#include "vega12_inc.h"
26#include "soc15_common.h" 26#include "soc15_common.h"
27#include "smu9_smumgr.h"
27#include "vega12_smumgr.h" 28#include "vega12_smumgr.h"
28#include "vega12_ppsmc.h" 29#include "vega12_ppsmc.h"
29#include "vega12/smu9_driver_if.h" 30#include "vega12/smu9_driver_if.h"
30
31#include "ppatomctrl.h" 31#include "ppatomctrl.h"
32#include "pp_debug.h" 32#include "pp_debug.h"
33 33
34 34
35/* MP Apertures */
36#define MP0_Public 0x03800000
37#define MP0_SRAM 0x03900000
38#define MP1_Public 0x03b00000
39#define MP1_SRAM 0x03c00004
40
41#define smnMP1_FIRMWARE_FLAGS 0x3010028
42#define smnMP0_FW_INTF 0x3010104
43#define smnMP1_PUB_CTRL 0x3010b14
44
45static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
46{
47 struct amdgpu_device *adev = hwmgr->adev;
48 uint32_t mp1_fw_flags;
49
50 WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
51 (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
52
53 mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
54
55 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
56 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
57 return true;
58
59 return false;
60}
61
62/*
63 * Check if SMC has responded to previous message.
64 *
65 * @param smumgr the address of the powerplay hardware manager.
66 * @return TRUE SMC has responded, FALSE otherwise.
67 */
68static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
69{
70 struct amdgpu_device *adev = hwmgr->adev;
71 uint32_t reg;
72
73 reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
74
75 phm_wait_for_register_unequal(hwmgr, reg,
76 0, MP1_C2PMSG_90__CONTENT_MASK);
77
78 return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
79}
80
81/*
82 * Send a message to the SMC, and do not wait for its response.
83 * @param smumgr the address of the powerplay hardware manager.
84 * @param msg the message to send.
85 * @return Always return 0.
86 */
87int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
88 uint16_t msg)
89{
90 struct amdgpu_device *adev = hwmgr->adev;
91
92 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
93
94 return 0;
95}
96
97/*
98 * Send a message to the SMC, and wait for its response.
99 * @param hwmgr the address of the powerplay hardware manager.
100 * @param msg the message to send.
101 * @return Always return 0.
102 */
103int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
104{
105 struct amdgpu_device *adev = hwmgr->adev;
106
107 vega12_wait_for_response(hwmgr);
108
109 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
110
111 vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
112
113 if (vega12_wait_for_response(hwmgr) != 1)
114 pr_err("Failed to send message: 0x%x\n", msg);
115
116 return 0;
117}
118
119/*
120 * Send a message to the SMC with parameter
121 * @param hwmgr: the address of the powerplay hardware manager.
122 * @param msg: the message to send.
123 * @param parameter: the parameter to send
124 * @return Always return 0.
125 */
126int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
127 uint16_t msg, uint32_t parameter)
128{
129 struct amdgpu_device *adev = hwmgr->adev;
130
131 vega12_wait_for_response(hwmgr);
132
133 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
134
135 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
136
137 vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
138
139 if (vega12_wait_for_response(hwmgr) != 1)
140 pr_err("Failed to send message: 0x%x\n", msg);
141
142 return 0;
143}
144
145
146/*
147 * Send a message to the SMC with parameter, do not wait for response
148 * @param hwmgr: the address of the powerplay hardware manager.
149 * @param msg: the message to send.
150 * @param parameter: the parameter to send
151 * @return The response that came from the SMC.
152 */
153int vega12_send_msg_to_smc_with_parameter_without_waiting(
154 struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
155{
156 struct amdgpu_device *adev = hwmgr->adev;
157
158 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter);
159
160 return vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
161}
162
163/*
164 * Retrieve an argument from SMC.
165 * @param hwmgr the address of the powerplay hardware manager.
166 * @param arg pointer to store the argument from SMC.
167 * @return Always return 0.
168 */
169int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
170{
171 struct amdgpu_device *adev = hwmgr->adev;
172
173 *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
174
175 return 0;
176}
177
178/* 35/*
179 * Copy table from SMC into driver FB 36 * Copy table from SMC into driver FB
180 * @param hwmgr the address of the HW manager 37 * @param hwmgr the address of the HW manager
@@ -192,16 +49,16 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
192 "Invalid SMU Table version!", return -EINVAL); 49 "Invalid SMU Table version!", return -EINVAL);
193 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, 50 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
194 "Invalid SMU Table Length!", return -EINVAL); 51 "Invalid SMU Table Length!", return -EINVAL);
195 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 52 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
196 PPSMC_MSG_SetDriverDramAddrHigh, 53 PPSMC_MSG_SetDriverDramAddrHigh,
197 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, 54 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
198 "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL); 55 "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
199 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 56 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
200 PPSMC_MSG_SetDriverDramAddrLow, 57 PPSMC_MSG_SetDriverDramAddrLow,
201 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, 58 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
202 "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", 59 "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
203 return -EINVAL); 60 return -EINVAL);
204 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 61 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
205 PPSMC_MSG_TransferTableSmu2Dram, 62 PPSMC_MSG_TransferTableSmu2Dram,
206 table_id) == 0, 63 table_id) == 0,
207 "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", 64 "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -234,17 +91,17 @@ int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
234 memcpy(priv->smu_tables.entry[table_id].table, table, 91 memcpy(priv->smu_tables.entry[table_id].table, table,
235 priv->smu_tables.entry[table_id].size); 92 priv->smu_tables.entry[table_id].size);
236 93
237 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 94 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
238 PPSMC_MSG_SetDriverDramAddrHigh, 95 PPSMC_MSG_SetDriverDramAddrHigh,
239 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, 96 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
240 "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", 97 "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
241 return -EINVAL;); 98 return -EINVAL;);
242 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 99 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
243 PPSMC_MSG_SetDriverDramAddrLow, 100 PPSMC_MSG_SetDriverDramAddrLow,
244 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, 101 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
245 "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", 102 "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
246 return -EINVAL); 103 return -EINVAL);
247 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 104 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
248 PPSMC_MSG_TransferTableDram2Smu, 105 PPSMC_MSG_TransferTableDram2Smu,
249 table_id) == 0, 106 table_id) == 0,
250 "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", 107 "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -262,20 +119,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
262 smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT); 119 smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
263 120
264 if (enable) { 121 if (enable) {
265 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 122 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
266 PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0, 123 PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
267 "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", 124 "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
268 return -EINVAL); 125 return -EINVAL);
269 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 126 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
270 PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0, 127 PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
271 "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", 128 "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
272 return -EINVAL); 129 return -EINVAL);
273 } else { 130 } else {
274 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 131 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
275 PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0, 132 PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
276 "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", 133 "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
277 return -EINVAL); 134 return -EINVAL);
278 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 135 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
279 PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0, 136 PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
280 "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", 137 "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
281 return -EINVAL); 138 return -EINVAL);
@@ -292,22 +149,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
292 if (features_enabled == NULL) 149 if (features_enabled == NULL)
293 return -EINVAL; 150 return -EINVAL;
294 151
295 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr, 152 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
296 PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0, 153 PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
297 "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", 154 "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
298 return -EINVAL); 155 return -EINVAL);
299 PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr, 156 smc_features_low = smu9_get_argument(hwmgr);
300 &smc_features_low) == 0, 157
301 "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!", 158 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
302 return -EINVAL);
303 PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
304 PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0, 159 PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
305 "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", 160 "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
306 return -EINVAL); 161 return -EINVAL);
307 PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr, 162 smc_features_high = smu9_get_argument(hwmgr);
308 &smc_features_high) == 0,
309 "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
310 return -EINVAL);
311 163
312 *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) | 164 *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
313 (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK)); 165 (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -333,39 +185,16 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
333 (struct vega12_smumgr *)(hwmgr->smu_backend); 185 (struct vega12_smumgr *)(hwmgr->smu_backend);
334 186
335 if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) { 187 if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
336 if (!vega12_send_msg_to_smc_with_parameter(hwmgr, 188 if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
337 PPSMC_MSG_SetToolsDramAddrHigh, 189 PPSMC_MSG_SetToolsDramAddrHigh,
338 upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr))) 190 upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
339 vega12_send_msg_to_smc_with_parameter(hwmgr, 191 smu9_send_msg_to_smc_with_parameter(hwmgr,
340 PPSMC_MSG_SetToolsDramAddrLow, 192 PPSMC_MSG_SetToolsDramAddrLow,
341 lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)); 193 lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
342 } 194 }
343 return 0; 195 return 0;
344} 196}
345 197
346#if 0 /* tentatively remove */
347static int vega12_verify_smc_interface(struct pp_hwmgr *hwmgr)
348{
349 uint32_t smc_driver_if_version;
350
351 PP_ASSERT_WITH_CODE(!vega12_send_msg_to_smc(hwmgr,
352 PPSMC_MSG_GetDriverIfVersion),
353 "Attempt to get SMC IF Version Number Failed!",
354 return -EINVAL);
355 vega12_read_arg_from_smc(hwmgr, &smc_driver_if_version);
356
357 if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
358 pr_err("Your firmware(0x%x) doesn't match \
359 SMU9_DRIVER_IF_VERSION(0x%x). \
360 Please update your firmware!\n",
361 smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
362 return -EINVAL;
363 }
364
365 return 0;
366}
367#endif
368
369static int vega12_smu_init(struct pp_hwmgr *hwmgr) 198static int vega12_smu_init(struct pp_hwmgr *hwmgr)
370{ 199{
371 struct vega12_smumgr *priv; 200 struct vega12_smumgr *priv;
@@ -513,16 +342,10 @@ static int vega12_smu_fini(struct pp_hwmgr *hwmgr)
513 342
514static int vega12_start_smu(struct pp_hwmgr *hwmgr) 343static int vega12_start_smu(struct pp_hwmgr *hwmgr)
515{ 344{
516 PP_ASSERT_WITH_CODE(vega12_is_smc_ram_running(hwmgr), 345 PP_ASSERT_WITH_CODE(smu9_is_smc_ram_running(hwmgr),
517 "SMC is not running!", 346 "SMC is not running!",
518 return -EINVAL); 347 return -EINVAL);
519 348
520#if 0 /* tentatively remove */
521 PP_ASSERT_WITH_CODE(!vega12_verify_smc_interface(hwmgr),
522 "Failed to verify SMC interface!",
523 return -EINVAL);
524#endif
525
526 vega12_set_tools_address(hwmgr); 349 vega12_set_tools_address(hwmgr);
527 350
528 return 0; 351 return 0;
@@ -533,9 +356,10 @@ const struct pp_smumgr_func vega12_smu_funcs = {
533 .smu_fini = &vega12_smu_fini, 356 .smu_fini = &vega12_smu_fini,
534 .start_smu = &vega12_start_smu, 357 .start_smu = &vega12_start_smu,
535 .request_smu_load_specific_fw = NULL, 358 .request_smu_load_specific_fw = NULL,
536 .send_msg_to_smc = &vega12_send_msg_to_smc, 359 .send_msg_to_smc = &smu9_send_msg_to_smc,
537 .send_msg_to_smc_with_parameter = &vega12_send_msg_to_smc_with_parameter, 360 .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
538 .download_pptable_settings = NULL, 361 .download_pptable_settings = NULL,
539 .upload_pptable_settings = NULL, 362 .upload_pptable_settings = NULL,
540 .is_dpm_running = vega12_is_dpm_running, 363 .is_dpm_running = vega12_is_dpm_running,
364 .get_argument = smu9_get_argument,
541}; 365};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
index 2810d387b611..b285cbc04019 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
@@ -48,7 +48,6 @@ struct vega12_smumgr {
48#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000 48#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
49#define SMU_FEATURES_HIGH_SHIFT 32 49#define SMU_FEATURES_HIGH_SHIFT 32
50 50
51int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
52int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, 51int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
53 uint8_t *table, int16_t table_id); 52 uint8_t *table, int16_t table_id);
54int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr, 53int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 45bfdf4cc107..36414ba56b22 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
49 49
50 for (i = 0; i < ETNA_MAX_PIPES; i++) { 50 for (i = 0; i < ETNA_MAX_PIPES; i++) {
51 struct etnaviv_gpu *gpu = priv->gpu[i]; 51 struct etnaviv_gpu *gpu = priv->gpu[i];
52 struct drm_sched_rq *rq;
52 53
53 if (gpu) { 54 if (gpu) {
54 drm_sched_entity_init(&gpu->sched, 55 rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
55 &ctx->sched_entity[i], 56 drm_sched_entity_init(&ctx->sched_entity[i],
56 &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL], 57 &rq, 1, NULL);
57 NULL);
58 } 58 }
59 } 59 }
60 60
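This etnaviv hunk adapts to the signature change below: drm_sched_entity_init() loses its explicit scheduler argument, and callers now pass an array of candidate run queues from which the entity derives its scheduler. A sketch of the new call shape under a hypothetical driver; the mydrv_ name is invented:

/* Hypothetical driver open path against the new signature. */
#include <drm/gpu_scheduler.h>

static int mydrv_init_entity(struct drm_gpu_scheduler *sched,
			     struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq =
		&sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* Single-element run-queue list; num_rq_list must be >= 1. */
	return drm_sched_entity_init(entity, &rq, 1, NULL);
}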
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 7d2560699b84..dac71e3b4514 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -69,11 +69,13 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
69 * 69 *
70 * Initializes a scheduler runqueue. 70 * Initializes a scheduler runqueue.
71 */ 71 */
72static void drm_sched_rq_init(struct drm_sched_rq *rq) 72static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
73 struct drm_sched_rq *rq)
73{ 74{
74 spin_lock_init(&rq->lock); 75 spin_lock_init(&rq->lock);
75 INIT_LIST_HEAD(&rq->entities); 76 INIT_LIST_HEAD(&rq->entities);
76 rq->current_entity = NULL; 77 rq->current_entity = NULL;
78 rq->sched = sched;
77} 79}
78 80
79/** 81/**
@@ -160,26 +162,30 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
160 * drm_sched_entity_init - Init a context entity used by scheduler when 162 * drm_sched_entity_init - Init a context entity used by scheduler when
161 * submit to HW ring. 163 * submit to HW ring.
162 * 164 *
163 * @sched: scheduler instance
164 * @entity: scheduler entity to init 165 * @entity: scheduler entity to init
165 * @rq: the run queue this entity belongs 166 * @rq_list: the list of run queues on which jobs from this
167 * entity can be submitted
168 * @num_rq_list: number of run queues in rq_list
166 * @guilty: atomic_t set to 1 when a job on this queue 169 * @guilty: atomic_t set to 1 when a job on this queue
167 * is found to be guilty causing a timeout 170 * is found to be guilty causing a timeout
168 * 171 *
172 * Note: the rq_list should have at least one element to schedule
173 * the entity
174 *
169 * Returns 0 on success or a negative error code on failure. 175 * Returns 0 on success or a negative error code on failure.
170*/ 176*/
171int drm_sched_entity_init(struct drm_gpu_scheduler *sched, 177int drm_sched_entity_init(struct drm_sched_entity *entity,
172 struct drm_sched_entity *entity, 178 struct drm_sched_rq **rq_list,
173 struct drm_sched_rq *rq, 179 unsigned int num_rq_list,
174 atomic_t *guilty) 180 atomic_t *guilty)
175{ 181{
176 if (!(sched && entity && rq)) 182 if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
177 return -EINVAL; 183 return -EINVAL;
178 184
179 memset(entity, 0, sizeof(struct drm_sched_entity)); 185 memset(entity, 0, sizeof(struct drm_sched_entity));
180 INIT_LIST_HEAD(&entity->list); 186 INIT_LIST_HEAD(&entity->list);
181 entity->rq = rq; 187 entity->rq = rq_list[0];
182 entity->sched = sched; 188 entity->sched = rq_list[0]->sched;
183 entity->guilty = guilty; 189 entity->guilty = guilty;
184 entity->last_scheduled = NULL; 190 entity->last_scheduled = NULL;
185 191
@@ -541,6 +547,11 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 	if (first) {
 		/* Add the entity to the run queue */
 		spin_lock(&entity->rq_lock);
+		if (!entity->rq) {
+			DRM_ERROR("Trying to push to a killed entity\n");
+			spin_unlock(&entity->rq_lock);
+			return;
+		}
 		drm_sched_rq_add_entity(entity->rq, entity);
 		spin_unlock(&entity->rq_lock);
 		drm_sched_wakeup(sched);
@@ -926,7 +937,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	sched->timeout = timeout;
 	sched->hang_limit = hang_limit;
 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
-		drm_sched_rq_init(&sched->sched_rq[i]);
+		drm_sched_rq_init(sched, &sched->sched_rq[i]);
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
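
For context, a minimal sketch of a call site after this rework; drm_sched_entity_init() no longer takes the scheduler explicitly but derives it from rq_list[0]->sched, so callers pass an array of candidate run queues. The gpu/entity names here are hypothetical, mirroring the etnaviv and v3d conversions in this series:

	struct drm_sched_rq *rq;
	int ret;

	/* pick the run queue(s) this entity may be scheduled on;
	 * the entity's scheduler is taken from rq_list[0]->sched */
	rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	ret = drm_sched_entity_init(&entity, &rq, 1, NULL);
	if (ret)
		return ret;
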
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5d8688e522d1..7c484729f9b2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -287,12 +287,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (ret) {
 		if (bdev->driver->move_notify) {
-			struct ttm_mem_reg tmp_mem = *mem;
-			*mem = bo->mem;
-			bo->mem = tmp_mem;
+			swap(*mem, bo->mem);
 			bdev->driver->move_notify(bo, false, mem);
-			bo->mem = *mem;
-			*mem = tmp_mem;
+			swap(*mem, bo->mem);
 		}
 
 		goto out_err;
@@ -590,12 +587,18 @@ static void ttm_bo_release(struct kref *kref)
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 }
 
+void ttm_bo_put(struct ttm_buffer_object *bo)
+{
+	kref_put(&bo->kref, ttm_bo_release);
+}
+EXPORT_SYMBOL(ttm_bo_put);
+
 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 {
 	struct ttm_buffer_object *bo = *p_bo;
 
 	*p_bo = NULL;
-	kref_put(&bo->kref, ttm_bo_release);
+	ttm_bo_put(bo);
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
@@ -1201,7 +1204,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		if (!resv)
 			ttm_bo_unreserve(bo);
 
-		ttm_bo_unref(&bo);
+		ttm_bo_put(bo);
 		return ret;
 	}
 
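
Note the semantic difference in this conversion: ttm_bo_unref() both dropped the reference and cleared the caller's pointer, while ttm_bo_put() only drops the reference. A sketch of the resulting pattern (hypothetical caller, matching the reworked ttm_bo_unref() body above):

	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;		/* clear the pointer explicitly where still needed */
	ttm_bo_put(bo);		/* drops the reference only */
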
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f2c167702eef..046a6dda690a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -463,7 +463,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
 	struct ttm_transfer_obj *fbo;
 
 	fbo = container_of(bo, struct ttm_transfer_obj, base);
-	ttm_bo_unref(&fbo->bo);
+	ttm_bo_put(fbo->bo);
 	kfree(fbo);
 }
 
@@ -492,8 +492,9 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	if (!fbo)
 		return -ENOMEM;
 
+	ttm_bo_get(bo);
 	fbo->base = *bo;
-	fbo->bo = ttm_bo_reference(bo);
+	fbo->bo = bo;
 
 	/**
 	 * Fix up members that we shouldn't copy directly:
@@ -730,7 +731,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		bo->ttm = NULL;
 
 		ttm_bo_unreserve(ghost_obj);
-		ttm_bo_unref(&ghost_obj);
+		ttm_bo_put(ghost_obj);
 	}
 
 	*old_mem = *new_mem;
@@ -786,7 +787,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 		bo->ttm = NULL;
 
 		ttm_bo_unreserve(ghost_obj);
-		ttm_bo_unref(&ghost_obj);
+		ttm_bo_put(ghost_obj);
 
 	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
 
@@ -851,7 +852,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 	bo->ttm = NULL;
 
 	ttm_bo_unreserve(ghost);
-	ttm_bo_unref(&ghost);
+	ttm_bo_put(ghost);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ca0ec47334e..6fe91c1b692d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -68,11 +68,11 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
 			goto out_unlock;
 
-		ttm_bo_reference(bo);
+		ttm_bo_get(bo);
 		up_read(&vmf->vma->vm_mm->mmap_sem);
 		(void) dma_fence_wait(bo->moving, true);
 		ttm_bo_unreserve(bo);
-		ttm_bo_unref(&bo);
+		ttm_bo_put(bo);
 		goto out_unlock;
 	}
 
@@ -138,10 +138,10 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 
 	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
-			ttm_bo_reference(bo);
+			ttm_bo_get(bo);
 			up_read(&vmf->vma->vm_mm->mmap_sem);
 			(void) ttm_bo_wait_unreserved(bo);
-			ttm_bo_unref(&bo);
+			ttm_bo_put(bo);
 		}
 
 		return VM_FAULT_RETRY;
@@ -302,14 +302,14 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
 
 	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
 
-	(void)ttm_bo_reference(bo);
+	ttm_bo_get(bo);
 }
 
 static void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
 
-	ttm_bo_unref(&bo);
+	ttm_bo_put(bo);
 	vma->vm_private_data = NULL;
 }
 
@@ -461,7 +461,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 	return 0;
 out_unref:
-	ttm_bo_unref(&bo);
+	ttm_bo_put(bo);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_mmap);
@@ -471,8 +471,10 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 	if (vma->vm_pgoff != 0)
 		return -EACCES;
 
+	ttm_bo_get(bo);
+
 	vma->vm_ops = &ttm_bo_vm_ops;
-	vma->vm_private_data = ttm_bo_reference(bo);
+	vma->vm_private_data = bo;
 	vma->vm_flags |= VM_MIXEDMAP;
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
 	return 0;
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 567f7d46d912..1dceba2b42fd 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv;
+	struct drm_sched_rq *rq;
 	int i;
 
 	v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 	v3d_priv->v3d = v3d;
 
 	for (i = 0; i < V3D_MAX_QUEUES; i++) {
-		drm_sched_entity_init(&v3d->queue[i].sched,
-				      &v3d_priv->sched_entity[i],
-				      &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-				      NULL);
+		rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+		drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
 	}
 
 	file->driver_priv = v3d_priv;
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 4214ceb71c05..2205e89722f6 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -93,6 +93,7 @@ struct drm_sched_entity {
  * struct drm_sched_rq - queue of entities to be scheduled.
  *
  * @lock: to modify the entities list.
+ * @sched: the scheduler to which this rq belongs to.
  * @entities: list of the entities to be scheduled.
  * @current_entity: the entity which is to be scheduled.
  *
@@ -102,6 +103,7 @@ struct drm_sched_entity {
  */
 struct drm_sched_rq {
 	spinlock_t lock;
+	struct drm_gpu_scheduler *sched;
 	struct list_head entities;
 	struct drm_sched_entity *current_entity;
 };
@@ -280,9 +282,9 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const char *name);
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-			  struct drm_sched_entity *entity,
-			  struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+			  struct drm_sched_rq **rq_list,
+			  unsigned int num_rq_list,
 			  atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
 			    struct drm_sched_entity *entity, long timeout);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c67977aa1a0e..a01ba2032f0e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -284,17 +284,29 @@ struct ttm_operation_ctx {
 #define TTM_OPT_FLAG_FORCE_ALLOC		0x2
 
 /**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+	kref_get(&bo->kref);
+}
+
+/**
  * ttm_bo_reference - reference a struct ttm_buffer_object
  *
  * @bo: The buffer object.
  *
  * Returns a refcounted pointer to a buffer object.
+ *
+ * This function is deprecated. Use @ttm_bo_get instead.
  */
 
 static inline struct ttm_buffer_object *
 ttm_bo_reference(struct ttm_buffer_object *bo)
 {
-	kref_get(&bo->kref);
+	ttm_bo_get(bo);
 	return bo;
 }
 
@@ -346,11 +358,22 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 		    struct ttm_operation_ctx *ctx);
 
 /**
+ * ttm_bo_put
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference a buffer object.
+ */
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
+/**
  * ttm_bo_unref
  *
  * @bo: The buffer object.
  *
  * Unreference and clear a pointer to a buffer object.
+ *
+ * This function is deprecated. Use @ttm_bo_put instead.
  */
 void ttm_bo_unref(struct ttm_buffer_object **bo);
 
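
Taken together, the new helpers give TTM the usual get/put naming. A sketch of taking and dropping a temporary reference with the new API (hypothetical use; compare the ttm_bo_vm.c fault paths above):

	ttm_bo_get(bo);		/* was: ttm_bo_reference(bo) */
	/* ... access bo while holding the extra reference ... */
	ttm_bo_put(bo);		/* was: ttm_bo_unref(&bo) */
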
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 784b0fe470ee..1ceec56de015 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -516,6 +516,7 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03
 #define AMDGPU_CHUNK_ID_SYNCOBJ_IN      0x04
 #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT     0x05
+#define AMDGPU_CHUNK_ID_BO_HANDLES      0x06
 
 struct drm_amdgpu_cs_chunk {
 	__u32 chunk_id;