aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/display
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2018-03-20 21:46:05 -0400
committerDave Airlie <airlied@redhat.com>2018-03-20 21:46:05 -0400
commit287d2ac36b6f2830ea4ef66c110abc0f47a9a658 (patch)
tree04214f156461a95c2f7ca5a8821063cad7fc515e /drivers/gpu/drm/amd/display
parent963976cfe9c54d4d9e725e61c90c47a4af6b5ea2 (diff)
parent6da2b9332c572fcda94de9631f8fa514f574388a (diff)
Merge branch 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux into drm-next
- Continued cleanup and restructuring of powerplay - Fetch VRAM type from vbios rather than hardcoding for SOC15 asics - Allow ttm to drop its backing store when drivers don't need it - DC bandwidth calc updates - Enable DC backlight control pre-DCE11 asics - Enable DC on all supported asics - DC Fixes for planes due to the way our hw is ordered vs what drm expects - DC CTM/regamma fixes - Misc cleanup and bug fixes * 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux: (89 commits) amdgpu/dm: Default PRE_VEGA ASIC support to 'y' drm/amd/pp: Remove the cgs wrapper for notify smu version on APU drm/amd/display: fix dereferencing possible ERR_PTR() drm/amd/display: Refine disable VGA drm/amdgpu: Improve documentation of bo_ptr in amdgpu_bo_create_kernel drm/radeon: Don't turn off DP sink when disconnected drm/amd/pp: Rename file name cz_* to smu8_* drm/amd/pp: Replace function/struct name cz_* with smu8_* drm/amd/pp: Remove unneeded void * casts in cz_hwmgr.c/cz_smumgr.c drm/amd/pp: Mv cz uvd/vce pg/dpm functions to cz_hwmgr.c drm/amd/pp: Remove dead header file pp_asicblocks.h drm/amd/pp: Delete dead code on cz_clockpowergating.c drm/amdgpu: Call amdgpu_ucode_fini_bo in amd_powerplay.c drm/amdgpu: Remove wrapper layer of smu ip functions drm/amdgpu: Don't compared ip_block_type with ip_block_index drm/amdgpu: Plus NULL function pointer check drm/amd/pp: Move helper functions to smu_help.c drm/amd/pp: Replace rv_* with smu10_* drm/amd/pp: Fix function parameter not correct drm/amd/pp: Add rv_copy_table_from/to_smc to smu backend function table ...
Diffstat (limited to 'drivers/gpu/drm/amd/display')
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c123
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c15
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c160
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c87
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c261
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_logger.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c155
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h9
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_stats.h65
-rw-r--r--drivers/gpu/drm/amd/display/modules/stats/stats.c334
44 files changed, 1360 insertions, 249 deletions
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index ec3285f65517..5b124a67404c 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -11,7 +11,7 @@ config DRM_AMD_DC
11 11
12config DRM_AMD_DC_PRE_VEGA 12config DRM_AMD_DC_PRE_VEGA
13 bool "DC support for Polaris and older ASICs" 13 bool "DC support for Polaris and older ASICs"
14 default n 14 default y
15 help 15 help
16 Choose this option to enable the new DC support for older asics 16 Choose this option to enable the new DC support for older asics
17 by default. This includes Polaris, Carrizo, Tonga, Bonaire, 17 by default. This includes Polaris, Carrizo, Tonga, Bonaire,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7e5c5c9eeb4f..ae512ecb65ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1131,7 +1131,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1131 1131
1132 if (adev->asic_type == CHIP_VEGA10 || 1132 if (adev->asic_type == CHIP_VEGA10 ||
1133 adev->asic_type == CHIP_RAVEN) 1133 adev->asic_type == CHIP_RAVEN)
1134 client_id = AMDGPU_IH_CLIENTID_DCE; 1134 client_id = SOC15_IH_CLIENTID_DCE;
1135 1135
1136 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 1136 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1137 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 1137 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -1231,7 +1231,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1231 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 1231 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1232 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 1232 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1233 i++) { 1233 i++) {
1234 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq); 1234 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1235 1235
1236 if (r) { 1236 if (r) {
1237 DRM_ERROR("Failed to add crtc irq id!\n"); 1237 DRM_ERROR("Failed to add crtc irq id!\n");
@@ -1255,7 +1255,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1255 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 1255 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1256 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; 1256 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1257 i++) { 1257 i++) {
1258 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 1258 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1259 if (r) { 1259 if (r) {
1260 DRM_ERROR("Failed to add page flip irq id!\n"); 1260 DRM_ERROR("Failed to add page flip irq id!\n");
1261 return r; 1261 return r;
@@ -1276,7 +1276,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1276 } 1276 }
1277 1277
1278 /* HPD */ 1278 /* HPD */
1279 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 1279 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1280 &adev->hpd_irq); 1280 &adev->hpd_irq);
1281 if (r) { 1281 if (r) {
1282 DRM_ERROR("Failed to add hpd irq id!\n"); 1282 DRM_ERROR("Failed to add hpd irq id!\n");
@@ -1365,6 +1365,43 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1365 1365
1366#endif 1366#endif
1367 1367
1368static int initialize_plane(struct amdgpu_display_manager *dm,
1369 struct amdgpu_mode_info *mode_info,
1370 int plane_id)
1371{
1372 struct amdgpu_plane *plane;
1373 unsigned long possible_crtcs;
1374 int ret = 0;
1375
1376 plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
1377 mode_info->planes[plane_id] = plane;
1378
1379 if (!plane) {
1380 DRM_ERROR("KMS: Failed to allocate plane\n");
1381 return -ENOMEM;
1382 }
1383 plane->base.type = mode_info->plane_type[plane_id];
1384
1385 /*
1386 * HACK: IGT tests expect that each plane can only have one
1387 * one possible CRTC. For now, set one CRTC for each
1388 * plane that is not an underlay, but still allow multiple
1389 * CRTCs for underlay planes.
1390 */
1391 possible_crtcs = 1 << plane_id;
1392 if (plane_id >= dm->dc->caps.max_streams)
1393 possible_crtcs = 0xff;
1394
1395 ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
1396
1397 if (ret) {
1398 DRM_ERROR("KMS: Failed to initialize plane\n");
1399 return ret;
1400 }
1401
1402 return ret;
1403}
1404
1368/* In this architecture, the association 1405/* In this architecture, the association
1369 * connector -> encoder -> crtc 1406 * connector -> encoder -> crtc
1370 * id not really requried. The crtc and connector will hold the 1407 * id not really requried. The crtc and connector will hold the
@@ -1375,12 +1412,12 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1375static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 1412static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1376{ 1413{
1377 struct amdgpu_display_manager *dm = &adev->dm; 1414 struct amdgpu_display_manager *dm = &adev->dm;
1378 uint32_t i; 1415 int32_t i;
1379 struct amdgpu_dm_connector *aconnector = NULL; 1416 struct amdgpu_dm_connector *aconnector = NULL;
1380 struct amdgpu_encoder *aencoder = NULL; 1417 struct amdgpu_encoder *aencoder = NULL;
1381 struct amdgpu_mode_info *mode_info = &adev->mode_info; 1418 struct amdgpu_mode_info *mode_info = &adev->mode_info;
1382 uint32_t link_cnt; 1419 uint32_t link_cnt;
1383 unsigned long possible_crtcs; 1420 int32_t total_overlay_planes, total_primary_planes;
1384 1421
1385 link_cnt = dm->dc->caps.max_links; 1422 link_cnt = dm->dc->caps.max_links;
1386 if (amdgpu_dm_mode_config_init(dm->adev)) { 1423 if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1388,30 +1425,22 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1388 return -1; 1425 return -1;
1389 } 1426 }
1390 1427
1391 for (i = 0; i < dm->dc->caps.max_planes; i++) { 1428 /* Identify the number of planes to be initialized */
1392 struct amdgpu_plane *plane; 1429 total_overlay_planes = dm->dc->caps.max_slave_planes;
1430 total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
1393 1431
1394 plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL); 1432 /* First initialize overlay planes, index starting after primary planes */
1395 mode_info->planes[i] = plane; 1433 for (i = (total_overlay_planes - 1); i >= 0; i--) {
1396 1434 if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
1397 if (!plane) { 1435 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1398 DRM_ERROR("KMS: Failed to allocate plane\n");
1399 goto fail; 1436 goto fail;
1400 } 1437 }
1401 plane->base.type = mode_info->plane_type[i]; 1438 }
1402
1403 /*
1404 * HACK: IGT tests expect that each plane can only have one
1405 * one possible CRTC. For now, set one CRTC for each
1406 * plane that is not an underlay, but still allow multiple
1407 * CRTCs for underlay planes.
1408 */
1409 possible_crtcs = 1 << i;
1410 if (i >= dm->dc->caps.max_streams)
1411 possible_crtcs = 0xff;
1412 1439
1413 if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) { 1440 /* Initialize primary planes */
1414 DRM_ERROR("KMS: Failed to initialize plane\n"); 1441 for (i = (total_primary_planes - 1); i >= 0; i--) {
1442 if (initialize_plane(dm, mode_info, i)) {
1443 DRM_ERROR("KMS: Failed to initialize primary plane\n");
1415 goto fail; 1444 goto fail;
1416 } 1445 }
1417 } 1446 }
@@ -1982,6 +2011,10 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
1982 * every time. 2011 * every time.
1983 */ 2012 */
1984 ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state); 2013 ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2014 if (ret) {
2015 dc_transfer_func_release(dc_plane_state->in_transfer_func);
2016 dc_plane_state->in_transfer_func = NULL;
2017 }
1985 2018
1986 return ret; 2019 return ret;
1987} 2020}
@@ -4691,8 +4724,8 @@ static int dm_update_planes_state(struct dc *dc,
4691 int ret = 0; 4724 int ret = 0;
4692 4725
4693 4726
4694 /* Add new planes */ 4727 /* Add new planes, in reverse order as DC expectation */
4695 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 4728 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
4696 new_plane_crtc = new_plane_state->crtc; 4729 new_plane_crtc = new_plane_state->crtc;
4697 old_plane_crtc = old_plane_state->crtc; 4730 old_plane_crtc = old_plane_state->crtc;
4698 dm_new_plane_state = to_dm_plane_state(new_plane_state); 4731 dm_new_plane_state = to_dm_plane_state(new_plane_state);
@@ -4737,6 +4770,7 @@ static int dm_update_planes_state(struct dc *dc,
4737 *lock_and_validation_needed = true; 4770 *lock_and_validation_needed = true;
4738 4771
4739 } else { /* Add new planes */ 4772 } else { /* Add new planes */
4773 struct dc_plane_state *dc_new_plane_state;
4740 4774
4741 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 4775 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4742 continue; 4776 continue;
@@ -4755,34 +4789,42 @@ static int dm_update_planes_state(struct dc *dc,
4755 4789
4756 WARN_ON(dm_new_plane_state->dc_state); 4790 WARN_ON(dm_new_plane_state->dc_state);
4757 4791
4758 dm_new_plane_state->dc_state = dc_create_plane_state(dc); 4792 dc_new_plane_state = dc_create_plane_state(dc);
4793 if (!dc_new_plane_state)
4794 return -ENOMEM;
4759 4795
4760 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", 4796 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4761 plane->base.id, new_plane_crtc->base.id); 4797 plane->base.id, new_plane_crtc->base.id);
4762 4798
4763 if (!dm_new_plane_state->dc_state) {
4764 ret = -EINVAL;
4765 return ret;
4766 }
4767
4768 ret = fill_plane_attributes( 4799 ret = fill_plane_attributes(
4769 new_plane_crtc->dev->dev_private, 4800 new_plane_crtc->dev->dev_private,
4770 dm_new_plane_state->dc_state, 4801 dc_new_plane_state,
4771 new_plane_state, 4802 new_plane_state,
4772 new_crtc_state); 4803 new_crtc_state);
4773 if (ret) 4804 if (ret) {
4805 dc_plane_state_release(dc_new_plane_state);
4774 return ret; 4806 return ret;
4807 }
4775 4808
4809 /*
4810 * Any atomic check errors that occur after this will
4811 * not need a release. The plane state will be attached
4812 * to the stream, and therefore part of the atomic
4813 * state. It'll be released when the atomic state is
4814 * cleaned.
4815 */
4776 if (!dc_add_plane_to_context( 4816 if (!dc_add_plane_to_context(
4777 dc, 4817 dc,
4778 dm_new_crtc_state->stream, 4818 dm_new_crtc_state->stream,
4779 dm_new_plane_state->dc_state, 4819 dc_new_plane_state,
4780 dm_state->context)) { 4820 dm_state->context)) {
4781 4821
4782 ret = -EINVAL; 4822 dc_plane_state_release(dc_new_plane_state);
4783 return ret; 4823 return -EINVAL;
4784 } 4824 }
4785 4825
4826 dm_new_plane_state->dc_state = dc_new_plane_state;
4827
4786 /* Tell DC to do a full surface update every time there 4828 /* Tell DC to do a full surface update every time there
4787 * is a plane change. Inefficient, but works for now. 4829 * is a plane change. Inefficient, but works for now.
4788 */ 4830 */
@@ -4812,6 +4854,9 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
4812 return -EDEADLK; 4854 return -EDEADLK;
4813 4855
4814 crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc); 4856 crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
4857 if (IS_ERR(crtc_state))
4858 return PTR_ERR(crtc_state);
4859
4815 if (crtc->primary == plane && crtc_state->active) { 4860 if (crtc->primary == plane && crtc_state->active) {
4816 if (!plane_state->fb) 4861 if (!plane_state->fb)
4817 return -EINVAL; 4862 return -EINVAL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index e845c511656e..f6cb502c303f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -193,6 +193,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
193 struct drm_property_blob *blob = crtc->base.ctm; 193 struct drm_property_blob *blob = crtc->base.ctm;
194 struct dc_stream_state *stream = crtc->stream; 194 struct dc_stream_state *stream = crtc->stream;
195 struct drm_color_ctm *ctm; 195 struct drm_color_ctm *ctm;
196 int64_t val;
196 int i; 197 int i;
197 198
198 if (!blob) { 199 if (!blob) {
@@ -206,7 +207,9 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
206 * DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating 207 * DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating
207 * with homogeneous coordinates, augment the matrix with 0's. 208 * with homogeneous coordinates, augment the matrix with 0's.
208 * 209 *
209 * The format provided is S31.32, which is the same as our fixed31_32. 210 * The format provided is S31.32, using signed-magnitude representation.
211 * Our fixed31_32 is also S31.32, but is using 2's complement. We have
212 * to convert from signed-magnitude to 2's complement.
210 */ 213 */
211 for (i = 0; i < 12; i++) { 214 for (i = 0; i < 12; i++) {
212 /* Skip 4th element */ 215 /* Skip 4th element */
@@ -214,8 +217,14 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
214 stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero; 217 stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero;
215 continue; 218 continue;
216 } 219 }
217 /* csc[i] = ctm[i - floor(i/4)] */ 220
218 stream->gamut_remap_matrix.matrix[i].value = ctm->matrix[i - (i/4)]; 221 /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
222 val = ctm->matrix[i - (i/4)];
223 /* If negative, convert to 2's complement. */
224 if (val & (1ULL << 63))
225 val = -(val & ~(1ULL << 63));
226
227 stream->gamut_remap_matrix.matrix[i].value = val;
219 } 228 }
220} 229}
221 230
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 39cfe0fbf1b9..8291d74f26bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -85,6 +85,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
85 enum ddc_result res; 85 enum ddc_result res;
86 ssize_t read_bytes; 86 ssize_t read_bytes;
87 87
88 if (WARN_ON(msg->size > 16))
89 return -E2BIG;
90
88 switch (msg->request & ~DP_AUX_I2C_MOT) { 91 switch (msg->request & ~DP_AUX_I2C_MOT) {
89 case DP_AUX_NATIVE_READ: 92 case DP_AUX_NATIVE_READ:
90 read_bytes = dal_ddc_service_read_dpcd_data( 93 read_bytes = dal_ddc_service_read_dpcd_data(
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
index 180a9d69d351..31bee054f43a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -60,7 +60,8 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
60 {LOG_EVENT_LINK_LOSS, "LinkLoss"}, 60 {LOG_EVENT_LINK_LOSS, "LinkLoss"},
61 {LOG_EVENT_UNDERFLOW, "Underflow"}, 61 {LOG_EVENT_UNDERFLOW, "Underflow"},
62 {LOG_IF_TRACE, "InterfaceTrace"}, 62 {LOG_IF_TRACE, "InterfaceTrace"},
63 {LOG_DTN, "DTN"} 63 {LOG_DTN, "DTN"},
64 {LOG_PROFILING, "Profiling"}
64}; 65};
65 66
66 67
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 1689c670ca6f..e7680c41f117 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -44,7 +44,7 @@
44 44
45#include "bios_parser_common.h" 45#include "bios_parser_common.h"
46#define LAST_RECORD_TYPE 0xff 46#define LAST_RECORD_TYPE 0xff
47 47#define SMU9_SYSPLL0_ID 0
48 48
49struct i2c_id_config_access { 49struct i2c_id_config_access {
50 uint8_t bfI2C_LineMux:4; 50 uint8_t bfI2C_LineMux:4;
@@ -1220,7 +1220,7 @@ static unsigned int bios_parser_get_smu_clock_info(
1220 if (!bp->cmd_tbl.get_smu_clock_info) 1220 if (!bp->cmd_tbl.get_smu_clock_info)
1221 return BP_RESULT_FAILURE; 1221 return BP_RESULT_FAILURE;
1222 1222
1223 return bp->cmd_tbl.get_smu_clock_info(bp); 1223 return bp->cmd_tbl.get_smu_clock_info(bp, 0);
1224} 1224}
1225 1225
1226static enum bp_result bios_parser_program_crtc_timing( 1226static enum bp_result bios_parser_program_crtc_timing(
@@ -1376,7 +1376,7 @@ static enum bp_result get_firmware_info_v3_1(
1376 if (bp->cmd_tbl.get_smu_clock_info != NULL) { 1376 if (bp->cmd_tbl.get_smu_clock_info != NULL) {
1377 /* VBIOS gives in 10KHz */ 1377 /* VBIOS gives in 10KHz */
1378 info->smu_gpu_pll_output_freq = 1378 info->smu_gpu_pll_output_freq =
1379 bp->cmd_tbl.get_smu_clock_info(bp) * 10; 1379 bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
1380 } 1380 }
1381 1381
1382 return BP_RESULT_OK; 1382 return BP_RESULT_OK;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index e362658aa3ce..3f63f712c8a4 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -796,7 +796,7 @@ static enum bp_result set_dce_clock_v2_1(
796 ****************************************************************************** 796 ******************************************************************************
797 *****************************************************************************/ 797 *****************************************************************************/
798 798
799static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp); 799static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id);
800 800
801static void init_get_smu_clock_info(struct bios_parser *bp) 801static void init_get_smu_clock_info(struct bios_parser *bp)
802{ 802{
@@ -805,12 +805,13 @@ static void init_get_smu_clock_info(struct bios_parser *bp)
805 805
806} 806}
807 807
808static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp) 808static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
809{ 809{
810 struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0}; 810 struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0};
811 struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output; 811 struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output;
812 812
813 smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ; 813 smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ;
814 smu_input.syspll_id = id;
814 815
815 /* Get Specific Clock */ 816 /* Get Specific Clock */
816 if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) { 817 if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) {
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
index 59061b806df5..ec1c0c9f3f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -96,7 +96,7 @@ struct cmd_tbl {
96 struct bios_parser *bp, 96 struct bios_parser *bp,
97 struct bp_set_dce_clock_parameters *bp_params); 97 struct bp_set_dce_clock_parameters *bp_params);
98 unsigned int (*get_smu_clock_info)( 98 unsigned int (*get_smu_clock_info)(
99 struct bios_parser *bp); 99 struct bios_parser *bp, uint8_t id);
100 100
101}; 101};
102 102
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 6d38b8f43198..0cbab81ab304 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -85,7 +85,6 @@ static void calculate_bandwidth(
85 const uint32_t s_mid5 = 5; 85 const uint32_t s_mid5 = 5;
86 const uint32_t s_mid6 = 6; 86 const uint32_t s_mid6 = 6;
87 const uint32_t s_high = 7; 87 const uint32_t s_high = 7;
88 const uint32_t bus_efficiency = 1;
89 const uint32_t dmif_chunk_buff_margin = 1; 88 const uint32_t dmif_chunk_buff_margin = 1;
90 89
91 uint32_t max_chunks_fbc_mode; 90 uint32_t max_chunks_fbc_mode;
@@ -592,7 +591,12 @@ static void calculate_bandwidth(
592 /* 1 = use channel 0 and 1*/ 591 /* 1 = use channel 0 and 1*/
593 /* 2 = use channel 0,1,2,3*/ 592 /* 2 = use channel 0,1,2,3*/
594 if ((fbc_enabled == 1 && lpt_enabled == 1)) { 593 if ((fbc_enabled == 1 && lpt_enabled == 1)) {
595 data->dram_efficiency = bw_int_to_fixed(1); 594 if (vbios->memory_type == bw_def_hbm)
595 data->dram_efficiency = bw_frc_to_fixed(5, 10);
596 else
597 data->dram_efficiency = bw_int_to_fixed(1);
598
599
596 if (dceip->low_power_tiling_mode == 0) { 600 if (dceip->low_power_tiling_mode == 0) {
597 data->number_of_dram_channels = 1; 601 data->number_of_dram_channels = 1;
598 } 602 }
@@ -607,7 +611,10 @@ static void calculate_bandwidth(
607 } 611 }
608 } 612 }
609 else { 613 else {
610 data->dram_efficiency = bw_frc_to_fixed(8, 10); 614 if (vbios->memory_type == bw_def_hbm)
615 data->dram_efficiency = bw_frc_to_fixed(5, 10);
616 else
617 data->dram_efficiency = bw_frc_to_fixed(8, 10);
611 } 618 }
612 /*memory request size and latency hiding:*/ 619 /*memory request size and latency hiding:*/
613 /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/ 620 /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
@@ -1171,9 +1178,9 @@ static void calculate_bandwidth(
1171 } 1178 }
1172 for (i = 0; i <= 2; i++) { 1179 for (i = 0; i <= 2; i++) {
1173 for (j = 0; j <= 7; j++) { 1180 for (j = 0; j <= 7; j++) {
1174 data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency))))); 1181 data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))));
1175 if (data->d1_display_write_back_dwb_enable == 1) { 1182 if (data->d1_display_write_back_dwb_enable == 1) {
1176 data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency))))); 1183 data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(sclk[j], vbios->data_return_bus_width))));
1177 } 1184 }
1178 } 1185 }
1179 } 1186 }
@@ -1258,6 +1265,16 @@ static void calculate_bandwidth(
1258 /* / (dispclk - display bw)*/ 1265 /* / (dispclk - display bw)*/
1259 /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/ 1266 /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
1260 /*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/ 1267 /*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
1268
1269 /*initialize variables*/
1270 number_of_displays_enabled = 0;
1271 number_of_displays_enabled_with_margin = 0;
1272 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1273 if (data->enable[k]) {
1274 number_of_displays_enabled = number_of_displays_enabled + 1;
1275 }
1276 data->display_pstate_change_enable[k] = 0;
1277 }
1261 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { 1278 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1262 if (data->enable[i]) { 1279 if (data->enable[i]) {
1263 if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) { 1280 if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
@@ -1276,7 +1293,10 @@ static void calculate_bandwidth(
1276 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { 1293 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1277 if (data->enable[i]) { 1294 if (data->enable[i]) {
1278 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) { 1295 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
1279 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); 1296 if (number_of_displays_enabled > 2)
1297 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(2)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
1298 else
1299 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
1280 } 1300 }
1281 else { 1301 else {
1282 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); 1302 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
@@ -1338,24 +1358,15 @@ static void calculate_bandwidth(
1338 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { 1358 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1339 if (data->enable[i]) { 1359 if (data->enable[i]) {
1340 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1) { 1360 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1) {
1341 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency)); 1361 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
1342 } 1362 }
1343 else { 1363 else {
1344 /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/ 1364 /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
1345 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency)); 1365 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
1346 } 1366 }
1347 data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]); 1367 data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
1348 } 1368 }
1349 } 1369 }
1350 /*initialize variables*/
1351 number_of_displays_enabled = 0;
1352 number_of_displays_enabled_with_margin = 0;
1353 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1354 if (data->enable[k]) {
1355 number_of_displays_enabled = number_of_displays_enabled + 1;
1356 }
1357 data->display_pstate_change_enable[k] = 0;
1358 }
1359 for (i = 0; i <= 2; i++) { 1370 for (i = 0; i <= 2; i++) {
1360 for (j = 0; j <= 7; j++) { 1371 for (j = 0; j <= 7; j++) {
1361 data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999); 1372 data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
@@ -1370,10 +1381,11 @@ static void calculate_bandwidth(
1370 /*determine the minimum dram clock change margin for each set of clock frequencies*/ 1381 /*determine the minimum dram clock change margin for each set of clock frequencies*/
1371 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); 1382 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
1372 /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/ 1383 /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
1373 data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k])))); 1384 data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
1374 if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) { 1385 if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
1375 data->display_pstate_change_enable[k] = 1; 1386 data->display_pstate_change_enable[k] = 1;
1376 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; 1387 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
1388 data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
1377 } 1389 }
1378 } 1390 }
1379 } 1391 }
@@ -1383,10 +1395,11 @@ static void calculate_bandwidth(
1383 /*determine the minimum dram clock change margin for each display pipe*/ 1395 /*determine the minimum dram clock change margin for each display pipe*/
1384 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); 1396 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
1385 /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/ 1397 /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
1386 data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); 1398 data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
1387 if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) { 1399 if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
1388 data->display_pstate_change_enable[k] = 1; 1400 data->display_pstate_change_enable[k] = 1;
1389 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; 1401 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
1402 data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
1390 } 1403 }
1391 } 1404 }
1392 } 1405 }
@@ -1420,7 +1433,7 @@ static void calculate_bandwidth(
1420 data->displays_with_same_mode[i] = bw_int_to_fixed(0); 1433 data->displays_with_same_mode[i] = bw_int_to_fixed(0);
1421 if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) { 1434 if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
1422 for (j = 0; j <= maximum_number_of_surfaces - 1; j++) { 1435 for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
1423 if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) { 1436 if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
1424 data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1)); 1437 data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
1425 } 1438 }
1426 } 1439 }
@@ -1435,7 +1448,7 @@ static void calculate_bandwidth(
1435 /*aligned displays with the same timing.*/ 1448 /*aligned displays with the same timing.*/
1436 /*the display(s) with the negative margin can be switched in the v_blank region while the other*/ 1449 /*the display(s) with the negative margin can be switched in the v_blank region while the other*/
1437 /*displays are in v_blank or v_active.*/ 1450 /*displays are in v_blank or v_active.*/
1438 if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) { 1451 if (number_of_displays_enabled_with_margin > 0 && (number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin) == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk)) {
1439 data->nbp_state_change_enable = bw_def_yes; 1452 data->nbp_state_change_enable = bw_def_yes;
1440 } 1453 }
1441 else { 1454 else {
@@ -1448,6 +1461,25 @@ static void calculate_bandwidth(
1448 else { 1461 else {
1449 nbp_state_change_enable_blank = bw_def_no; 1462 nbp_state_change_enable_blank = bw_def_no;
1450 } 1463 }
1464
1465 /*average bandwidth*/
1466 /*the average bandwidth with no compression is the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/
1467 /*the average bandwidth with compression is the same, divided by the compression ratio*/
1468 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1469 if (data->enable[i]) {
1470 data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
1471 data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
1472 }
1473 }
1474 data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
1475 data->total_average_bandwidth = bw_int_to_fixed(0);
1476 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1477 if (data->enable[i]) {
1478 data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
1479 data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
1480 }
1481 }
1482
1451 /*required yclk(pclk)*/ 1483 /*required yclk(pclk)*/
1452 /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queque size*/ 1484 /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queque size*/
1453 /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/ 1485 /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
@@ -1497,17 +1529,20 @@ static void calculate_bandwidth(
1497 } 1529 }
1498 else { 1530 else {
1499 data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000)); 1531 data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
1500 if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) { 1532 if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[low]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
1533 && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
1501 yclk_message = bw_fixed_to_int(vbios->low_yclk); 1534 yclk_message = bw_fixed_to_int(vbios->low_yclk);
1502 data->y_clk_level = low; 1535 data->y_clk_level = low;
1503 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); 1536 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1504 } 1537 }
1505 else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) { 1538 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[mid]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
1539 && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
1506 yclk_message = bw_fixed_to_int(vbios->mid_yclk); 1540 yclk_message = bw_fixed_to_int(vbios->mid_yclk);
1507 data->y_clk_level = mid; 1541 data->y_clk_level = mid;
1508 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); 1542 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1509 } 1543 }
1510 else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) { 1544 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[high]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
1545 && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
1511 yclk_message = bw_fixed_to_int(vbios->high_yclk); 1546 yclk_message = bw_fixed_to_int(vbios->high_yclk);
1512 data->y_clk_level = high; 1547 data->y_clk_level = high;
1513 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); 1548 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
@@ -1523,8 +1558,8 @@ static void calculate_bandwidth(
1523 /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in foresaking dram speed/nb p-state change.*/ 1558 /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in foresaking dram speed/nb p-state change.*/
1524 /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/ 1559 /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/
1525 /*for dmif, pte and cursor requests have to be included.*/ 1560 /*for dmif, pte and cursor requests have to be included.*/
1526 data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency)))); 1561 data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
1527 data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency)))); 1562 data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), vbios->data_return_bus_width);
1528 if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) { 1563 if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
1529 data->required_sclk = bw_int_to_fixed(9999); 1564 data->required_sclk = bw_int_to_fixed(9999);
1530 sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size; 1565 sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
@@ -1537,42 +1572,56 @@ static void calculate_bandwidth(
1537 } 1572 }
1538 else { 1573 else {
1539 data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk); 1574 data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
1540 if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) { 1575 if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_low]),vbios->data_return_bus_width))
1576 && bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
1541 sclk_message = bw_def_low; 1577 sclk_message = bw_def_low;
1542 data->sclk_level = s_low; 1578 data->sclk_level = s_low;
1543 data->required_sclk = vbios->low_sclk; 1579 data->required_sclk = vbios->low_sclk;
1544 } 1580 }
1545 else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) { 1581 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid1]),vbios->data_return_bus_width))
1582 && bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
1546 sclk_message = bw_def_mid; 1583 sclk_message = bw_def_mid;
1547 data->sclk_level = s_mid1; 1584 data->sclk_level = s_mid1;
1548 data->required_sclk = vbios->mid1_sclk; 1585 data->required_sclk = vbios->mid1_sclk;
1549 } 1586 }
1550 else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) { 1587 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid2]),vbios->data_return_bus_width))
1588 && bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
1551 sclk_message = bw_def_mid; 1589 sclk_message = bw_def_mid;
1552 data->sclk_level = s_mid2; 1590 data->sclk_level = s_mid2;
1553 data->required_sclk = vbios->mid2_sclk; 1591 data->required_sclk = vbios->mid2_sclk;
1554 } 1592 }
1555 else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) { 1593 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid3]),vbios->data_return_bus_width))
1594 && bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
1556 sclk_message = bw_def_mid; 1595 sclk_message = bw_def_mid;
1557 data->sclk_level = s_mid3; 1596 data->sclk_level = s_mid3;
1558 data->required_sclk = vbios->mid3_sclk; 1597 data->required_sclk = vbios->mid3_sclk;
1559 } 1598 }
1560 else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) { 1599 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid4]),vbios->data_return_bus_width))
1600 && bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
1561 sclk_message = bw_def_mid; 1601 sclk_message = bw_def_mid;
1562 data->sclk_level = s_mid4; 1602 data->sclk_level = s_mid4;
1563 data->required_sclk = vbios->mid4_sclk; 1603 data->required_sclk = vbios->mid4_sclk;
1564 } 1604 }
1565 else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) { 1605 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid5]),vbios->data_return_bus_width))
1606 && bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
1566 sclk_message = bw_def_mid; 1607 sclk_message = bw_def_mid;
1567 data->sclk_level = s_mid5; 1608 data->sclk_level = s_mid5;
1568 data->required_sclk = vbios->mid5_sclk; 1609 data->required_sclk = vbios->mid5_sclk;
1569 } 1610 }
1570 else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) { 1611 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid6]),vbios->data_return_bus_width))
1612 && bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
1571 sclk_message = bw_def_mid; 1613 sclk_message = bw_def_mid;
1572 data->sclk_level = s_mid6; 1614 data->sclk_level = s_mid6;
1573 data->required_sclk = vbios->mid6_sclk; 1615 data->required_sclk = vbios->mid6_sclk;
1574 } 1616 }
1575 else if (bw_ltn(data->required_sclk, sclk[s_high])) { 1617 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
1618 && bw_ltn(data->required_sclk, sclk[s_high])) {
1619 sclk_message = bw_def_high;
1620 data->sclk_level = s_high;
1621 data->required_sclk = vbios->high_sclk;
1622 }
1623 else if (bw_meq(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
1624 && bw_ltn(data->required_sclk, sclk[s_high])) {
1576 sclk_message = bw_def_high; 1625 sclk_message = bw_def_high;
1577 data->sclk_level = s_high; 1626 data->sclk_level = s_high;
1578 data->required_sclk = vbios->high_sclk; 1627 data->required_sclk = vbios->high_sclk;
@@ -1681,7 +1730,7 @@ static void calculate_bandwidth(
1681 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); 1730 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
1682 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); 1731 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
1683 } 1732 }
1684 if (data->nbp_state_change_enable == bw_def_yes) { 1733 if (data->nbp_state_change_enable == bw_def_yes && data->increase_voltage_to_support_mclk_switch) {
1685 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); 1734 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
1686 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); 1735 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
1687 } 1736 }
@@ -1861,23 +1910,6 @@ static void calculate_bandwidth(
1861 else { 1910 else {
1862 data->mcifwrdram_access_efficiency = bw_int_to_fixed(0); 1911 data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
1863 } 1912 }
1864 /*average bandwidth*/
1865 /*the average bandwidth with no compression is the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/
1866 /*the average bandwidth with compression is the same, divided by the compression ratio*/
1867 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1868 if (data->enable[i]) {
1869 data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
1870 data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
1871 }
1872 }
1873 data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
1874 data->total_average_bandwidth = bw_int_to_fixed(0);
1875 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1876 if (data->enable[i]) {
1877 data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
1878 data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
1879 }
1880 }
1881 /*stutter efficiency*/ 1913 /*stutter efficiency*/
1882 /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/ 1914 /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/
1883 /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/ 1915 /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
@@ -1905,7 +1937,7 @@ static void calculate_bandwidth(
1905 data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size))); 1937 data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
1906 } 1938 }
1907 } 1939 }
1908 data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32)))); 1940 data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_mul(sclk[data->sclk_level], vbios->data_return_bus_width));
1909 data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size; 1941 data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
1910 data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time); 1942 data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
1911 data->time_in_self_refresh = data->min_stutter_refresh_duration; 1943 data->time_in_self_refresh = data->min_stutter_refresh_duration;
@@ -1957,7 +1989,7 @@ static void calculate_bandwidth(
1957 for (i = 1; i <= 5; i++) { 1989 for (i = 1; i <= 5; i++) {
1958 data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i))); 1990 data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
1959 if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) { 1991 if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
1960 data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency)))); 1992 data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
1961 } 1993 }
1962 else { 1994 else {
1963 data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na); 1995 data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
@@ -2036,6 +2068,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2036 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2068 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2037 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2069 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2038 2070
2071 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2072 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2073 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2039 dceip.large_cursor = false; 2074 dceip.large_cursor = false;
2040 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2075 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2041 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2076 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2146,6 +2181,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2146 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2181 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2147 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2182 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2148 2183
2184 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2185 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2186 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2149 dceip.large_cursor = false; 2187 dceip.large_cursor = false;
2150 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2188 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2151 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2189 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2259,6 +2297,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2259 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2297 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2260 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2298 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2261 2299
2300 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2301 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2302 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2262 dceip.large_cursor = false; 2303 dceip.large_cursor = false;
2263 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2304 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2264 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2305 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2369,6 +2410,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2369 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2410 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2370 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2411 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2371 2412
2413 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2414 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2415 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2372 dceip.large_cursor = false; 2416 dceip.large_cursor = false;
2373 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2417 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2374 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2418 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2479,6 +2523,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2479 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2523 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2480 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2524 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2481 2525
2526 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2527 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2528 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2482 dceip.large_cursor = false; 2529 dceip.large_cursor = false;
2483 dceip.dmif_request_buffer_size = bw_int_to_fixed(2304); 2530 dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
2484 dceip.dmif_pipe_en_fbc_chunk_tracker = true; 2531 dceip.dmif_pipe_en_fbc_chunk_tracker = true;
@@ -2597,6 +2644,7 @@ static void populate_initial_data(
2597 data->graphics_tiling_mode = bw_def_tiled; 2644 data->graphics_tiling_mode = bw_def_tiled;
2598 data->underlay_micro_tile_mode = bw_def_display_micro_tiling; 2645 data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
2599 data->graphics_micro_tile_mode = bw_def_display_micro_tiling; 2646 data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
2647 data->increase_voltage_to_support_mclk_switch = true;
2600 2648
2601 /* Pipes with underlay first */ 2649 /* Pipes with underlay first */
2602 for (i = 0; i < pipe_count; i++) { 2650 for (i = 0; i < pipe_count; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 8020bc7742c1..4bb43a371292 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -983,8 +983,6 @@ bool dcn_validate_bandwidth(
983 context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32); 983 context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
984 } 984 }
985 985
986 context->bw.dcn.calc_clk.dram_ccm_us = (int)(v->dram_clock_change_margin);
987 context->bw.dcn.calc_clk.min_active_dram_ccm_us = (int)(v->min_active_dram_clock_change_margin);
988 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000); 986 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
989 context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000); 987 context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
990 988
@@ -998,7 +996,26 @@ bool dcn_validate_bandwidth(
998 dc->debug.min_disp_clk_khz; 996 dc->debug.min_disp_clk_khz;
999 } 997 }
1000 998
1001 context->bw.dcn.calc_clk.max_dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio; 999 context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
1000
1001 switch (v->voltage_level) {
1002 case 0:
1003 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1004 (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
1005 break;
1006 case 1:
1007 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1008 (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
1009 break;
1010 case 2:
1011 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1012 (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
1013 break;
1014 default:
1015 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1016 (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
1017 break;
1018 }
1002 1019
1003 for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { 1020 for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
1004 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1021 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8394d69b963f..63a3d468939a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -42,6 +42,7 @@
42#include "dmcu.h" 42#include "dmcu.h"
43#include "dpp.h" 43#include "dpp.h"
44#include "timing_generator.h" 44#include "timing_generator.h"
45#include "abm.h"
45#include "virtual/virtual_link_encoder.h" 46#include "virtual/virtual_link_encoder.h"
46 47
47#include "link_hwss.h" 48#include "link_hwss.h"
@@ -802,6 +803,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
802 if (!dcb->funcs->is_accelerated_mode(dcb)) 803 if (!dcb->funcs->is_accelerated_mode(dcb))
803 dc->hwss.enable_accelerated_mode(dc, context); 804 dc->hwss.enable_accelerated_mode(dc, context);
804 805
806 dc->hwss.set_bandwidth(dc, context, false);
807
805 /* re-program planes for existing stream, in case we need to 808 /* re-program planes for existing stream, in case we need to
806 * free up plane resource for later use 809 * free up plane resource for later use
807 */ 810 */
@@ -870,6 +873,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
870 873
871 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 874 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
872 875
876 /* pplib is notified if disp_num changed */
877 dc->hwss.set_bandwidth(dc, context, true);
878
873 dc_release_state(dc->current_state); 879 dc_release_state(dc->current_state);
874 880
875 dc->current_state = context; 881 dc->current_state = context;
@@ -1104,9 +1110,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
1104 if (u->plane_info->input_tf != u->surface->input_tf) 1110 if (u->plane_info->input_tf != u->surface->input_tf)
1105 update_flags->bits.input_tf_change = 1; 1111 update_flags->bits.input_tf_change = 1;
1106 1112
1107 if (u->plane_info->sdr_white_level != u->surface->sdr_white_level)
1108 update_flags->bits.output_tf_change = 1;
1109
1110 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) 1113 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
1111 update_flags->bits.horizontal_mirror_change = 1; 1114 update_flags->bits.horizontal_mirror_change = 1;
1112 1115
@@ -1361,6 +1364,17 @@ static void commit_planes_for_stream(struct dc *dc,
1361 1364
1362 dc->hwss.apply_ctx_for_surface( 1365 dc->hwss.apply_ctx_for_surface(
1363 dc, pipe_ctx->stream, stream_status->plane_count, context); 1366 dc, pipe_ctx->stream, stream_status->plane_count, context);
1367
1368 if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
1369 if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
1370 // if otg funcs defined check if blanked before programming
1371 if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
1372 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1373 pipe_ctx->stream_res.abm, stream->abm_level);
1374 } else
1375 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1376 pipe_ctx->stream_res.abm, stream->abm_level);
1377 }
1364 } 1378 }
1365 } 1379 }
1366 1380
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index c15565092ca8..5a552cb3f8a7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -36,26 +36,22 @@
36#include "hw_sequencer.h" 36#include "hw_sequencer.h"
37 37
38#include "resource.h" 38#include "resource.h"
39#define DC_LOGGER \
40 logger
39 41
40#define SURFACE_TRACE(...) do {\ 42#define SURFACE_TRACE(...) do {\
41 if (dc->debug.surface_trace) \ 43 if (dc->debug.surface_trace) \
42 dm_logger_write(logger, \ 44 DC_LOG_IF_TRACE(__VA_ARGS__); \
43 LOG_IF_TRACE, \
44 ##__VA_ARGS__); \
45} while (0) 45} while (0)
46 46
47#define TIMING_TRACE(...) do {\ 47#define TIMING_TRACE(...) do {\
48 if (dc->debug.timing_trace) \ 48 if (dc->debug.timing_trace) \
49 dm_logger_write(logger, \ 49 DC_LOG_SYNC(__VA_ARGS__); \
50 LOG_SYNC, \
51 ##__VA_ARGS__); \
52} while (0) 50} while (0)
53 51
54#define CLOCK_TRACE(...) do {\ 52#define CLOCK_TRACE(...) do {\
55 if (dc->debug.clock_trace) \ 53 if (dc->debug.clock_trace) \
56 dm_logger_write(logger, \ 54 DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \
57 LOG_BANDWIDTH_CALCS, \
58 ##__VA_ARGS__); \
59} while (0) 55} while (0)
60 56
61void pre_surface_trace( 57void pre_surface_trace(
@@ -362,25 +358,19 @@ void context_clock_trace(
362 struct dal_logger *logger = core_dc->ctx->logger; 358 struct dal_logger *logger = core_dc->ctx->logger;
363 359
364 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" 360 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
365 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n" 361 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
366 "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
367 context->bw.dcn.calc_clk.dispclk_khz, 362 context->bw.dcn.calc_clk.dispclk_khz,
368 context->bw.dcn.calc_clk.max_dppclk_khz, 363 context->bw.dcn.calc_clk.dppclk_khz,
369 context->bw.dcn.calc_clk.dcfclk_khz, 364 context->bw.dcn.calc_clk.dcfclk_khz,
370 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, 365 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
371 context->bw.dcn.calc_clk.fclk_khz, 366 context->bw.dcn.calc_clk.fclk_khz,
372 context->bw.dcn.calc_clk.socclk_khz, 367 context->bw.dcn.calc_clk.socclk_khz);
373 context->bw.dcn.calc_clk.dram_ccm_us,
374 context->bw.dcn.calc_clk.min_active_dram_ccm_us);
375 CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" 368 CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
376 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n" 369 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
377 "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
378 context->bw.dcn.calc_clk.dispclk_khz, 370 context->bw.dcn.calc_clk.dispclk_khz,
379 context->bw.dcn.calc_clk.max_dppclk_khz, 371 context->bw.dcn.calc_clk.dppclk_khz,
380 context->bw.dcn.calc_clk.dcfclk_khz, 372 context->bw.dcn.calc_clk.dcfclk_khz,
381 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, 373 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
382 context->bw.dcn.calc_clk.fclk_khz, 374 context->bw.dcn.calc_clk.fclk_khz);
383 context->bw.dcn.calc_clk.dram_ccm_us,
384 context->bw.dcn.calc_clk.min_active_dram_ccm_us);
385#endif 375#endif
386} 376}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index f8c09273e0f1..eeb04471b2f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1960,6 +1960,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
1960 (abm->funcs->set_backlight_level == NULL)) 1960 (abm->funcs->set_backlight_level == NULL))
1961 return false; 1961 return false;
1962 1962
1963 if (stream) {
1964 if (stream->bl_pwm_level == 0)
1965 frame_ramp = 0;
1966
1967 ((struct dc_stream_state *)stream)->bl_pwm_level = level;
1968 }
1969
1963 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); 1970 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
1964 1971
1965 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level); 1972 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index b9fc6d842931..ba3487e97361 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1124,6 +1124,7 @@ bool dc_add_plane_to_context(
1124 ASSERT(tail_pipe); 1124 ASSERT(tail_pipe);
1125 1125
1126 free_pipe->stream_res.tg = tail_pipe->stream_res.tg; 1126 free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
1127 free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
1127 free_pipe->stream_res.opp = tail_pipe->stream_res.opp; 1128 free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
1128 free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc; 1129 free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
1129 free_pipe->stream_res.audio = tail_pipe->stream_res.audio; 1130 free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
@@ -1736,6 +1737,10 @@ enum dc_status resource_map_pool_resources(
1736 pipe_ctx->stream_res.audio, true); 1737 pipe_ctx->stream_res.audio, true);
1737 } 1738 }
1738 1739
1740 /* Add ABM to the resource if on EDP */
1741 if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal))
1742 pipe_ctx->stream_res.abm = pool->abm;
1743
1739 for (i = 0; i < context->stream_count; i++) 1744 for (i = 0; i < context->stream_count; i++)
1740 if (context->streams[i] == stream) { 1745 if (context->streams[i] == stream) {
1741 context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst; 1746 context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index cd5819789d76..ce0747ed0f00 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -198,8 +198,7 @@ bool dc_stream_set_cursor_attributes(
198 for (i = 0; i < MAX_PIPES; i++) { 198 for (i = 0; i < MAX_PIPES; i++) {
199 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 199 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
200 200
201 if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && 201 if (pipe_ctx->stream != stream)
202 !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
203 continue; 202 continue;
204 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) 203 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
205 continue; 204 continue;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 2cd97342bf0f..fa4b3c8b3bb7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
38#include "inc/compressor.h" 38#include "inc/compressor.h"
39#include "dml/display_mode_lib.h" 39#include "dml/display_mode_lib.h"
40 40
41#define DC_VER "3.1.37" 41#define DC_VER "3.1.38"
42 42
43#define MAX_SURFACES 3 43#define MAX_SURFACES 3
44#define MAX_STREAMS 6 44#define MAX_STREAMS 6
@@ -186,13 +186,12 @@ enum wm_report_mode {
186 186
187struct dc_clocks { 187struct dc_clocks {
188 int dispclk_khz; 188 int dispclk_khz;
189 int max_dppclk_khz; 189 int max_supported_dppclk_khz;
190 int dppclk_khz;
190 int dcfclk_khz; 191 int dcfclk_khz;
191 int socclk_khz; 192 int socclk_khz;
192 int dcfclk_deep_sleep_khz; 193 int dcfclk_deep_sleep_khz;
193 int fclk_khz; 194 int fclk_khz;
194 int dram_ccm_us;
195 int min_active_dram_ccm_us;
196}; 195};
197 196
198struct dc_debug { 197struct dc_debug {
@@ -447,6 +446,7 @@ union surface_update_flags {
447 446
448struct dc_plane_state { 447struct dc_plane_state {
449 struct dc_plane_address address; 448 struct dc_plane_address address;
449 struct dc_plane_flip_time time;
450 struct scaling_taps scaling_quality; 450 struct scaling_taps scaling_quality;
451 struct rect src_rect; 451 struct rect src_rect;
452 struct rect dst_rect; 452 struct rect dst_rect;
@@ -557,6 +557,7 @@ struct dc_transfer_func *dc_create_transfer_func(void);
557 */ 557 */
558struct dc_flip_addrs { 558struct dc_flip_addrs {
559 struct dc_plane_address address; 559 struct dc_plane_address address;
560 unsigned int flip_timestamp_in_us;
560 bool flip_immediate; 561 bool flip_immediate;
561 /* TODO: add flip duration for FreeSync */ 562 /* TODO: add flip duration for FreeSync */
562}; 563};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index e91ac6811990..b83a7dc2f5a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -692,8 +692,18 @@ struct crtc_trigger_info {
692 enum trigger_delay delay; 692 enum trigger_delay delay;
693}; 693};
694 694
695struct dc_crtc_timing { 695enum vrr_state {
696 VRR_STATE_OFF = 0,
697 VRR_STATE_VARIABLE,
698 VRR_STATE_FIXED,
699};
696 700
701struct dc_crtc_timing_adjust {
702 uint32_t v_total_min;
703 uint32_t v_total_max;
704};
705
706struct dc_crtc_timing {
697 uint32_t h_total; 707 uint32_t h_total;
698 uint32_t h_border_left; 708 uint32_t h_border_left;
699 uint32_t h_addressable; 709 uint32_t h_addressable;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index f44cd4d87b79..d017df56b2ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -48,6 +48,8 @@ struct dc_stream_status {
48struct dc_stream_state { 48struct dc_stream_state {
49 struct dc_sink *sink; 49 struct dc_sink *sink;
50 struct dc_crtc_timing timing; 50 struct dc_crtc_timing timing;
51 struct dc_crtc_timing_adjust timing_adjust;
52 struct vrr_params vrr_params;
51 53
52 struct rect src; /* composition area */ 54 struct rect src; /* composition area */
53 struct rect dst; /* stream addressable area */ 55 struct rect dst; /* stream addressable area */
@@ -74,6 +76,10 @@ struct dc_stream_state {
74 unsigned char psr_version; 76 unsigned char psr_version;
75 /* TODO: CEA VIC */ 77 /* TODO: CEA VIC */
76 78
79 /* DMCU info */
80 unsigned int abm_level;
81 unsigned int bl_pwm_level;
82
77 /* from core_stream struct */ 83 /* from core_stream struct */
78 struct dc_context *ctx; 84 struct dc_context *ctx;
79 85
@@ -106,6 +112,7 @@ struct dc_stream_update {
106 struct dc_transfer_func *out_transfer_func; 112 struct dc_transfer_func *out_transfer_func;
107 struct dc_hdr_static_metadata *hdr_static_metadata; 113 struct dc_hdr_static_metadata *hdr_static_metadata;
108 enum color_transfer_func color_output_tf; 114 enum color_transfer_func color_output_tf;
115 unsigned int *abm_level;
109}; 116};
110 117
111bool dc_is_stream_unchanged( 118bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 8811b6f86bff..9441305d3ab5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -521,6 +521,24 @@ struct audio_info {
521 struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT]; 521 struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
522}; 522};
523 523
524struct vrr_params {
525 enum vrr_state state;
526 uint32_t window_min;
527 uint32_t window_max;
528 uint32_t inserted_frame_duration_in_us;
529 uint32_t frames_to_insert;
530 uint32_t frame_counter;
531};
532
533#define DC_PLANE_UPDATE_TIMES_MAX 10
534
535struct dc_plane_flip_time {
536 unsigned int time_elapsed_in_us[DC_PLANE_UPDATE_TIMES_MAX];
537 unsigned int index;
538 unsigned int prev_update_time_in_us;
539};
540
541// Will combine with vrr_params at some point.
524struct freesync_context { 542struct freesync_context {
525 bool supported; 543 bool supported;
526 bool enabled; 544 bool enabled;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 1d4546f23135..c24c0e5ea44e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -46,6 +46,23 @@
46 SR(SMU_INTERRUPT_CONTROL), \ 46 SR(SMU_INTERRUPT_CONTROL), \
47 SR(DC_DMCU_SCRATCH) 47 SR(DC_DMCU_SCRATCH)
48 48
49#define DMCU_DCE80_REG_LIST() \
50 SR(DMCU_CTRL), \
51 SR(DMCU_STATUS), \
52 SR(DMCU_RAM_ACCESS_CTRL), \
53 SR(DMCU_IRAM_WR_CTRL), \
54 SR(DMCU_IRAM_WR_DATA), \
55 SR(MASTER_COMM_DATA_REG1), \
56 SR(MASTER_COMM_DATA_REG2), \
57 SR(MASTER_COMM_DATA_REG3), \
58 SR(MASTER_COMM_CMD_REG), \
59 SR(MASTER_COMM_CNTL_REG), \
60 SR(DMCU_IRAM_RD_CTRL), \
61 SR(DMCU_IRAM_RD_DATA), \
62 SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
63 SR(SMU_INTERRUPT_CONTROL), \
64 SR(DC_DMCU_SCRATCH)
65
49#define DMCU_DCE110_COMMON_REG_LIST() \ 66#define DMCU_DCE110_COMMON_REG_LIST() \
50 DMCU_COMMON_REG_LIST_DCE_BASE(), \ 67 DMCU_COMMON_REG_LIST_DCE_BASE(), \
51 SR(DCI_MEM_PWR_STATUS) 68 SR(DCI_MEM_PWR_STATUS)
@@ -83,6 +100,24 @@
83 STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \ 100 STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \
84 DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh) 101 DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
85 102
103#define DMCU_MASK_SH_LIST_DCE80(mask_sh) \
104 DMCU_SF(DMCU_CTRL, \
105 DMCU_ENABLE, mask_sh), \
106 DMCU_SF(DMCU_STATUS, \
107 UC_IN_STOP_MODE, mask_sh), \
108 DMCU_SF(DMCU_STATUS, \
109 UC_IN_RESET, mask_sh), \
110 DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
111 IRAM_HOST_ACCESS_EN, mask_sh), \
112 DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
113 IRAM_WR_ADDR_AUTO_INC, mask_sh), \
114 DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
115 IRAM_RD_ADDR_AUTO_INC, mask_sh), \
116 DMCU_SF(MASTER_COMM_CMD_REG, \
117 MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
118 DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
119 DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
120
86#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \ 121#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \
87 DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \ 122 DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
88 DMCU_SF(DCI_MEM_PWR_STATUS, \ 123 DMCU_SF(DCI_MEM_PWR_STATUS, \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index 4b8e7ce2de8c..487724345d9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -56,7 +56,7 @@ void dce_pipe_control_lock(struct dc *dc,
56 if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg)) 56 if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
57 return; 57 return;
58 58
59 val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->pipe_idx], 59 val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst],
60 BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph, 60 BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
61 BLND_SCL_V_UPDATE_LOCK, &scl, 61 BLND_SCL_V_UPDATE_LOCK, &scl,
62 BLND_BLND_V_UPDATE_LOCK, &blnd, 62 BLND_BLND_V_UPDATE_LOCK, &blnd,
@@ -67,19 +67,19 @@ void dce_pipe_control_lock(struct dc *dc,
67 blnd = lock_val; 67 blnd = lock_val;
68 update_lock_mode = lock_val; 68 update_lock_mode = lock_val;
69 69
70 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val, 70 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
71 BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph, 71 BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
72 BLND_SCL_V_UPDATE_LOCK, scl); 72 BLND_SCL_V_UPDATE_LOCK, scl);
73 73
74 if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0) 74 if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0)
75 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val, 75 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
76 BLND_BLND_V_UPDATE_LOCK, blnd, 76 BLND_BLND_V_UPDATE_LOCK, blnd,
77 BLND_V_UPDATE_LOCK_MODE, update_lock_mode); 77 BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
78 78
79 if (hws->wa.blnd_crtc_trigger) { 79 if (hws->wa.blnd_crtc_trigger) {
80 if (!lock) { 80 if (!lock) {
81 uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->pipe_idx]); 81 uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst]);
82 REG_WRITE(CRTC_H_BLANK_START_END[pipe->pipe_idx], value); 82 REG_WRITE(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst], value);
83 } 83 }
84 } 84 }
85} 85}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 3336428b1fed..057b8afd74bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -190,6 +190,7 @@
190 SR(D2VGA_CONTROL), \ 190 SR(D2VGA_CONTROL), \
191 SR(D3VGA_CONTROL), \ 191 SR(D3VGA_CONTROL), \
192 SR(D4VGA_CONTROL), \ 192 SR(D4VGA_CONTROL), \
193 SR(VGA_TEST_CONTROL), \
193 SR(DC_IP_REQUEST_CNTL), \ 194 SR(DC_IP_REQUEST_CNTL), \
194 BL_REG_LIST() 195 BL_REG_LIST()
195 196
@@ -261,6 +262,7 @@ struct dce_hwseq_registers {
261 uint32_t D2VGA_CONTROL; 262 uint32_t D2VGA_CONTROL;
262 uint32_t D3VGA_CONTROL; 263 uint32_t D3VGA_CONTROL;
263 uint32_t D4VGA_CONTROL; 264 uint32_t D4VGA_CONTROL;
265 uint32_t VGA_TEST_CONTROL;
264 /* MMHUB registers. read only. temporary hack */ 266 /* MMHUB registers. read only. temporary hack */
265 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; 267 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
266 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; 268 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
@@ -327,6 +329,8 @@ struct dce_hwseq_registers {
327 HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\ 329 HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
328 SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\ 330 SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
329 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\ 331 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
332 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
333 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
330 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\ 334 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
331 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_) 335 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
332 336
@@ -403,7 +407,15 @@ struct dce_hwseq_registers {
403 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \ 407 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
404 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \ 408 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
405 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ 409 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
410 HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
411 HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
412 HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
413 HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
414 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
415 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
406 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ 416 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
417 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh), \
418 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh), \
407 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) 419 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
408 420
409#define HWSEQ_REG_FIELD_LIST(type) \ 421#define HWSEQ_REG_FIELD_LIST(type) \
@@ -436,7 +448,9 @@ struct dce_hwseq_registers {
436 type ENABLE_L1_TLB;\ 448 type ENABLE_L1_TLB;\
437 type SYSTEM_ACCESS_MODE;\ 449 type SYSTEM_ACCESS_MODE;\
438 type LVTMA_BLON;\ 450 type LVTMA_BLON;\
439 type LVTMA_PWRSEQ_TARGET_STATE_R; 451 type LVTMA_PWRSEQ_TARGET_STATE_R;\
452 type LVTMA_DIGON;\
453 type LVTMA_DIGON_OVRD;
440 454
441#define HWSEQ_DCN_REG_FIELD_LIST(type) \ 455#define HWSEQ_DCN_REG_FIELD_LIST(type) \
442 type HUBP_VTG_SEL; \ 456 type HUBP_VTG_SEL; \
@@ -483,7 +497,13 @@ struct dce_hwseq_registers {
483 type DCFCLK_GATE_DIS; \ 497 type DCFCLK_GATE_DIS; \
484 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ 498 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
485 type DENTIST_DPPCLK_WDIVIDER; \ 499 type DENTIST_DPPCLK_WDIVIDER; \
486 type DENTIST_DISPCLK_WDIVIDER; 500 type DENTIST_DISPCLK_WDIVIDER; \
501 type VGA_TEST_ENABLE; \
502 type VGA_TEST_RENDER_START; \
503 type D1VGA_MODE_ENABLE; \
504 type D2VGA_MODE_ENABLE; \
505 type D3VGA_MODE_ENABLE; \
506 type D4VGA_MODE_ENABLE;
487 507
488struct dce_hwseq_shift { 508struct dce_hwseq_shift {
489 HWSEQ_REG_FIELD_LIST(uint8_t) 509 HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 81776e4797ed..8167cad7bcf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -767,8 +767,7 @@ void dce110_link_encoder_construct(
767 bp_cap_info.DP_HBR3_EN; 767 bp_cap_info.DP_HBR3_EN;
768 enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; 768 enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
769 } else { 769 } else {
770 dm_logger_write(enc110->base.ctx->logger, LOG_WARNING, 770 DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
771 "%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
772 __func__, 771 __func__,
773 result); 772 result);
774 } 773 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3bdbed80f7f8..3092f76bdb75 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -51,6 +51,9 @@
51#include "dce/dce_10_0_d.h" 51#include "dce/dce_10_0_d.h"
52#include "dce/dce_10_0_sh_mask.h" 52#include "dce/dce_10_0_sh_mask.h"
53 53
54#include "dce/dce_dmcu.h"
55#include "dce/dce_abm.h"
56
54#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT 57#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
55#include "gmc/gmc_8_2_d.h" 58#include "gmc/gmc_8_2_d.h"
56#include "gmc/gmc_8_2_sh_mask.h" 59#include "gmc/gmc_8_2_sh_mask.h"
@@ -320,7 +323,29 @@ static const struct dce110_clk_src_mask cs_mask = {
320 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) 323 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
321}; 324};
322 325
326static const struct dce_dmcu_registers dmcu_regs = {
327 DMCU_DCE110_COMMON_REG_LIST()
328};
329
330static const struct dce_dmcu_shift dmcu_shift = {
331 DMCU_MASK_SH_LIST_DCE110(__SHIFT)
332};
333
334static const struct dce_dmcu_mask dmcu_mask = {
335 DMCU_MASK_SH_LIST_DCE110(_MASK)
336};
337
338static const struct dce_abm_registers abm_regs = {
339 ABM_DCE110_COMMON_REG_LIST()
340};
341
342static const struct dce_abm_shift abm_shift = {
343 ABM_MASK_SH_LIST_DCE110(__SHIFT)
344};
323 345
346static const struct dce_abm_mask abm_mask = {
347 ABM_MASK_SH_LIST_DCE110(_MASK)
348};
324 349
325#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 350#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
326 351
@@ -622,6 +647,12 @@ static void destruct(struct dce110_resource_pool *pool)
622 if (pool->base.display_clock != NULL) 647 if (pool->base.display_clock != NULL)
623 dce_disp_clk_destroy(&pool->base.display_clock); 648 dce_disp_clk_destroy(&pool->base.display_clock);
624 649
650 if (pool->base.abm != NULL)
651 dce_abm_destroy(&pool->base.abm);
652
653 if (pool->base.dmcu != NULL)
654 dce_dmcu_destroy(&pool->base.dmcu);
655
625 if (pool->base.irqs != NULL) 656 if (pool->base.irqs != NULL)
626 dal_irq_service_destroy(&pool->base.irqs); 657 dal_irq_service_destroy(&pool->base.irqs);
627} 658}
@@ -829,6 +860,25 @@ static bool construct(
829 goto res_create_fail; 860 goto res_create_fail;
830 } 861 }
831 862
863 pool->base.dmcu = dce_dmcu_create(ctx,
864 &dmcu_regs,
865 &dmcu_shift,
866 &dmcu_mask);
867 if (pool->base.dmcu == NULL) {
868 dm_error("DC: failed to create dmcu!\n");
869 BREAK_TO_DEBUGGER();
870 goto res_create_fail;
871 }
872
873 pool->base.abm = dce_abm_create(ctx,
874 &abm_regs,
875 &abm_shift,
876 &abm_mask);
877 if (pool->base.abm == NULL) {
878 dm_error("DC: failed to create abm!\n");
879 BREAK_TO_DEBUGGER();
880 goto res_create_fail;
881 }
832 882
833 /* get static clock information for PPLIB or firmware, save 883 /* get static clock information for PPLIB or firmware, save
834 * max_clock_state 884 * max_clock_state
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index c2041a63cccd..30dd62f0f5fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -737,10 +737,14 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
737 737
738static bool is_panel_powered_on(struct dce_hwseq *hws) 738static bool is_panel_powered_on(struct dce_hwseq *hws)
739{ 739{
740 uint32_t value; 740 uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
741
742
743 REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
744
745 REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
741 746
742 REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value); 747 return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
743 return value == 1;
744} 748}
745 749
746static enum bp_result link_transmitter_control( 750static enum bp_result link_transmitter_control(
@@ -1002,8 +1006,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
1002 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 1006 if (dc_is_dp_signal(pipe_ctx->stream->signal))
1003 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params); 1007 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
1004 1008
1005 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) 1009 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1006 link->dc->hwss.edp_backlight_control(link, true); 1010 link->dc->hwss.edp_backlight_control(link, true);
1011 stream->bl_pwm_level = 0;
1012 }
1007} 1013}
1008void dce110_blank_stream(struct pipe_ctx *pipe_ctx) 1014void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
1009{ 1015{
@@ -1128,7 +1134,7 @@ static void build_audio_output(
1128static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx, 1134static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
1129 struct tg_color *color) 1135 struct tg_color *color)
1130{ 1136{
1131 uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->pipe_idx) / 4; 1137 uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->stream_res.tg->inst) / 4;
1132 1138
1133 switch (pipe_ctx->plane_res.scl_data.format) { 1139 switch (pipe_ctx->plane_res.scl_data.format) {
1134 case PIXEL_FORMAT_ARGB8888: 1140 case PIXEL_FORMAT_ARGB8888:
@@ -2106,9 +2112,6 @@ enum dc_status dce110_apply_ctx_to_hw(
2106 return status; 2112 return status;
2107 } 2113 }
2108 2114
2109 /* pplib is notified if disp_num changed */
2110 dc->hwss.set_bandwidth(dc, context, true);
2111
2112 /* to save power */ 2115 /* to save power */
2113 apply_min_clocks(dc, context, &clocks_state, false); 2116 apply_min_clocks(dc, context, &clocks_state, false);
2114 2117
@@ -2936,15 +2939,18 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
2936{ 2939{
2937 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; 2940 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
2938 2941
2939 if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes) 2942 if (pipe_ctx->plane_res.ipp &&
2943 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
2940 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes( 2944 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
2941 pipe_ctx->plane_res.ipp, attributes); 2945 pipe_ctx->plane_res.ipp, attributes);
2942 2946
2943 if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes) 2947 if (pipe_ctx->plane_res.mi &&
2948 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
2944 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes( 2949 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
2945 pipe_ctx->plane_res.mi, attributes); 2950 pipe_ctx->plane_res.mi, attributes);
2946 2951
2947 if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes) 2952 if (pipe_ctx->plane_res.xfm &&
2953 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
2948 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes( 2954 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
2949 pipe_ctx->plane_res.xfm, attributes); 2955 pipe_ctx->plane_res.xfm, attributes);
2950} 2956}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index a36c14d3d9a8..5d854a37a978 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -53,6 +53,8 @@
53 53
54#include "reg_helper.h" 54#include "reg_helper.h"
55 55
56#include "dce/dce_dmcu.h"
57#include "dce/dce_abm.h"
56/* TODO remove this include */ 58/* TODO remove this include */
57 59
58#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT 60#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -364,6 +366,29 @@ static const struct resource_caps res_cap_83 = {
364 .num_pll = 2, 366 .num_pll = 2,
365}; 367};
366 368
369static const struct dce_dmcu_registers dmcu_regs = {
370 DMCU_DCE80_REG_LIST()
371};
372
373static const struct dce_dmcu_shift dmcu_shift = {
374 DMCU_MASK_SH_LIST_DCE80(__SHIFT)
375};
376
377static const struct dce_dmcu_mask dmcu_mask = {
378 DMCU_MASK_SH_LIST_DCE80(_MASK)
379};
380static const struct dce_abm_registers abm_regs = {
381 ABM_DCE110_COMMON_REG_LIST()
382};
383
384static const struct dce_abm_shift abm_shift = {
385 ABM_MASK_SH_LIST_DCE110(__SHIFT)
386};
387
388static const struct dce_abm_mask abm_mask = {
389 ABM_MASK_SH_LIST_DCE110(_MASK)
390};
391
367#define CTX ctx 392#define CTX ctx
368#define REG(reg) mm ## reg 393#define REG(reg) mm ## reg
369 394
@@ -643,6 +668,12 @@ static void destruct(struct dce110_resource_pool *pool)
643 } 668 }
644 } 669 }
645 670
671 if (pool->base.abm != NULL)
672 dce_abm_destroy(&pool->base.abm);
673
674 if (pool->base.dmcu != NULL)
675 dce_dmcu_destroy(&pool->base.dmcu);
676
646 if (pool->base.dp_clock_source != NULL) 677 if (pool->base.dp_clock_source != NULL)
647 dce80_clock_source_destroy(&pool->base.dp_clock_source); 678 dce80_clock_source_destroy(&pool->base.dp_clock_source);
648 679
@@ -850,7 +881,25 @@ static bool dce80_construct(
850 goto res_create_fail; 881 goto res_create_fail;
851 } 882 }
852 883
884 pool->base.dmcu = dce_dmcu_create(ctx,
885 &dmcu_regs,
886 &dmcu_shift,
887 &dmcu_mask);
888 if (pool->base.dmcu == NULL) {
889 dm_error("DC: failed to create dmcu!\n");
890 BREAK_TO_DEBUGGER();
891 goto res_create_fail;
892 }
853 893
894 pool->base.abm = dce_abm_create(ctx,
895 &abm_regs,
896 &abm_shift,
897 &abm_mask);
898 if (pool->base.abm == NULL) {
899 dm_error("DC: failed to create abm!\n");
900 BREAK_TO_DEBUGGER();
901 goto res_create_fail;
902 }
854 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) 903 if (dm_pp_get_static_clocks(ctx, &static_clk_info))
855 pool->base.display_clock->max_clks_state = 904 pool->base.display_clock->max_clks_state =
856 static_clk_info.max_clocks_state; 905 static_clk_info.max_clocks_state;
@@ -1016,6 +1065,25 @@ static bool dce81_construct(
1016 goto res_create_fail; 1065 goto res_create_fail;
1017 } 1066 }
1018 1067
1068 pool->base.dmcu = dce_dmcu_create(ctx,
1069 &dmcu_regs,
1070 &dmcu_shift,
1071 &dmcu_mask);
1072 if (pool->base.dmcu == NULL) {
1073 dm_error("DC: failed to create dmcu!\n");
1074 BREAK_TO_DEBUGGER();
1075 goto res_create_fail;
1076 }
1077
1078 pool->base.abm = dce_abm_create(ctx,
1079 &abm_regs,
1080 &abm_shift,
1081 &abm_mask);
1082 if (pool->base.abm == NULL) {
1083 dm_error("DC: failed to create abm!\n");
1084 BREAK_TO_DEBUGGER();
1085 goto res_create_fail;
1086 }
1019 1087
1020 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) 1088 if (dm_pp_get_static_clocks(ctx, &static_clk_info))
1021 pool->base.display_clock->max_clks_state = 1089 pool->base.display_clock->max_clks_state =
@@ -1178,6 +1246,25 @@ static bool dce83_construct(
1178 goto res_create_fail; 1246 goto res_create_fail;
1179 } 1247 }
1180 1248
1249 pool->base.dmcu = dce_dmcu_create(ctx,
1250 &dmcu_regs,
1251 &dmcu_shift,
1252 &dmcu_mask);
1253 if (pool->base.dmcu == NULL) {
1254 dm_error("DC: failed to create dmcu!\n");
1255 BREAK_TO_DEBUGGER();
1256 goto res_create_fail;
1257 }
1258
1259 pool->base.abm = dce_abm_create(ctx,
1260 &abm_regs,
1261 &abm_shift,
1262 &abm_mask);
1263 if (pool->base.abm == NULL) {
1264 dm_error("DC: failed to create abm!\n");
1265 BREAK_TO_DEBUGGER();
1266 goto res_create_fail;
1267 }
1181 1268
1182 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) 1269 if (dm_pp_get_static_clocks(ctx, &static_clk_info))
1183 pool->base.display_clock->max_clks_state = 1270 pool->base.display_clock->max_clks_state =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index f0b798930b51..e305c28c98de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -464,6 +464,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
464 .set_cursor_attributes = dpp1_set_cursor_attributes, 464 .set_cursor_attributes = dpp1_set_cursor_attributes,
465 .set_cursor_position = dpp1_set_cursor_position, 465 .set_cursor_position = dpp1_set_cursor_position,
466 .dpp_dppclk_control = dpp1_dppclk_control, 466 .dpp_dppclk_control = dpp1_dppclk_control,
467 .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
467}; 468};
468 469
469static struct dpp_caps dcn10_dpp_cap = { 470static struct dpp_caps dcn10_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 07003d9c6bba..17b062a8f88a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -113,7 +113,8 @@
113 SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ 113 SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
114 SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ 114 SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
115 SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ 115 SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
116 SRI(DPP_CONTROL, DPP_TOP, id) 116 SRI(DPP_CONTROL, DPP_TOP, id), \
117 SRI(CM_HDR_MULT_COEF, CM, id)
117 118
118 119
119 120
@@ -308,7 +309,8 @@
308 TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ 309 TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
309 TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ 310 TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
310 TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ 311 TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
311 TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh) 312 TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
313 TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
312 314
313#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\ 315#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\
314 TF_REG_LIST_SH_MASK_DCN(mask_sh),\ 316 TF_REG_LIST_SH_MASK_DCN(mask_sh),\
@@ -1012,7 +1014,8 @@
1012 type CUR0_COLOR0; \ 1014 type CUR0_COLOR0; \
1013 type CUR0_COLOR1; \ 1015 type CUR0_COLOR1; \
1014 type DPPCLK_RATE_CONTROL; \ 1016 type DPPCLK_RATE_CONTROL; \
1015 type DPP_CLOCK_ENABLE; 1017 type DPP_CLOCK_ENABLE; \
1018 type CM_HDR_MULT_COEF;
1016 1019
1017struct dcn_dpp_shift { 1020struct dcn_dpp_shift {
1018 TF_REG_FIELD_LIST(uint8_t) 1021 TF_REG_FIELD_LIST(uint8_t)
@@ -1258,7 +1261,8 @@ struct dcn_dpp_mask {
1258 uint32_t CURSOR0_CONTROL; \ 1261 uint32_t CURSOR0_CONTROL; \
1259 uint32_t CURSOR0_COLOR0; \ 1262 uint32_t CURSOR0_COLOR0; \
1260 uint32_t CURSOR0_COLOR1; \ 1263 uint32_t CURSOR0_COLOR1; \
1261 uint32_t DPP_CONTROL; 1264 uint32_t DPP_CONTROL; \
1265 uint32_t CM_HDR_MULT_COEF;
1262 1266
1263struct dcn_dpp_registers { 1267struct dcn_dpp_registers {
1264 DPP_COMMON_REG_VARIABLE_LIST 1268 DPP_COMMON_REG_VARIABLE_LIST
@@ -1414,6 +1418,10 @@ void dpp1_dppclk_control(
1414 bool dppclk_div, 1418 bool dppclk_div,
1415 bool enable); 1419 bool enable);
1416 1420
1421void dpp1_set_hdr_multiplier(
1422 struct dpp *dpp_base,
1423 uint32_t multiplier);
1424
1417void dpp1_construct(struct dcn10_dpp *dpp1, 1425void dpp1_construct(struct dcn10_dpp *dpp1,
1418 struct dc_context *ctx, 1426 struct dc_context *ctx,
1419 uint32_t inst, 1427 uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index bd3fcdfb79c5..fb32975e4b67 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -804,3 +804,12 @@ void dpp1_program_input_lut(
804 REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2); 804 REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
805 REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num); 805 REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
806} 806}
807
808void dpp1_set_hdr_multiplier(
809 struct dpp *dpp_base,
810 uint32_t multiplier)
811{
812 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
813
814 REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
815}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1907ade1574a..8b0f6b8a5627 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -220,10 +220,34 @@ static void enable_power_gating_plane(
220static void disable_vga( 220static void disable_vga(
221 struct dce_hwseq *hws) 221 struct dce_hwseq *hws)
222{ 222{
223 unsigned int in_vga1_mode = 0;
224 unsigned int in_vga2_mode = 0;
225 unsigned int in_vga3_mode = 0;
226 unsigned int in_vga4_mode = 0;
227
228 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
229 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
230 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
231 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
232
233 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
234 in_vga3_mode == 0 && in_vga4_mode == 0)
235 return;
236
223 REG_WRITE(D1VGA_CONTROL, 0); 237 REG_WRITE(D1VGA_CONTROL, 0);
224 REG_WRITE(D2VGA_CONTROL, 0); 238 REG_WRITE(D2VGA_CONTROL, 0);
225 REG_WRITE(D3VGA_CONTROL, 0); 239 REG_WRITE(D3VGA_CONTROL, 0);
226 REG_WRITE(D4VGA_CONTROL, 0); 240 REG_WRITE(D4VGA_CONTROL, 0);
241
242 /* HW Engineer's Notes:
243 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
244 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
245 *
246 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
247 * VGA_TEST_ENABLE, to leave it in the same state as before.
248 */
249 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
250 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
227} 251}
228 252
229static void dpp_pg_control( 253static void dpp_pg_control(
@@ -1685,16 +1709,22 @@ static void update_dchubp_dpp(
1685 union plane_size size = plane_state->plane_size; 1709 union plane_size size = plane_state->plane_size;
1686 1710
1687 /* depends on DML calculation, DPP clock value may change dynamically */ 1711 /* depends on DML calculation, DPP clock value may change dynamically */
1712 /* If request max dpp clk is lower than current dispclk, no need to
1713 * divided by 2
1714 */
1688 if (plane_state->update_flags.bits.full_update) { 1715 if (plane_state->update_flags.bits.full_update) {
1716 bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
1717 context->bw.dcn.cur_clk.dispclk_khz / 2;
1718
1689 dpp->funcs->dpp_dppclk_control( 1719 dpp->funcs->dpp_dppclk_control(
1690 dpp, 1720 dpp,
1691 context->bw.dcn.calc_clk.max_dppclk_khz < 1721 should_divided_by_2,
1692 context->bw.dcn.calc_clk.dispclk_khz,
1693 true); 1722 true);
1694 1723
1695 dc->current_state->bw.dcn.cur_clk.max_dppclk_khz = 1724 dc->current_state->bw.dcn.cur_clk.dppclk_khz =
1696 context->bw.dcn.calc_clk.max_dppclk_khz; 1725 should_divided_by_2 ?
1697 context->bw.dcn.cur_clk.max_dppclk_khz = context->bw.dcn.calc_clk.max_dppclk_khz; 1726 context->bw.dcn.cur_clk.dispclk_khz / 2 :
1727 context->bw.dcn.cur_clk.dispclk_khz;
1698 } 1728 }
1699 1729
1700 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG 1730 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -1780,14 +1810,62 @@ static void update_dchubp_dpp(
1780 hubp->funcs->set_blank(hubp, false); 1810 hubp->funcs->set_blank(hubp, false);
1781} 1811}
1782 1812
1813static void dcn10_otg_blank(
1814 struct dc *dc,
1815 struct stream_resource stream_res,
1816 struct dc_stream_state *stream,
1817 bool blank)
1818{
1819 enum dc_color_space color_space;
1820 struct tg_color black_color = {0};
1821
1822 /* program otg blank color */
1823 color_space = stream->output_color_space;
1824 color_space_to_black_color(dc, color_space, &black_color);
1825
1826 if (stream_res.tg->funcs->set_blank_color)
1827 stream_res.tg->funcs->set_blank_color(
1828 stream_res.tg,
1829 &black_color);
1830
1831 if (!blank) {
1832 if (stream_res.tg->funcs->set_blank)
1833 stream_res.tg->funcs->set_blank(stream_res.tg, blank);
1834 if (stream_res.abm)
1835 stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level);
1836 } else if (blank) {
1837 if (stream_res.abm)
1838 stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm);
1839 if (stream_res.tg->funcs->set_blank)
1840 stream_res.tg->funcs->set_blank(stream_res.tg, blank);
1841 }
1842}
1843
1844static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
1845{
1846 struct fixed31_32 multiplier = dal_fixed31_32_from_fraction(
1847 pipe_ctx->plane_state->sdr_white_level, 80);
1848 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
1849 struct custom_float_format fmt;
1850
1851 fmt.exponenta_bits = 6;
1852 fmt.mantissa_bits = 12;
1853 fmt.sign = true;
1854
1855 if (pipe_ctx->plane_state->sdr_white_level > 80)
1856 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
1857
1858 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
1859 pipe_ctx->plane_res.dpp, hw_mult);
1860}
1783 1861
1784static void program_all_pipe_in_tree( 1862static void program_all_pipe_in_tree(
1785 struct dc *dc, 1863 struct dc *dc,
1786 struct pipe_ctx *pipe_ctx, 1864 struct pipe_ctx *pipe_ctx,
1787 struct dc_state *context) 1865 struct dc_state *context)
1788{ 1866{
1789
1790 if (pipe_ctx->top_pipe == NULL) { 1867 if (pipe_ctx->top_pipe == NULL) {
1868 bool blank = !is_pipe_tree_visible(pipe_ctx);
1791 1869
1792 pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset; 1870 pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
1793 pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start; 1871 pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
@@ -1798,10 +1876,8 @@ static void program_all_pipe_in_tree(
1798 pipe_ctx->stream_res.tg->funcs->program_global_sync( 1876 pipe_ctx->stream_res.tg->funcs->program_global_sync(
1799 pipe_ctx->stream_res.tg); 1877 pipe_ctx->stream_res.tg);
1800 1878
1801 if (pipe_ctx->stream_res.tg->funcs->set_blank) 1879 dcn10_otg_blank(dc, pipe_ctx->stream_res,
1802 pipe_ctx->stream_res.tg->funcs->set_blank( 1880 pipe_ctx->stream, blank);
1803 pipe_ctx->stream_res.tg,
1804 !is_pipe_tree_visible(pipe_ctx));
1805 } 1881 }
1806 1882
1807 if (pipe_ctx->plane_state != NULL) { 1883 if (pipe_ctx->plane_state != NULL) {
@@ -1810,6 +1886,8 @@ static void program_all_pipe_in_tree(
1810 1886
1811 update_dchubp_dpp(dc, pipe_ctx, context); 1887 update_dchubp_dpp(dc, pipe_ctx, context);
1812 1888
1889 set_hdr_multiplier(pipe_ctx);
1890
1813 if (pipe_ctx->plane_state->update_flags.bits.full_update || 1891 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
1814 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || 1892 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
1815 pipe_ctx->plane_state->update_flags.bits.gamma_change) 1893 pipe_ctx->plane_state->update_flags.bits.gamma_change)
@@ -1836,16 +1914,10 @@ static void dcn10_pplib_apply_display_requirements(
1836{ 1914{
1837 struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; 1915 struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
1838 1916
1839 pp_display_cfg->all_displays_in_sync = false;/*todo*/
1840 pp_display_cfg->nb_pstate_switch_disable = false;
1841 pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz; 1917 pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
1842 pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz; 1918 pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
1843 pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz; 1919 pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
1844 pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz; 1920 pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
1845 pp_display_cfg->avail_mclk_switch_time_us =
1846 context->bw.dcn.cur_clk.dram_ccm_us > 0 ? context->bw.dcn.cur_clk.dram_ccm_us : 0;
1847 pp_display_cfg->avail_mclk_switch_time_in_disp_active_us =
1848 context->bw.dcn.cur_clk.min_active_dram_ccm_us > 0 ? context->bw.dcn.cur_clk.min_active_dram_ccm_us : 0;
1849 pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz; 1921 pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
1850 pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz; 1922 pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
1851 dce110_fill_display_configs(context, pp_display_cfg); 1923 dce110_fill_display_configs(context, pp_display_cfg);
@@ -1908,29 +1980,23 @@ static void dcn10_apply_ctx_for_surface(
1908{ 1980{
1909 int i; 1981 int i;
1910 struct timing_generator *tg; 1982 struct timing_generator *tg;
1911 struct output_pixel_processor *opp;
1912 bool removed_pipe[4] = { false }; 1983 bool removed_pipe[4] = { false };
1913 unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000; 1984 unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
1914 bool program_water_mark = false; 1985 bool program_water_mark = false;
1915 struct dc_context *ctx = dc->ctx; 1986 struct dc_context *ctx = dc->ctx;
1916
1917 struct pipe_ctx *top_pipe_to_program = 1987 struct pipe_ctx *top_pipe_to_program =
1918 find_top_pipe_for_stream(dc, context, stream); 1988 find_top_pipe_for_stream(dc, context, stream);
1919 1989
1920 if (!top_pipe_to_program) 1990 if (!top_pipe_to_program)
1921 return; 1991 return;
1922 1992
1923 opp = top_pipe_to_program->stream_res.opp;
1924
1925 tg = top_pipe_to_program->stream_res.tg; 1993 tg = top_pipe_to_program->stream_res.tg;
1926 1994
1927 dcn10_pipe_control_lock(dc, top_pipe_to_program, true); 1995 dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
1928 1996
1929 if (num_planes == 0) { 1997 if (num_planes == 0) {
1930
1931 /* OTG blank before remove all front end */ 1998 /* OTG blank before remove all front end */
1932 if (tg->funcs->set_blank) 1999 dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
1933 tg->funcs->set_blank(tg, true);
1934 } 2000 }
1935 2001
1936 /* Disconnect unused mpcc */ 2002 /* Disconnect unused mpcc */
@@ -2056,6 +2122,101 @@ static void dcn10_apply_ctx_for_surface(
2056*/ 2122*/
2057} 2123}
2058 2124
2125static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
2126{
2127 return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
2128}
2129
2130static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
2131{
2132 bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
2133 context->bw.dcn.calc_clk.dppclk_khz;
2134 bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
2135 context->bw.dcn.cur_clk.dispclk_khz;
2136 int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
2137 bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
2138 context->bw.dcn.cur_clk.dppclk_khz;
2139
2140 /* increase clock, looking for div is 0 for current, request div is 1*/
2141 if (dispclk_increase) {
2142 /* already divided by 2, no need to reach target clk with 2 steps*/
2143 if (cur_dpp_div)
2144 return context->bw.dcn.calc_clk.dispclk_khz;
2145
2146 /* request disp clk is lower than maximum supported dpp clk,
2147 * no need to reach target clk with two steps.
2148 */
2149 if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
2150 return context->bw.dcn.calc_clk.dispclk_khz;
2151
2152 /* target dpp clk not request divided by 2, still within threshold */
2153 if (!request_dpp_div)
2154 return context->bw.dcn.calc_clk.dispclk_khz;
2155
2156 } else {
2157 /* decrease clock, looking for current dppclk divided by 2,
2158 * request dppclk not divided by 2.
2159 */
2160
2161 /* current dpp clk not divided by 2, no need to ramp*/
2162 if (!cur_dpp_div)
2163 return context->bw.dcn.calc_clk.dispclk_khz;
2164
2165 /* current disp clk is lower than current maximum dpp clk,
2166 * no need to ramp
2167 */
2168 if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
2169 return context->bw.dcn.calc_clk.dispclk_khz;
2170
2171 /* request dpp clk need to be divided by 2 */
2172 if (request_dpp_div)
2173 return context->bw.dcn.calc_clk.dispclk_khz;
2174 }
2175
2176 return disp_clk_threshold;
2177}
2178
2179static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
2180{
2181 int i;
2182 bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
2183 context->bw.dcn.calc_clk.dppclk_khz;
2184
2185 int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
2186
2187 /* set disp clk to dpp clk threshold */
2188 dc->res_pool->display_clock->funcs->set_clock(
2189 dc->res_pool->display_clock,
2190 dispclk_to_dpp_threshold);
2191
2192 /* update request dpp clk division option */
2193 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2194 struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
2195
2196 if (!pipe_ctx->plane_state)
2197 continue;
2198
2199 pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
2200 pipe_ctx->plane_res.dpp,
2201 request_dpp_div,
2202 true);
2203 }
2204
2205 /* If target clk not same as dppclk threshold, set to target clock */
2206 if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
2207 dc->res_pool->display_clock->funcs->set_clock(
2208 dc->res_pool->display_clock,
2209 context->bw.dcn.calc_clk.dispclk_khz);
2210 }
2211
2212 context->bw.dcn.cur_clk.dispclk_khz =
2213 context->bw.dcn.calc_clk.dispclk_khz;
2214 context->bw.dcn.cur_clk.dppclk_khz =
2215 context->bw.dcn.calc_clk.dppclk_khz;
2216 context->bw.dcn.cur_clk.max_supported_dppclk_khz =
2217 context->bw.dcn.calc_clk.max_supported_dppclk_khz;
2218}
2219
2059static void dcn10_set_bandwidth( 2220static void dcn10_set_bandwidth(
2060 struct dc *dc, 2221 struct dc *dc,
2061 struct dc_state *context, 2222 struct dc_state *context,
@@ -2073,32 +2234,32 @@ static void dcn10_set_bandwidth(
2073 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) 2234 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
2074 return; 2235 return;
2075 2236
2076 if (decrease_allowed || context->bw.dcn.calc_clk.dispclk_khz 2237 if (should_set_clock(
2077 > dc->current_state->bw.dcn.cur_clk.dispclk_khz) { 2238 decrease_allowed,
2078 dc->res_pool->display_clock->funcs->set_clock( 2239 context->bw.dcn.calc_clk.dcfclk_khz,
2079 dc->res_pool->display_clock, 2240 dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
2080 context->bw.dcn.calc_clk.dispclk_khz);
2081 context->bw.dcn.cur_clk.dispclk_khz =
2082 context->bw.dcn.calc_clk.dispclk_khz;
2083 }
2084 if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_khz
2085 > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
2086 context->bw.dcn.cur_clk.dcfclk_khz = 2241 context->bw.dcn.cur_clk.dcfclk_khz =
2087 context->bw.dcn.calc_clk.dcfclk_khz; 2242 context->bw.dcn.calc_clk.dcfclk_khz;
2088 smu_req.hard_min_dcefclk_khz = 2243 smu_req.hard_min_dcefclk_khz =
2089 context->bw.dcn.calc_clk.dcfclk_khz; 2244 context->bw.dcn.calc_clk.dcfclk_khz;
2090 } 2245 }
2091 if (decrease_allowed || context->bw.dcn.calc_clk.fclk_khz 2246
2092 > dc->current_state->bw.dcn.cur_clk.fclk_khz) { 2247 if (should_set_clock(
2248 decrease_allowed,
2249 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
2250 dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
2251 context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
2252 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
2253 }
2254
2255 if (should_set_clock(
2256 decrease_allowed,
2257 context->bw.dcn.calc_clk.fclk_khz,
2258 dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
2093 context->bw.dcn.cur_clk.fclk_khz = 2259 context->bw.dcn.cur_clk.fclk_khz =
2094 context->bw.dcn.calc_clk.fclk_khz; 2260 context->bw.dcn.calc_clk.fclk_khz;
2095 smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz; 2261 smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
2096 } 2262 }
2097 if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz
2098 > dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz) {
2099 context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
2100 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
2101 }
2102 2263
2103 smu_req.display_count = context->stream_count; 2264 smu_req.display_count = context->stream_count;
2104 2265
@@ -2107,17 +2268,17 @@ static void dcn10_set_bandwidth(
2107 2268
2108 *smu_req_cur = smu_req; 2269 *smu_req_cur = smu_req;
2109 2270
2110 /* Decrease in freq is increase in period so opposite comparison for dram_ccm */ 2271 /* make sure dcf clk is before dpp clk to
2111 if (decrease_allowed || context->bw.dcn.calc_clk.dram_ccm_us 2272 * make sure we have enough voltage to run dpp clk
2112 < dc->current_state->bw.dcn.cur_clk.dram_ccm_us) { 2273 */
2113 context->bw.dcn.cur_clk.dram_ccm_us = 2274 if (should_set_clock(
2114 context->bw.dcn.calc_clk.dram_ccm_us; 2275 decrease_allowed,
2115 } 2276 context->bw.dcn.calc_clk.dispclk_khz,
2116 if (decrease_allowed || context->bw.dcn.calc_clk.min_active_dram_ccm_us 2277 dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
2117 < dc->current_state->bw.dcn.cur_clk.min_active_dram_ccm_us) { 2278
2118 context->bw.dcn.cur_clk.min_active_dram_ccm_us = 2279 ramp_up_dispclk_with_dpp(dc, context);
2119 context->bw.dcn.calc_clk.min_active_dram_ccm_us;
2120 } 2280 }
2281
2121 dcn10_pplib_apply_display_requirements(dc, context); 2282 dcn10_pplib_apply_display_requirements(dc, context);
2122 2283
2123 if (dc->debug.sanity_checks) { 2284 if (dc->debug.sanity_checks) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index c4a564cb56b9..02bd664aed3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -440,7 +440,11 @@ static const struct dc_debug debug_defaults_drv = {
440 .timing_trace = false, 440 .timing_trace = false,
441 .clock_trace = true, 441 .clock_trace = true,
442 442
443 .min_disp_clk_khz = 300000, 443 /* raven smu dones't allow 0 disp clk,
444 * smu min disp clk limit is 50Mhz
445 * keep min disp clk 100Mhz avoid smu hang
446 */
447 .min_disp_clk_khz = 100000,
444 448
445 .disable_pplib_clock_request = true, 449 .disable_pplib_clock_request = true,
446 .disable_pplib_wm_range = false, 450 .disable_pplib_wm_range = false,
@@ -963,6 +967,7 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
963 967
964 idle_pipe->stream = head_pipe->stream; 968 idle_pipe->stream = head_pipe->stream;
965 idle_pipe->stream_res.tg = head_pipe->stream_res.tg; 969 idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
970 idle_pipe->stream_res.abm = head_pipe->stream_res.abm;
966 idle_pipe->stream_res.opp = head_pipe->stream_res.opp; 971 idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
967 972
968 idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; 973 idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 189052e911fc..48400d642610 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include "display_rq_dlg_helpers.h" 26#include "display_rq_dlg_helpers.h"
27#include "dml_logger.h"
27 28
28void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param) 29void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
29{ 30{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
index b2847bc469fe..f78cbae9db88 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
@@ -31,8 +31,6 @@
31#include "display_mode_structs.h" 31#include "display_mode_structs.h"
32#include "display_mode_enums.h" 32#include "display_mode_enums.h"
33 33
34#define dml_print(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
35#define DTRACE(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
36 34
37double dml_round(double a); 35double dml_round(double a);
38 36
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index e68086b8a22f..f9cf08357989 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -28,6 +28,7 @@
28 28
29#include "dml_common_defs.h" 29#include "dml_common_defs.h"
30#include "../calcs/dcn_calc_math.h" 30#include "../calcs/dcn_calc_math.h"
31#include "dml_logger.h"
31 32
32static inline double dml_min(double a, double b) 33static inline double dml_min(double a, double b)
33{ 34{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h b/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h
new file mode 100644
index 000000000000..465859b77248
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26
27#ifndef __DML_LOGGER_H_
28#define __DML_LOGGER_H_
29
30#define DC_LOGGER \
31 mode_lib->logger
32
33#define dml_print(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
34#define DTRACE(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
35
36#endif
37
38
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index b8f05384a897..8c51ad70cace 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -194,6 +194,8 @@ struct stream_resource {
194 194
195 struct pixel_clk_params pix_clk_params; 195 struct pixel_clk_params pix_clk_params;
196 struct encoder_info_frame encoder_info_frame; 196 struct encoder_info_frame encoder_info_frame;
197
198 struct abm *abm;
197}; 199};
198 200
199struct plane_resource { 201struct plane_resource {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
index ae2399f16d1c..a9bfe9ff8ce6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -130,6 +130,9 @@ enum bw_defines {
130 130
131struct bw_calcs_dceip { 131struct bw_calcs_dceip {
132 enum bw_calcs_version version; 132 enum bw_calcs_version version;
133 uint32_t percent_of_ideal_port_bw_received_after_urgent_latency;
134 uint32_t max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation;
135 uint32_t max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation;
133 bool large_cursor; 136 bool large_cursor;
134 uint32_t cursor_max_outstanding_group_num; 137 uint32_t cursor_max_outstanding_group_num;
135 bool dmif_pipe_en_fbc_chunk_tracker; 138 bool dmif_pipe_en_fbc_chunk_tracker;
@@ -230,6 +233,7 @@ struct bw_calcs_vbios {
230 233
231struct bw_calcs_data { 234struct bw_calcs_data {
232 /* data for all displays */ 235 /* data for all displays */
236 bool display_synchronization_enabled;
233 uint32_t number_of_displays; 237 uint32_t number_of_displays;
234 enum bw_defines underlay_surface_type; 238 enum bw_defines underlay_surface_type;
235 enum bw_defines panning_and_bezel_adjustment; 239 enum bw_defines panning_and_bezel_adjustment;
@@ -241,6 +245,7 @@ struct bw_calcs_data {
241 bool d1_display_write_back_dwb_enable; 245 bool d1_display_write_back_dwb_enable;
242 enum bw_defines d1_underlay_mode; 246 enum bw_defines d1_underlay_mode;
243 247
248 bool increase_voltage_to_support_mclk_switch;
244 bool cpup_state_change_enable; 249 bool cpup_state_change_enable;
245 bool cpuc_state_change_enable; 250 bool cpuc_state_change_enable;
246 bool nbp_state_change_enable; 251 bool nbp_state_change_enable;
@@ -449,6 +454,7 @@ struct bw_calcs_data {
449 struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8]; 454 struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8];
450 struct bw_fixed min_dram_speed_change_margin[3][8]; 455 struct bw_fixed min_dram_speed_change_margin[3][8];
451 struct bw_fixed dispclk_required_for_dram_speed_change[3][8]; 456 struct bw_fixed dispclk_required_for_dram_speed_change[3][8];
457 struct bw_fixed dispclk_required_for_dram_speed_change_pipe[3][8];
452 struct bw_fixed blackout_duration_margin[3][8]; 458 struct bw_fixed blackout_duration_margin[3][8];
453 struct bw_fixed dispclk_required_for_blackout_duration[3][8]; 459 struct bw_fixed dispclk_required_for_blackout_duration[3][8];
454 struct bw_fixed dispclk_required_for_blackout_recovery[3][8]; 460 struct bw_fixed dispclk_required_for_blackout_recovery[3][8];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index c5aae2daf442..99995608b620 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -132,6 +132,9 @@ struct dpp_funcs {
132 const struct dc_cursor_mi_param *param, 132 const struct dc_cursor_mi_param *param,
133 uint32_t width 133 uint32_t width
134 ); 134 );
135 void (*dpp_set_hdr_multiplier)(
136 struct dpp *dpp_base,
137 uint32_t multiplier);
135 138
136 void (*dpp_dppclk_control)( 139 void (*dpp_dppclk_control)(
137 struct dpp *dpp_base, 140 struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index b727f5eeb3a9..427796bdc14a 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -98,6 +98,7 @@ enum dc_log_type {
98 LOG_EVENT_UNDERFLOW, 98 LOG_EVENT_UNDERFLOW,
99 LOG_IF_TRACE, 99 LOG_IF_TRACE,
100 LOG_PERF_TRACE, 100 LOG_PERF_TRACE,
101 LOG_PROFILING,
101 102
102 LOG_SECTION_TOTAL_COUNT 103 LOG_SECTION_TOTAL_COUNT
103}; 104};
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 57d5c2575de1..e7e374f56864 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1267,7 +1267,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
1267 bool ret = false; 1267 bool ret = false;
1268 struct pwl_float_data_ex *rgb_regamma = NULL; 1268 struct pwl_float_data_ex *rgb_regamma = NULL;
1269 1269
1270 if (trans == TRANSFER_FUNCTION_UNITY) { 1270 if (trans == TRANSFER_FUNCTION_UNITY ||
1271 trans == TRANSFER_FUNCTION_LINEAR) {
1271 points->end_exponent = 0; 1272 points->end_exponent = 0;
1272 points->x_point_at_y1_red = 1; 1273 points->x_point_at_y1_red = 1;
1273 points->x_point_at_y1_green = 1; 1274 points->x_point_at_y1_green = 1;
@@ -1337,7 +1338,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
1337 bool ret = false; 1338 bool ret = false;
1338 struct pwl_float_data_ex *rgb_degamma = NULL; 1339 struct pwl_float_data_ex *rgb_degamma = NULL;
1339 1340
1340 if (trans == TRANSFER_FUNCTION_UNITY) { 1341 if (trans == TRANSFER_FUNCTION_UNITY ||
1342 trans == TRANSFER_FUNCTION_LINEAR) {
1341 1343
1342 for (i = 0; i <= MAX_HW_POINTS ; i++) { 1344 for (i = 0; i <= MAX_HW_POINTS ; i++) {
1343 points->red[i] = coordinates_x[i].x; 1345 points->red[i] = coordinates_x[i].x;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b4723af368a5..27d4003aa2c7 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -33,7 +33,7 @@
33/* Refresh rate ramp at a fixed rate of 65 Hz/second */ 33/* Refresh rate ramp at a fixed rate of 65 Hz/second */
34#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65) 34#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
35/* Number of elements in the render times cache array */ 35/* Number of elements in the render times cache array */
36#define RENDER_TIMES_MAX_COUNT 20 36#define RENDER_TIMES_MAX_COUNT 10
37/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ 37/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
38#define BTR_EXIT_MARGIN 2000 38#define BTR_EXIT_MARGIN 2000
39/* Number of consecutive frames to check before entering/exiting fixed refresh*/ 39/* Number of consecutive frames to check before entering/exiting fixed refresh*/
@@ -46,13 +46,15 @@
46 46
47#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal" 47#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal"
48 48
49#define FREESYNC_DEFAULT_REGKEY "LCDFreeSyncDefault"
50
49struct gradual_static_ramp { 51struct gradual_static_ramp {
50 bool ramp_is_active; 52 bool ramp_is_active;
51 bool ramp_direction_is_up; 53 bool ramp_direction_is_up;
52 unsigned int ramp_current_frame_duration_in_ns; 54 unsigned int ramp_current_frame_duration_in_ns;
53}; 55};
54 56
55struct time_cache { 57struct freesync_time {
56 /* video (48Hz feature) related */ 58 /* video (48Hz feature) related */
57 unsigned int update_duration_in_ns; 59 unsigned int update_duration_in_ns;
58 60
@@ -64,6 +66,9 @@ struct time_cache {
64 66
65 unsigned int render_times_index; 67 unsigned int render_times_index;
66 unsigned int render_times[RENDER_TIMES_MAX_COUNT]; 68 unsigned int render_times[RENDER_TIMES_MAX_COUNT];
69
70 unsigned int min_window;
71 unsigned int max_window;
67}; 72};
68 73
69struct below_the_range { 74struct below_the_range {
@@ -98,11 +103,14 @@ struct freesync_state {
98 bool static_screen; 103 bool static_screen;
99 bool video; 104 bool video;
100 105
106 unsigned int vmin;
107 unsigned int vmax;
108
109 struct freesync_time time;
110
101 unsigned int nominal_refresh_rate_in_micro_hz; 111 unsigned int nominal_refresh_rate_in_micro_hz;
102 bool windowed_fullscreen; 112 bool windowed_fullscreen;
103 113
104 struct time_cache time;
105
106 struct gradual_static_ramp static_ramp; 114 struct gradual_static_ramp static_ramp;
107 struct below_the_range btr; 115 struct below_the_range btr;
108 struct fixed_refresh fixed_refresh; 116 struct fixed_refresh fixed_refresh;
@@ -119,14 +127,16 @@ struct freesync_entity {
119struct freesync_registry_options { 127struct freesync_registry_options {
120 bool drr_external_supported; 128 bool drr_external_supported;
121 bool drr_internal_supported; 129 bool drr_internal_supported;
130 bool lcd_freesync_default_set;
131 int lcd_freesync_default_value;
122}; 132};
123 133
124struct core_freesync { 134struct core_freesync {
125 struct mod_freesync public; 135 struct mod_freesync public;
126 struct dc *dc; 136 struct dc *dc;
137 struct freesync_registry_options opts;
127 struct freesync_entity *map; 138 struct freesync_entity *map;
128 int num_entities; 139 int num_entities;
129 struct freesync_registry_options opts;
130}; 140};
131 141
132#define MOD_FREESYNC_TO_CORE(mod_freesync)\ 142#define MOD_FREESYNC_TO_CORE(mod_freesync)\
@@ -146,7 +156,7 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
146 goto fail_alloc_context; 156 goto fail_alloc_context;
147 157
148 core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS, 158 core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
149 GFP_KERNEL); 159 GFP_KERNEL);
150 160
151 if (core_freesync->map == NULL) 161 if (core_freesync->map == NULL)
152 goto fail_alloc_map; 162 goto fail_alloc_map;
@@ -183,6 +193,16 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
183 (data & 1) ? false : true; 193 (data & 1) ? false : true;
184 } 194 }
185 195
196 if (dm_read_persistent_data(dc->ctx, NULL, NULL,
197 FREESYNC_DEFAULT_REGKEY,
198 &data, sizeof(data), &flag)) {
199 core_freesync->opts.lcd_freesync_default_set = true;
200 core_freesync->opts.lcd_freesync_default_value = data;
201 } else {
202 core_freesync->opts.lcd_freesync_default_set = false;
203 core_freesync->opts.lcd_freesync_default_value = 0;
204 }
205
186 return &core_freesync->public; 206 return &core_freesync->public;
187 207
188fail_construct: 208fail_construct:
@@ -288,6 +308,18 @@ bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
288 core_freesync->map[core_freesync->num_entities].user_enable. 308 core_freesync->map[core_freesync->num_entities].user_enable.
289 enable_for_video = 309 enable_for_video =
290 (persistent_freesync_enable & 4) ? true : false; 310 (persistent_freesync_enable & 4) ? true : false;
311 /* If FreeSync display and LCDFreeSyncDefault is set, use as default values write back to userenable */
312 } else if (caps->supported && (core_freesync->opts.lcd_freesync_default_set)) {
313 core_freesync->map[core_freesync->num_entities].user_enable.enable_for_gaming =
314 (core_freesync->opts.lcd_freesync_default_value & 1) ? true : false;
315 core_freesync->map[core_freesync->num_entities].user_enable.enable_for_static =
316 (core_freesync->opts.lcd_freesync_default_value & 2) ? true : false;
317 core_freesync->map[core_freesync->num_entities].user_enable.enable_for_video =
318 (core_freesync->opts.lcd_freesync_default_value & 4) ? true : false;
319 dm_write_persistent_data(dc->ctx, stream->sink,
320 FREESYNC_REGISTRY_NAME,
321 "userenable", &core_freesync->opts.lcd_freesync_default_value,
322 sizeof(int), &flag);
291 } else { 323 } else {
292 core_freesync->map[core_freesync->num_entities].user_enable. 324 core_freesync->map[core_freesync->num_entities].user_enable.
293 enable_for_gaming = false; 325 enable_for_gaming = false;
@@ -330,6 +362,25 @@ bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
330 return true; 362 return true;
331} 363}
332 364
365static void adjust_vmin_vmax(struct core_freesync *core_freesync,
366 struct dc_stream_state **streams,
367 int num_streams,
368 int map_index,
369 unsigned int v_total_min,
370 unsigned int v_total_max)
371{
372 if (num_streams == 0 || streams == NULL || num_streams > 1)
373 return;
374
375 core_freesync->map[map_index].state.vmin = v_total_min;
376 core_freesync->map[map_index].state.vmax = v_total_max;
377
378 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
379 num_streams, v_total_min,
380 v_total_max);
381}
382
383
333static void update_stream_freesync_context(struct core_freesync *core_freesync, 384static void update_stream_freesync_context(struct core_freesync *core_freesync,
334 struct dc_stream_state *stream) 385 struct dc_stream_state *stream)
335{ 386{
@@ -588,9 +639,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
588 update_stream_freesync_context(core_freesync, 639 update_stream_freesync_context(core_freesync,
589 streams[stream_idx]); 640 streams[stream_idx]);
590 641
591 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 642 adjust_vmin_vmax(core_freesync, streams,
592 num_streams, v_total_min, 643 num_streams, map_index,
593 v_total_max); 644 v_total_min,
645 v_total_max);
594 646
595 return true; 647 return true;
596 648
@@ -613,9 +665,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
613 core_freesync, 665 core_freesync,
614 streams[stream_idx]); 666 streams[stream_idx]);
615 667
616 dc_stream_adjust_vmin_vmax( 668 adjust_vmin_vmax(
617 core_freesync->dc, streams, 669 core_freesync, streams,
618 num_streams, v_total_nominal, 670 num_streams, map_index,
671 v_total_nominal,
619 v_total_nominal); 672 v_total_nominal);
620 } 673 }
621 return true; 674 return true;
@@ -632,9 +685,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
632 core_freesync, 685 core_freesync,
633 streams[stream_idx]); 686 streams[stream_idx]);
634 687
635 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 688 adjust_vmin_vmax(core_freesync, streams,
636 num_streams, v_total_nominal, 689 num_streams, map_index,
637 v_total_nominal); 690 v_total_nominal,
691 v_total_nominal);
638 692
639 /* Reset the cached variables */ 693 /* Reset the cached variables */
640 reset_freesync_state_variables(state); 694 reset_freesync_state_variables(state);
@@ -650,9 +704,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
650 * not support freesync because a former stream has 704 * not support freesync because a former stream has
651 * be programmed 705 * be programmed
652 */ 706 */
653 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 707 adjust_vmin_vmax(core_freesync, streams,
654 num_streams, v_total_nominal, 708 num_streams, map_index,
655 v_total_nominal); 709 v_total_nominal,
710 v_total_nominal);
656 /* Reset the cached variables */ 711 /* Reset the cached variables */
657 reset_freesync_state_variables(state); 712 reset_freesync_state_variables(state);
658 } 713 }
@@ -769,8 +824,9 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
769 vmin = inserted_frame_v_total; 824 vmin = inserted_frame_v_total;
770 825
771 /* Program V_TOTAL */ 826 /* Program V_TOTAL */
772 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 827 adjust_vmin_vmax(core_freesync, streams,
773 num_streams, vmin, vmax); 828 num_streams, index,
829 vmin, vmax);
774 } 830 }
775 831
776 if (state->btr.frame_counter > 0) 832 if (state->btr.frame_counter > 0)
@@ -804,9 +860,10 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
804 update_stream_freesync_context(core_freesync, streams[0]); 860 update_stream_freesync_context(core_freesync, streams[0]);
805 861
806 /* Program static screen ramp values */ 862 /* Program static screen ramp values */
807 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 863 adjust_vmin_vmax(core_freesync, streams,
808 num_streams, v_total, 864 num_streams, index,
809 v_total); 865 v_total,
866 v_total);
810 867
811 triggers.overlay_update = true; 868 triggers.overlay_update = true;
812 triggers.surface_update = true; 869 triggers.surface_update = true;
@@ -1063,9 +1120,9 @@ bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
1063 max_refresh); 1120 max_refresh);
1064 1121
1065 /* Program vtotal min/max */ 1122 /* Program vtotal min/max */
1066 dc_stream_adjust_vmin_vmax(core_freesync->dc, &streams, 1, 1123 adjust_vmin_vmax(core_freesync, &streams, 1, index,
1067 state->freesync_range.vmin, 1124 state->freesync_range.vmin,
1068 state->freesync_range.vmax); 1125 state->freesync_range.vmax);
1069 } 1126 }
1070 1127
1071 if (min_refresh != 0 && 1128 if (min_refresh != 0 &&
@@ -1399,11 +1456,9 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
1399 } else { 1456 } else {
1400 1457
1401 vmin = state->freesync_range.vmin; 1458 vmin = state->freesync_range.vmin;
1402
1403 vmax = vmin; 1459 vmax = vmin;
1404 1460 adjust_vmin_vmax(core_freesync, &stream, map_index,
1405 dc_stream_adjust_vmin_vmax(core_freesync->dc, &stream, 1461 1, vmin, vmax);
1406 1, vmin, vmax);
1407 } 1462 }
1408} 1463}
1409 1464
@@ -1457,3 +1512,43 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
1457 1512
1458 } 1513 }
1459} 1514}
1515
1516void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
1517 struct dc_stream_state **streams, int num_streams,
1518 unsigned int *v_total_min, unsigned int *v_total_max,
1519 unsigned int *event_triggers,
1520 unsigned int *window_min, unsigned int *window_max,
1521 unsigned int *lfc_mid_point_in_us,
1522 unsigned int *inserted_frames,
1523 unsigned int *inserted_duration_in_us)
1524{
1525 unsigned int stream_index, map_index;
1526 struct core_freesync *core_freesync = NULL;
1527
1528 if (mod_freesync == NULL)
1529 return;
1530
1531 core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
1532
1533 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1534
1535 map_index = map_index_from_stream(core_freesync,
1536 streams[stream_index]);
1537
1538 if (core_freesync->map[map_index].caps->supported) {
1539 struct freesync_state state =
1540 core_freesync->map[map_index].state;
1541 *v_total_min = state.vmin;
1542 *v_total_max = state.vmax;
1543 *event_triggers = 0;
1544 *window_min = state.time.min_window;
1545 *window_max = state.time.max_window;
1546 *lfc_mid_point_in_us = state.btr.mid_point_in_us;
1547 *inserted_frames = state.btr.frames_to_insert;
1548 *inserted_duration_in_us =
1549 state.btr.inserted_frame_duration_in_us;
1550 }
1551
1552 }
1553}
1554
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 84b53425f2c8..f083e1619dbe 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -164,4 +164,13 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
164 struct dc_stream_state **streams, int num_streams, 164 struct dc_stream_state **streams, int num_streams,
165 unsigned int curr_time_stamp); 165 unsigned int curr_time_stamp);
166 166
167void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
168 struct dc_stream_state **streams, int num_streams,
169 unsigned int *v_total_min, unsigned int *v_total_max,
170 unsigned int *event_triggers,
171 unsigned int *window_min, unsigned int *window_max,
172 unsigned int *lfc_mid_point_in_us,
173 unsigned int *inserted_frames,
174 unsigned int *inserted_duration_in_us);
175
167#endif 176#endif
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
new file mode 100644
index 000000000000..3230e2adb870
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef MODULES_INC_MOD_STATS_H_
27#define MODULES_INC_MOD_STATS_H_
28
29#include "dm_services.h"
30
31struct mod_stats {
32 int dummy;
33};
34
35struct mod_stats_caps {
36 bool dummy;
37};
38
39struct mod_stats *mod_stats_create(struct dc *dc);
40
41void mod_stats_destroy(struct mod_stats *mod_stats);
42
43bool mod_stats_init(struct mod_stats *mod_stats);
44
45void mod_stats_dump(struct mod_stats *mod_stats);
46
47void mod_stats_reset_data(struct mod_stats *mod_stats);
48
49void mod_stats_update_flip(struct mod_stats *mod_stats,
50 unsigned long timestamp_in_ns);
51
52void mod_stats_update_vupdate(struct mod_stats *mod_stats,
53 unsigned long timestamp_in_ns);
54
55void mod_stats_update_freesync(struct mod_stats *mod_stats,
56 unsigned int v_total_min,
57 unsigned int v_total_max,
58 unsigned int event_triggers,
59 unsigned int window_min,
60 unsigned int window_max,
61 unsigned int lfc_mid_point_in_us,
62 unsigned int inserted_frames,
63 unsigned int inserted_frame_duration_in_us);
64
65#endif /* MODULES_INC_MOD_STATS_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
new file mode 100644
index 000000000000..041f87b73d5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -0,0 +1,334 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "mod_stats.h"
27#include "dm_services.h"
28#include "dc.h"
29#include "core_types.h"
30
31#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
32#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000001
33#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
34
35#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
36#define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
37#define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000
38
39#define MOD_STATS_NUM_VSYNCS 5
40
41struct stats_time_cache {
42 unsigned long flip_timestamp_in_ns;
43 unsigned long vupdate_timestamp_in_ns;
44
45 unsigned int render_time_in_us;
46 unsigned int avg_render_time_in_us_last_ten;
47 unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
48 unsigned int num_vsync_between_flips;
49
50 unsigned int flip_to_vsync_time_in_us;
51 unsigned int vsync_to_flip_time_in_us;
52
53 unsigned int min_window;
54 unsigned int max_window;
55 unsigned int v_total_min;
56 unsigned int v_total_max;
57 unsigned int event_triggers;
58
59 unsigned int lfc_mid_point_in_us;
60 unsigned int num_frames_inserted;
61 unsigned int inserted_duration_in_us;
62
63 unsigned int flags;
64};
65
66struct core_stats {
67 struct mod_stats public;
68 struct dc *dc;
69
70 struct stats_time_cache *time;
71 unsigned int index;
72
73 bool enabled;
74 unsigned int entries;
75};
76
77#define MOD_STATS_TO_CORE(mod_stats)\
78 container_of(mod_stats, struct core_stats, public)
79
80bool mod_stats_init(struct mod_stats *mod_stats)
81{
82 bool result = false;
83 struct core_stats *core_stats = NULL;
84 struct dc *dc = NULL;
85
86 if (mod_stats == NULL)
87 return false;
88
89 core_stats = MOD_STATS_TO_CORE(mod_stats);
90 dc = core_stats->dc;
91
92 return result;
93}
94
95struct mod_stats *mod_stats_create(struct dc *dc)
96{
97 struct core_stats *core_stats = NULL;
98 struct persistent_data_flag flag;
99 unsigned int reg_data;
100 int i = 0;
101
102 core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
103
104 if (core_stats == NULL)
105 goto fail_alloc_context;
106
107 if (dc == NULL)
108 goto fail_construct;
109
110 core_stats->dc = dc;
111
112 core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
113 if (dm_read_persistent_data(dc->ctx, NULL, NULL,
114 DAL_STATS_ENABLE_REGKEY,
115 &reg_data, sizeof(unsigned int), &flag))
116 core_stats->enabled = reg_data;
117
118 core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
119 if (dm_read_persistent_data(dc->ctx, NULL, NULL,
120 DAL_STATS_ENTRIES_REGKEY,
121 &reg_data, sizeof(unsigned int), &flag)) {
122 if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
123 core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
124 else
125 core_stats->entries = reg_data;
126 }
127
128 core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
129 GFP_KERNEL);
130
131 if (core_stats->time == NULL)
132 goto fail_construct;
133
134 /* Purposely leave index 0 unused so we don't need special logic to
135 * handle calculation cases that depend on previous flip data.
136 */
137 core_stats->index = 1;
138
139 return &core_stats->public;
140
141fail_construct:
142 kfree(core_stats);
143
144fail_alloc_context:
145 return NULL;
146}
147
148void mod_stats_destroy(struct mod_stats *mod_stats)
149{
150 if (mod_stats != NULL) {
151 struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
152
153 if (core_stats->time != NULL)
154 kfree(core_stats->time);
155
156 kfree(core_stats);
157 }
158}
159
160void mod_stats_dump(struct mod_stats *mod_stats)
161{
162 struct dc *dc = NULL;
163 struct dal_logger *logger = NULL;
164 struct core_stats *core_stats = NULL;
165 struct stats_time_cache *time = NULL;
166 unsigned int index = 0;
167
168 if (mod_stats == NULL)
169 return;
170
171 core_stats = MOD_STATS_TO_CORE(mod_stats);
172 dc = core_stats->dc;
173 logger = dc->ctx->logger;
174 time = core_stats->time;
175
176 //LogEntry* pLog = GetLog()->Open(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
177
178 //if (!pLog->IsDummyEntry())
179 {
180 dm_logger_write(logger, LOG_PROFILING, "==Display Caps==\n");
181 dm_logger_write(logger, LOG_PROFILING, "\n");
182 dm_logger_write(logger, LOG_PROFILING, "\n");
183
184 dm_logger_write(logger, LOG_PROFILING, "==Stats==\n");
185 dm_logger_write(logger, LOG_PROFILING,
186 "render avgRender minWindow midPoint maxWindow vsyncToFlip flipToVsync #vsyncBetweenFlip #frame insertDuration vTotalMin vTotalMax eventTrigs vSyncTime1 vSyncTime2 vSyncTime3 vSyncTime4 vSyncTime5 flags\n");
187
188 for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
189 dm_logger_write(logger, LOG_PROFILING,
190 "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n",
191 time[i].render_time_in_us,
192 time[i].avg_render_time_in_us_last_ten,
193 time[i].min_window,
194 time[i].lfc_mid_point_in_us,
195 time[i].max_window,
196 time[i].vsync_to_flip_time_in_us,
197 time[i].flip_to_vsync_time_in_us,
198 time[i].num_vsync_between_flips,
199 time[i].num_frames_inserted,
200 time[i].inserted_duration_in_us,
201 time[i].v_total_min,
202 time[i].v_total_max,
203 time[i].event_triggers,
204 time[i].v_sync_time_in_us[0],
205 time[i].v_sync_time_in_us[1],
206 time[i].v_sync_time_in_us[2],
207 time[i].v_sync_time_in_us[3],
208 time[i].v_sync_time_in_us[4],
209 time[i].flags);
210 }
211 }
212 //GetLog()->Close(pLog);
213 //GetLog()->UnSetLogMask(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
214}
215
216void mod_stats_reset_data(struct mod_stats *mod_stats)
217{
218 struct core_stats *core_stats = NULL;
219 struct stats_time_cache *time = NULL;
220 unsigned int index = 0;
221
222 if (mod_stats == NULL)
223 return;
224
225 core_stats = MOD_STATS_TO_CORE(mod_stats);
226
227 memset(core_stats->time, 0,
228 sizeof(struct stats_time_cache) * core_stats->entries);
229
230 core_stats->index = 0;
231}
232
233void mod_stats_update_flip(struct mod_stats *mod_stats,
234 unsigned long timestamp_in_ns)
235{
236 struct core_stats *core_stats = NULL;
237 struct stats_time_cache *time = NULL;
238 unsigned int index = 0;
239
240 if (mod_stats == NULL)
241 return;
242
243 core_stats = MOD_STATS_TO_CORE(mod_stats);
244
245 if (core_stats->index >= core_stats->entries)
246 return;
247
248 time = core_stats->time;
249 index = core_stats->index;
250
251 time[index].flip_timestamp_in_ns = timestamp_in_ns;
252 time[index].render_time_in_us =
253 timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
254
255 if (index >= 10) {
256 for (unsigned int i = 0; i < 10; i++)
257 time[index].avg_render_time_in_us_last_ten +=
258 time[index - i].render_time_in_us;
259 time[index].avg_render_time_in_us_last_ten /= 10;
260 }
261
262 if (time[index].num_vsync_between_flips > 0)
263 time[index].vsync_to_flip_time_in_us =
264 timestamp_in_ns - time[index].vupdate_timestamp_in_ns;
265 else
266 time[index].vsync_to_flip_time_in_us =
267 timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
268
269 core_stats->index++;
270}
271
272void mod_stats_update_vupdate(struct mod_stats *mod_stats,
273 unsigned long timestamp_in_ns)
274{
275 struct core_stats *core_stats = NULL;
276 struct stats_time_cache *time = NULL;
277 unsigned int index = 0;
278
279 if (mod_stats == NULL)
280 return;
281
282 core_stats = MOD_STATS_TO_CORE(mod_stats);
283
284 if (core_stats->index >= core_stats->entries)
285 return;
286
287 time = core_stats->time;
288 index = core_stats->index;
289
290 time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
291 if (time[index].num_vsync_between_flips < MOD_STATS_NUM_VSYNCS)
292 time[index].v_sync_time_in_us[time[index].num_vsync_between_flips] =
293 timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
294 time[index].flip_to_vsync_time_in_us =
295 timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
296
297 time[index].num_vsync_between_flips++;
298}
299
300void mod_stats_update_freesync(struct mod_stats *mod_stats,
301 unsigned int v_total_min,
302 unsigned int v_total_max,
303 unsigned int event_triggers,
304 unsigned int window_min,
305 unsigned int window_max,
306 unsigned int lfc_mid_point_in_us,
307 unsigned int inserted_frames,
308 unsigned int inserted_duration_in_us)
309{
310 struct core_stats *core_stats = NULL;
311 struct stats_time_cache *time = NULL;
312 unsigned int index = 0;
313
314 if (mod_stats == NULL)
315 return;
316
317 core_stats = MOD_STATS_TO_CORE(mod_stats);
318
319 if (core_stats->index >= core_stats->entries)
320 return;
321
322 time = core_stats->time;
323 index = core_stats->index;
324
325 time[index].v_total_min = v_total_min;
326 time[index].v_total_max = v_total_max;
327 time[index].event_triggers = event_triggers;
328 time[index].min_window = window_min;
329 time[index].max_window = window_max;
330 time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
331 time[index].num_frames_inserted = inserted_frames;
332 time[index].inserted_duration_in_us = inserted_duration_in_us;
333}
334