diff options
author | Aric Cyr <aric.cyr@amd.com> | 2016-12-29 15:27:12 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-09-26 17:09:40 -0400 |
commit | ab2541b67395088b9de8ebf3943ef9ef86bccc41 (patch) | |
tree | c4f25d2560d5f619d8a151f65c43a99f86592267 /drivers/gpu/drm/amd/display | |
parent | 624d7c4708b27be2dc095579394efadd80f090dd (diff) |
drm/amd/display: Remove dc_target object
dc_target does not fit well into the DRM framework, so it has been removed.
This will prevent the driver from leveraging the pipe-split
code for tiled displays, so tiled displays will have to be handled at a
higher level. Most places that used dc_target now directly use dc_stream
instead.
Signed-off-by: Aric Cyr <aric.cyr@amd.com>
Acked-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/display')
19 files changed, 974 insertions, 1334 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2af4ac0bffcb..214cd38b8135 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -68,12 +68,12 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) | |||
68 | else { | 68 | else { |
69 | struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; | 69 | struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; |
70 | 70 | ||
71 | if (NULL == acrtc->target) { | 71 | if (NULL == acrtc->stream) { |
72 | DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc); | 72 | DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc); |
73 | return 0; | 73 | return 0; |
74 | } | 74 | } |
75 | 75 | ||
76 | return dc_target_get_vblank_counter(acrtc->target); | 76 | return dc_stream_get_vblank_counter(acrtc->stream); |
77 | } | 77 | } |
78 | } | 78 | } |
79 | 79 | ||
@@ -85,12 +85,12 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, | |||
85 | else { | 85 | else { |
86 | struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; | 86 | struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; |
87 | 87 | ||
88 | if (NULL == acrtc->target) { | 88 | if (NULL == acrtc->stream) { |
89 | DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc); | 89 | DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc); |
90 | return 0; | 90 | return 0; |
91 | } | 91 | } |
92 | 92 | ||
93 | return dc_target_get_scanoutpos(acrtc->target, vbl, position); | 93 | return dc_stream_get_scanoutpos(acrtc->stream, vbl, position); |
94 | } | 94 | } |
95 | 95 | ||
96 | return 0; | 96 | return 0; |
@@ -461,7 +461,7 @@ static int dm_suspend(void *handle) | |||
461 | drm_modeset_lock_all(adev->ddev); | 461 | drm_modeset_lock_all(adev->ddev); |
462 | list_for_each_entry(crtc, &adev->ddev->mode_config.crtc_list, head) { | 462 | list_for_each_entry(crtc, &adev->ddev->mode_config.crtc_list, head) { |
463 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | 463 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
464 | if (acrtc->target) | 464 | if (acrtc->stream) |
465 | drm_crtc_vblank_off(crtc); | 465 | drm_crtc_vblank_off(crtc); |
466 | } | 466 | } |
467 | drm_modeset_unlock_all(adev->ddev); | 467 | drm_modeset_unlock_all(adev->ddev); |
@@ -655,7 +655,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev ) | |||
655 | drm_modeset_lock_all(ddev); | 655 | drm_modeset_lock_all(ddev); |
656 | list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { | 656 | list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { |
657 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | 657 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
658 | if (acrtc->target) | 658 | if (acrtc->stream) |
659 | drm_crtc_vblank_on(crtc); | 659 | drm_crtc_vblank_on(crtc); |
660 | } | 660 | } |
661 | drm_modeset_unlock_all(ddev); | 661 | drm_modeset_unlock_all(ddev); |
@@ -740,7 +740,7 @@ void amdgpu_dm_update_connector_after_detect( | |||
740 | if (aconnector->base.force != DRM_FORCE_UNSPECIFIED | 740 | if (aconnector->base.force != DRM_FORCE_UNSPECIFIED |
741 | && aconnector->dc_em_sink) { | 741 | && aconnector->dc_em_sink) { |
742 | 742 | ||
743 | /* For S3 resume with headless use eml_sink to fake target | 743 | /* For S3 resume with headless use eml_sink to fake stream |
744 | * because on resume connecotr->sink is set ti NULL | 744 | * because on resume connecotr->sink is set ti NULL |
745 | */ | 745 | */ |
746 | mutex_lock(&dev->mode_config.mutex); | 746 | mutex_lock(&dev->mode_config.mutex); |
@@ -1184,7 +1184,7 @@ int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) | |||
1184 | return -1; | 1184 | return -1; |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | for (i = 0; i < dm->dc->caps.max_targets; i++) { | 1187 | for (i = 0; i < dm->dc->caps.max_streams; i++) { |
1188 | acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); | 1188 | acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); |
1189 | if (!acrtc) | 1189 | if (!acrtc) |
1190 | goto fail; | 1190 | goto fail; |
@@ -1199,7 +1199,7 @@ int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) | |||
1199 | } | 1199 | } |
1200 | } | 1200 | } |
1201 | 1201 | ||
1202 | dm->display_indexes_num = dm->dc->caps.max_targets; | 1202 | dm->display_indexes_num = dm->dc->caps.max_streams; |
1203 | 1203 | ||
1204 | /* loops over all connectors on the board */ | 1204 | /* loops over all connectors on the board */ |
1205 | for (i = 0; i < link_cnt; i++) { | 1205 | for (i = 0; i < link_cnt; i++) { |
@@ -1318,7 +1318,7 @@ static void dm_page_flip(struct amdgpu_device *adev, | |||
1318 | int crtc_id, u64 crtc_base, bool async) | 1318 | int crtc_id, u64 crtc_base, bool async) |
1319 | { | 1319 | { |
1320 | struct amdgpu_crtc *acrtc; | 1320 | struct amdgpu_crtc *acrtc; |
1321 | struct dc_target *target; | 1321 | const struct dc_stream *stream; |
1322 | struct dc_flip_addrs addr = { {0} }; | 1322 | struct dc_flip_addrs addr = { {0} }; |
1323 | 1323 | ||
1324 | /* | 1324 | /* |
@@ -1336,7 +1336,7 @@ static void dm_page_flip(struct amdgpu_device *adev, | |||
1336 | * a little longer to lock up all cores. | 1336 | * a little longer to lock up all cores. |
1337 | * | 1337 | * |
1338 | * The reason we should lock on dal_mutex is so that we can be sure | 1338 | * The reason we should lock on dal_mutex is so that we can be sure |
1339 | * nobody messes with acrtc->target after we read and check its value. | 1339 | * nobody messes with acrtc->stream after we read and check its value. |
1340 | * | 1340 | * |
1341 | * We might be able to fix our concurrency issues with a work queue | 1341 | * We might be able to fix our concurrency issues with a work queue |
1342 | * where we schedule all work items (mode_set, page_flip, etc.) and | 1342 | * where we schedule all work items (mode_set, page_flip, etc.) and |
@@ -1345,14 +1345,14 @@ static void dm_page_flip(struct amdgpu_device *adev, | |||
1345 | */ | 1345 | */ |
1346 | 1346 | ||
1347 | acrtc = adev->mode_info.crtcs[crtc_id]; | 1347 | acrtc = adev->mode_info.crtcs[crtc_id]; |
1348 | target = acrtc->target; | 1348 | stream = acrtc->stream; |
1349 | 1349 | ||
1350 | /* | 1350 | /* |
1351 | * Received a page flip call after the display has been reset. | 1351 | * Received a page flip call after the display has been reset. |
1352 | * Just return in this case. Everything should be clean-up on reset. | 1352 | * Just return in this case. Everything should be clean-up on reset. |
1353 | */ | 1353 | */ |
1354 | 1354 | ||
1355 | if (!target) { | 1355 | if (!stream) { |
1356 | WARN_ON(1); | 1356 | WARN_ON(1); |
1357 | return; | 1357 | return; |
1358 | } | 1358 | } |
@@ -1368,7 +1368,7 @@ static void dm_page_flip(struct amdgpu_device *adev, | |||
1368 | 1368 | ||
1369 | dc_flip_surface_addrs( | 1369 | dc_flip_surface_addrs( |
1370 | adev->dm.dc, | 1370 | adev->dm.dc, |
1371 | dc_target_get_status(target)->surfaces, | 1371 | dc_stream_get_status(stream)->surfaces, |
1372 | &addr, 1); | 1372 | &addr, 1); |
1373 | } | 1373 | } |
1374 | 1374 | ||
@@ -1376,25 +1376,22 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data, | |||
1376 | struct drm_file *filp) | 1376 | struct drm_file *filp) |
1377 | { | 1377 | { |
1378 | struct mod_freesync_params freesync_params; | 1378 | struct mod_freesync_params freesync_params; |
1379 | uint8_t num_targets; | 1379 | uint8_t num_streams; |
1380 | uint8_t i; | 1380 | uint8_t i; |
1381 | struct dc_target *target; | ||
1382 | 1381 | ||
1383 | struct amdgpu_device *adev = dev->dev_private; | 1382 | struct amdgpu_device *adev = dev->dev_private; |
1384 | int r = 0; | 1383 | int r = 0; |
1385 | 1384 | ||
1386 | /* Get freesync enable flag from DRM */ | 1385 | /* Get freesync enable flag from DRM */ |
1387 | 1386 | ||
1388 | num_targets = dc_get_current_target_count(adev->dm.dc); | 1387 | num_streams = dc_get_current_stream_count(adev->dm.dc); |
1389 | 1388 | ||
1390 | for (i = 0; i < num_targets; i++) { | 1389 | for (i = 0; i < num_streams; i++) { |
1391 | 1390 | const struct dc_stream *stream; | |
1392 | target = dc_get_target_at_index(adev->dm.dc, i); | 1391 | stream = dc_get_stream_at_index(adev->dm.dc, i); |
1393 | 1392 | ||
1394 | mod_freesync_update_state(adev->dm.freesync_module, | 1393 | mod_freesync_update_state(adev->dm.freesync_module, |
1395 | target->streams, | 1394 | &stream, 1, &freesync_params); |
1396 | target->stream_count, | ||
1397 | &freesync_params); | ||
1398 | } | 1395 | } |
1399 | 1396 | ||
1400 | return r; | 1397 | return r; |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c index e1b5f7d7b6da..c32fc6d26088 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c | |||
@@ -120,14 +120,14 @@ static void dm_set_cursor( | |||
120 | position.x_hotspot = xorigin; | 120 | position.x_hotspot = xorigin; |
121 | position.y_hotspot = yorigin; | 121 | position.y_hotspot = yorigin; |
122 | 122 | ||
123 | if (!dc_target_set_cursor_attributes( | 123 | if (!dc_stream_set_cursor_attributes( |
124 | amdgpu_crtc->target, | 124 | amdgpu_crtc->stream, |
125 | &attributes)) { | 125 | &attributes)) { |
126 | DRM_ERROR("DC failed to set cursor attributes\n"); | 126 | DRM_ERROR("DC failed to set cursor attributes\n"); |
127 | } | 127 | } |
128 | 128 | ||
129 | if (!dc_target_set_cursor_position( | 129 | if (!dc_stream_set_cursor_position( |
130 | amdgpu_crtc->target, | 130 | amdgpu_crtc->stream, |
131 | &position)) { | 131 | &position)) { |
132 | DRM_ERROR("DC failed to set cursor position\n"); | 132 | DRM_ERROR("DC failed to set cursor position\n"); |
133 | } | 133 | } |
@@ -260,10 +260,10 @@ static int dm_crtc_cursor_set( | |||
260 | position.y = 0; | 260 | position.y = 0; |
261 | position.hot_spot_enable = false; | 261 | position.hot_spot_enable = false; |
262 | 262 | ||
263 | if (amdgpu_crtc->target) { | 263 | if (amdgpu_crtc->stream) { |
264 | /*set cursor visible false*/ | 264 | /*set cursor visible false*/ |
265 | dc_target_set_cursor_position( | 265 | dc_stream_set_cursor_position( |
266 | amdgpu_crtc->target, | 266 | amdgpu_crtc->stream, |
267 | &position); | 267 | &position); |
268 | } | 268 | } |
269 | /*unpin old cursor buffer and update cache*/ | 269 | /*unpin old cursor buffer and update cache*/ |
@@ -346,9 +346,9 @@ static int dm_crtc_cursor_move(struct drm_crtc *crtc, | |||
346 | position.x_hotspot = xorigin; | 346 | position.x_hotspot = xorigin; |
347 | position.y_hotspot = yorigin; | 347 | position.y_hotspot = yorigin; |
348 | 348 | ||
349 | if (amdgpu_crtc->target) { | 349 | if (amdgpu_crtc->stream) { |
350 | if (!dc_target_set_cursor_position( | 350 | if (!dc_stream_set_cursor_position( |
351 | amdgpu_crtc->target, | 351 | amdgpu_crtc->stream, |
352 | &position)) { | 352 | &position)) { |
353 | DRM_ERROR("DC failed to set cursor position\n"); | 353 | DRM_ERROR("DC failed to set cursor position\n"); |
354 | return -EINVAL; | 354 | return -EINVAL; |
@@ -367,7 +367,7 @@ static void dm_crtc_cursor_reset(struct drm_crtc *crtc) | |||
367 | __func__, | 367 | __func__, |
368 | amdgpu_crtc->cursor_bo); | 368 | amdgpu_crtc->cursor_bo); |
369 | 369 | ||
370 | if (amdgpu_crtc->cursor_bo && amdgpu_crtc->target) { | 370 | if (amdgpu_crtc->cursor_bo && amdgpu_crtc->stream) { |
371 | dm_set_cursor( | 371 | dm_set_cursor( |
372 | amdgpu_crtc, | 372 | amdgpu_crtc, |
373 | amdgpu_crtc->cursor_addr, | 373 | amdgpu_crtc->cursor_addr, |
@@ -635,7 +635,7 @@ static void update_stream_scaling_settings( | |||
635 | struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private; | 635 | struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private; |
636 | enum amdgpu_rmx_type rmx_type; | 636 | enum amdgpu_rmx_type rmx_type; |
637 | 637 | ||
638 | struct rect src = { 0 }; /* viewport in target space*/ | 638 | struct rect src = { 0 }; /* viewport in composition space*/ |
639 | struct rect dst = { 0 }; /* stream addressable area */ | 639 | struct rect dst = { 0 }; /* stream addressable area */ |
640 | 640 | ||
641 | /* Full screen scaling by default */ | 641 | /* Full screen scaling by default */ |
@@ -684,11 +684,11 @@ static void dm_dc_surface_commit( | |||
684 | struct dc_surface *dc_surface; | 684 | struct dc_surface *dc_surface; |
685 | const struct dc_surface *dc_surfaces[1]; | 685 | const struct dc_surface *dc_surfaces[1]; |
686 | const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | 686 | const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
687 | struct dc_target *dc_target = acrtc->target; | 687 | const struct dc_stream *dc_stream = acrtc->stream; |
688 | 688 | ||
689 | if (!dc_target) { | 689 | if (!dc_stream) { |
690 | dm_error( | 690 | dm_error( |
691 | "%s: Failed to obtain target on crtc (%d)!\n", | 691 | "%s: Failed to obtain stream on crtc (%d)!\n", |
692 | __func__, | 692 | __func__, |
693 | acrtc->crtc_id); | 693 | acrtc->crtc_id); |
694 | goto fail; | 694 | goto fail; |
@@ -712,11 +712,11 @@ static void dm_dc_surface_commit( | |||
712 | 712 | ||
713 | dc_surfaces[0] = dc_surface; | 713 | dc_surfaces[0] = dc_surface; |
714 | 714 | ||
715 | if (false == dc_commit_surfaces_to_target( | 715 | if (false == dc_commit_surfaces_to_stream( |
716 | dc, | 716 | dc, |
717 | dc_surfaces, | 717 | dc_surfaces, |
718 | 1, | 718 | 1, |
719 | dc_target)) { | 719 | dc_stream)) { |
720 | dm_error( | 720 | dm_error( |
721 | "%s: Failed to attach surface!\n", | 721 | "%s: Failed to attach surface!\n", |
722 | __func__); | 722 | __func__); |
@@ -957,15 +957,14 @@ static void decide_crtc_timing_for_drm_display_mode( | |||
957 | } | 957 | } |
958 | } | 958 | } |
959 | 959 | ||
960 | static struct dc_target *create_target_for_sink( | 960 | static struct dc_stream *create_stream_for_sink( |
961 | const struct amdgpu_connector *aconnector, | 961 | const struct amdgpu_connector *aconnector, |
962 | const struct drm_display_mode *drm_mode, | 962 | const struct drm_display_mode *drm_mode, |
963 | const struct dm_connector_state *dm_state) | 963 | const struct dm_connector_state *dm_state) |
964 | { | 964 | { |
965 | struct drm_display_mode *preferred_mode = NULL; | 965 | struct drm_display_mode *preferred_mode = NULL; |
966 | const struct drm_connector *drm_connector; | 966 | const struct drm_connector *drm_connector; |
967 | struct dc_target *target = NULL; | 967 | struct dc_stream *stream = NULL; |
968 | struct dc_stream *stream; | ||
969 | struct drm_display_mode mode = *drm_mode; | 968 | struct drm_display_mode mode = *drm_mode; |
970 | bool native_mode_found = false; | 969 | bool native_mode_found = false; |
971 | 970 | ||
@@ -1022,19 +1021,10 @@ static struct dc_target *create_target_for_sink( | |||
1022 | drm_connector, | 1021 | drm_connector, |
1023 | aconnector->dc_sink); | 1022 | aconnector->dc_sink); |
1024 | 1023 | ||
1025 | target = dc_create_target_for_streams(&stream, 1); | 1024 | stream_create_fail: |
1026 | dc_stream_release(stream); | ||
1027 | |||
1028 | if (NULL == target) { | ||
1029 | DRM_ERROR("Failed to create target with streams!\n"); | ||
1030 | goto target_create_fail; | ||
1031 | } | ||
1032 | |||
1033 | dm_state_null: | 1025 | dm_state_null: |
1034 | drm_connector_null: | 1026 | drm_connector_null: |
1035 | target_create_fail: | 1027 | return stream; |
1036 | stream_create_fail: | ||
1037 | return target; | ||
1038 | } | 1028 | } |
1039 | 1029 | ||
1040 | void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) | 1030 | void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) |
@@ -1316,8 +1306,7 @@ int amdgpu_dm_connector_mode_valid( | |||
1316 | struct amdgpu_device *adev = connector->dev->dev_private; | 1306 | struct amdgpu_device *adev = connector->dev->dev_private; |
1317 | struct dc_validation_set val_set = { 0 }; | 1307 | struct dc_validation_set val_set = { 0 }; |
1318 | /* TODO: Unhardcode stream count */ | 1308 | /* TODO: Unhardcode stream count */ |
1319 | struct dc_stream *streams[1]; | 1309 | struct dc_stream *stream; |
1320 | struct dc_target *target; | ||
1321 | struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); | 1310 | struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); |
1322 | 1311 | ||
1323 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || | 1312 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || |
@@ -1335,39 +1324,31 @@ int amdgpu_dm_connector_mode_valid( | |||
1335 | 1324 | ||
1336 | if (NULL == dc_sink) { | 1325 | if (NULL == dc_sink) { |
1337 | DRM_ERROR("dc_sink is NULL!\n"); | 1326 | DRM_ERROR("dc_sink is NULL!\n"); |
1338 | goto stream_create_fail; | 1327 | goto null_sink; |
1339 | } | 1328 | } |
1340 | 1329 | ||
1341 | streams[0] = dc_create_stream_for_sink(dc_sink); | 1330 | stream = dc_create_stream_for_sink(dc_sink); |
1342 | 1331 | if (NULL == stream) { | |
1343 | if (NULL == streams[0]) { | ||
1344 | DRM_ERROR("Failed to create stream for sink!\n"); | 1332 | DRM_ERROR("Failed to create stream for sink!\n"); |
1345 | goto stream_create_fail; | 1333 | goto stream_create_fail; |
1346 | } | 1334 | } |
1347 | 1335 | ||
1348 | drm_mode_set_crtcinfo(mode, 0); | 1336 | drm_mode_set_crtcinfo(mode, 0); |
1349 | fill_stream_properties_from_drm_display_mode(streams[0], mode, connector); | 1337 | fill_stream_properties_from_drm_display_mode(stream, mode, connector); |
1350 | |||
1351 | target = dc_create_target_for_streams(streams, 1); | ||
1352 | val_set.target = target; | ||
1353 | |||
1354 | if (NULL == val_set.target) { | ||
1355 | DRM_ERROR("Failed to create target with stream!\n"); | ||
1356 | goto target_create_fail; | ||
1357 | } | ||
1358 | 1338 | ||
1339 | val_set.stream = stream; | ||
1359 | val_set.surface_count = 0; | 1340 | val_set.surface_count = 0; |
1360 | streams[0]->src.width = mode->hdisplay; | 1341 | stream->src.width = mode->hdisplay; |
1361 | streams[0]->src.height = mode->vdisplay; | 1342 | stream->src.height = mode->vdisplay; |
1362 | streams[0]->dst = streams[0]->src; | 1343 | stream->dst = stream->src; |
1363 | 1344 | ||
1364 | if (dc_validate_resources(adev->dm.dc, &val_set, 1)) | 1345 | if (dc_validate_resources(adev->dm.dc, &val_set, 1)) |
1365 | result = MODE_OK; | 1346 | result = MODE_OK; |
1366 | 1347 | ||
1367 | dc_target_release(target); | 1348 | dc_stream_release(stream); |
1368 | target_create_fail: | 1349 | |
1369 | dc_stream_release(streams[0]); | ||
1370 | stream_create_fail: | 1350 | stream_create_fail: |
1351 | null_sink: | ||
1371 | /* TODO: error handling*/ | 1352 | /* TODO: error handling*/ |
1372 | return result; | 1353 | return result; |
1373 | } | 1354 | } |
@@ -1562,15 +1543,14 @@ static void dm_plane_helper_cleanup_fb( | |||
1562 | } | 1543 | } |
1563 | } | 1544 | } |
1564 | 1545 | ||
1565 | int dm_create_validation_set_for_target(struct drm_connector *connector, | 1546 | int dm_create_validation_set_for_connector(struct drm_connector *connector, |
1566 | struct drm_display_mode *mode, struct dc_validation_set *val_set) | 1547 | struct drm_display_mode *mode, struct dc_validation_set *val_set) |
1567 | { | 1548 | { |
1568 | int result = MODE_ERROR; | 1549 | int result = MODE_ERROR; |
1569 | const struct dc_sink *dc_sink = | 1550 | const struct dc_sink *dc_sink = |
1570 | to_amdgpu_connector(connector)->dc_sink; | 1551 | to_amdgpu_connector(connector)->dc_sink; |
1571 | /* TODO: Unhardcode stream count */ | 1552 | /* TODO: Unhardcode stream count */ |
1572 | struct dc_stream *streams[1]; | 1553 | struct dc_stream *stream; |
1573 | struct dc_target *target; | ||
1574 | 1554 | ||
1575 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || | 1555 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || |
1576 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) | 1556 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) |
@@ -1581,35 +1561,24 @@ int dm_create_validation_set_for_target(struct drm_connector *connector, | |||
1581 | return result; | 1561 | return result; |
1582 | } | 1562 | } |
1583 | 1563 | ||
1584 | streams[0] = dc_create_stream_for_sink(dc_sink); | 1564 | stream = dc_create_stream_for_sink(dc_sink); |
1585 | 1565 | ||
1586 | if (NULL == streams[0]) { | 1566 | if (NULL == stream) { |
1587 | DRM_ERROR("Failed to create stream for sink!\n"); | 1567 | DRM_ERROR("Failed to create stream for sink!\n"); |
1588 | return result; | 1568 | return result; |
1589 | } | 1569 | } |
1590 | 1570 | ||
1591 | drm_mode_set_crtcinfo(mode, 0); | 1571 | drm_mode_set_crtcinfo(mode, 0); |
1592 | 1572 | ||
1593 | fill_stream_properties_from_drm_display_mode(streams[0], mode, connector); | 1573 | fill_stream_properties_from_drm_display_mode(stream, mode, connector); |
1594 | 1574 | ||
1595 | target = dc_create_target_for_streams(streams, 1); | 1575 | val_set->stream = stream; |
1596 | val_set->target = target; | ||
1597 | 1576 | ||
1598 | if (NULL == val_set->target) { | 1577 | stream->src.width = mode->hdisplay; |
1599 | DRM_ERROR("Failed to create target with stream!\n"); | 1578 | stream->src.height = mode->vdisplay; |
1600 | goto fail; | 1579 | stream->dst = stream->src; |
1601 | } | ||
1602 | |||
1603 | streams[0]->src.width = mode->hdisplay; | ||
1604 | streams[0]->src.height = mode->vdisplay; | ||
1605 | streams[0]->dst = streams[0]->src; | ||
1606 | 1580 | ||
1607 | return MODE_OK; | 1581 | return MODE_OK; |
1608 | |||
1609 | fail: | ||
1610 | dc_stream_release(streams[0]); | ||
1611 | return result; | ||
1612 | |||
1613 | } | 1582 | } |
1614 | 1583 | ||
1615 | static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { | 1584 | static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { |
@@ -2262,23 +2231,21 @@ static bool is_scaling_state_different( | |||
2262 | return false; | 2231 | return false; |
2263 | } | 2232 | } |
2264 | 2233 | ||
2265 | static void remove_target(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc) | 2234 | static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc) |
2266 | { | 2235 | { |
2267 | int i; | ||
2268 | |||
2269 | /* | 2236 | /* |
2270 | * we evade vblanks and pflips on crtc that | 2237 | * we evade vblanks and pflips on crtc that |
2271 | * should be changed | 2238 | * should be changed |
2272 | */ | 2239 | */ |
2273 | manage_dm_interrupts(adev, acrtc, false); | 2240 | manage_dm_interrupts(adev, acrtc, false); |
2241 | |||
2274 | /* this is the update mode case */ | 2242 | /* this is the update mode case */ |
2275 | if (adev->dm.freesync_module) | 2243 | if (adev->dm.freesync_module) |
2276 | for (i = 0; i < acrtc->target->stream_count; i++) | 2244 | mod_freesync_remove_stream(adev->dm.freesync_module, |
2277 | mod_freesync_remove_stream( | 2245 | acrtc->stream); |
2278 | adev->dm.freesync_module, | 2246 | |
2279 | acrtc->target->streams[i]); | 2247 | dc_stream_release(acrtc->stream); |
2280 | dc_target_release(acrtc->target); | 2248 | acrtc->stream = NULL; |
2281 | acrtc->target = NULL; | ||
2282 | acrtc->otg_inst = -1; | 2249 | acrtc->otg_inst = -1; |
2283 | acrtc->enabled = false; | 2250 | acrtc->enabled = false; |
2284 | } | 2251 | } |
@@ -2293,20 +2260,20 @@ int amdgpu_dm_atomic_commit( | |||
2293 | struct drm_plane *plane; | 2260 | struct drm_plane *plane; |
2294 | struct drm_plane_state *new_plane_state; | 2261 | struct drm_plane_state *new_plane_state; |
2295 | struct drm_plane_state *old_plane_state; | 2262 | struct drm_plane_state *old_plane_state; |
2296 | uint32_t i, j; | 2263 | uint32_t i; |
2297 | int32_t ret = 0; | 2264 | int32_t ret = 0; |
2298 | uint32_t commit_targets_count = 0; | 2265 | uint32_t commit_streams_count = 0; |
2299 | uint32_t new_crtcs_count = 0; | 2266 | uint32_t new_crtcs_count = 0; |
2300 | uint32_t flip_crtcs_count = 0; | 2267 | uint32_t flip_crtcs_count = 0; |
2301 | struct drm_crtc *crtc; | 2268 | struct drm_crtc *crtc; |
2302 | struct drm_crtc_state *old_crtc_state; | 2269 | struct drm_crtc_state *old_crtc_state; |
2303 | 2270 | ||
2304 | struct dc_target *commit_targets[MAX_TARGETS]; | 2271 | const struct dc_stream *commit_streams[MAX_STREAMS]; |
2305 | struct amdgpu_crtc *new_crtcs[MAX_TARGETS]; | 2272 | struct amdgpu_crtc *new_crtcs[MAX_STREAMS]; |
2306 | struct dc_target *new_target; | 2273 | const struct dc_stream *new_stream; |
2307 | struct drm_crtc *flip_crtcs[MAX_TARGETS]; | 2274 | struct drm_crtc *flip_crtcs[MAX_STREAMS]; |
2308 | struct amdgpu_flip_work *work[MAX_TARGETS] = {0}; | 2275 | struct amdgpu_flip_work *work[MAX_STREAMS] = {0}; |
2309 | struct amdgpu_bo *new_abo[MAX_TARGETS] = {0}; | 2276 | struct amdgpu_bo *new_abo[MAX_STREAMS] = {0}; |
2310 | 2277 | ||
2311 | /* In this step all new fb would be pinned */ | 2278 | /* In this step all new fb would be pinned */ |
2312 | 2279 | ||
@@ -2422,19 +2389,19 @@ int amdgpu_dm_atomic_commit( | |||
2422 | case DM_COMMIT_ACTION_DPMS_ON: | 2389 | case DM_COMMIT_ACTION_DPMS_ON: |
2423 | case DM_COMMIT_ACTION_SET: { | 2390 | case DM_COMMIT_ACTION_SET: { |
2424 | struct dm_connector_state *dm_state = NULL; | 2391 | struct dm_connector_state *dm_state = NULL; |
2425 | new_target = NULL; | 2392 | new_stream = NULL; |
2426 | 2393 | ||
2427 | if (aconnector) | 2394 | if (aconnector) |
2428 | dm_state = to_dm_connector_state(aconnector->base.state); | 2395 | dm_state = to_dm_connector_state(aconnector->base.state); |
2429 | 2396 | ||
2430 | new_target = create_target_for_sink( | 2397 | new_stream = create_stream_for_sink( |
2431 | aconnector, | 2398 | aconnector, |
2432 | &crtc->state->mode, | 2399 | &crtc->state->mode, |
2433 | dm_state); | 2400 | dm_state); |
2434 | 2401 | ||
2435 | DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); | 2402 | DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); |
2436 | 2403 | ||
2437 | if (!new_target) { | 2404 | if (!new_stream) { |
2438 | /* | 2405 | /* |
2439 | * this could happen because of issues with | 2406 | * this could happen because of issues with |
2440 | * userspace notifications delivery. | 2407 | * userspace notifications delivery. |
@@ -2450,23 +2417,23 @@ int amdgpu_dm_atomic_commit( | |||
2450 | * have a sink to keep the pipe running so that | 2417 | * have a sink to keep the pipe running so that |
2451 | * hw state is consistent with the sw state | 2418 | * hw state is consistent with the sw state |
2452 | */ | 2419 | */ |
2453 | DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n", | 2420 | DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n", |
2454 | __func__, acrtc->base.base.id); | 2421 | __func__, acrtc->base.base.id); |
2455 | break; | 2422 | break; |
2456 | } | 2423 | } |
2457 | 2424 | ||
2458 | if (acrtc->target) | 2425 | if (acrtc->stream) |
2459 | remove_target(adev, acrtc); | 2426 | remove_stream(adev, acrtc); |
2460 | 2427 | ||
2461 | /* | 2428 | /* |
2462 | * this loop saves set mode crtcs | 2429 | * this loop saves set mode crtcs |
2463 | * we needed to enable vblanks once all | 2430 | * we needed to enable vblanks once all |
2464 | * resources acquired in dc after dc_commit_targets | 2431 | * resources acquired in dc after dc_commit_streams |
2465 | */ | 2432 | */ |
2466 | new_crtcs[new_crtcs_count] = acrtc; | 2433 | new_crtcs[new_crtcs_count] = acrtc; |
2467 | new_crtcs_count++; | 2434 | new_crtcs_count++; |
2468 | 2435 | ||
2469 | acrtc->target = new_target; | 2436 | acrtc->stream = new_stream; |
2470 | acrtc->enabled = true; | 2437 | acrtc->enabled = true; |
2471 | acrtc->hw_mode = crtc->state->mode; | 2438 | acrtc->hw_mode = crtc->state->mode; |
2472 | crtc->hwmode = crtc->state->mode; | 2439 | crtc->hwmode = crtc->state->mode; |
@@ -2483,10 +2450,8 @@ int amdgpu_dm_atomic_commit( | |||
2483 | dm_state = to_dm_connector_state(aconnector->base.state); | 2450 | dm_state = to_dm_connector_state(aconnector->base.state); |
2484 | 2451 | ||
2485 | /* Scaling update */ | 2452 | /* Scaling update */ |
2486 | update_stream_scaling_settings( | 2453 | update_stream_scaling_settings(&crtc->state->mode, |
2487 | &crtc->state->mode, | 2454 | dm_state, acrtc->stream); |
2488 | dm_state, | ||
2489 | acrtc->target->streams[0]); | ||
2490 | 2455 | ||
2491 | break; | 2456 | break; |
2492 | } | 2457 | } |
@@ -2494,8 +2459,8 @@ int amdgpu_dm_atomic_commit( | |||
2494 | case DM_COMMIT_ACTION_RESET: | 2459 | case DM_COMMIT_ACTION_RESET: |
2495 | DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); | 2460 | DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); |
2496 | /* i.e. reset mode */ | 2461 | /* i.e. reset mode */ |
2497 | if (acrtc->target) | 2462 | if (acrtc->stream) |
2498 | remove_target(adev, acrtc); | 2463 | remove_stream(adev, acrtc); |
2499 | break; | 2464 | break; |
2500 | } /* switch() */ | 2465 | } /* switch() */ |
2501 | } /* for_each_crtc_in_state() */ | 2466 | } /* for_each_crtc_in_state() */ |
@@ -2504,20 +2469,20 @@ int amdgpu_dm_atomic_commit( | |||
2504 | 2469 | ||
2505 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | 2470 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
2506 | 2471 | ||
2507 | if (acrtc->target) { | 2472 | if (acrtc->stream) { |
2508 | commit_targets[commit_targets_count] = acrtc->target; | 2473 | commit_streams[commit_streams_count] = acrtc->stream; |
2509 | ++commit_targets_count; | 2474 | ++commit_streams_count; |
2510 | } | 2475 | } |
2511 | } | 2476 | } |
2512 | 2477 | ||
2513 | /* | 2478 | /* |
2514 | * Add streams after required streams from new and replaced targets | 2479 | * Add streams after required streams from new and replaced streams |
2515 | * are removed from freesync module | 2480 | * are removed from freesync module |
2516 | */ | 2481 | */ |
2517 | if (adev->dm.freesync_module) { | 2482 | if (adev->dm.freesync_module) { |
2518 | for (i = 0; i < new_crtcs_count; i++) { | 2483 | for (i = 0; i < new_crtcs_count; i++) { |
2519 | struct amdgpu_connector *aconnector = NULL; | 2484 | struct amdgpu_connector *aconnector = NULL; |
2520 | new_target = new_crtcs[i]->target; | 2485 | new_stream = new_crtcs[i]->stream; |
2521 | aconnector = | 2486 | aconnector = |
2522 | amdgpu_dm_find_first_crct_matching_connector( | 2487 | amdgpu_dm_find_first_crct_matching_connector( |
2523 | state, | 2488 | state, |
@@ -2531,22 +2496,20 @@ int amdgpu_dm_atomic_commit( | |||
2531 | continue; | 2496 | continue; |
2532 | } | 2497 | } |
2533 | 2498 | ||
2534 | for (j = 0; j < new_target->stream_count; j++) | 2499 | mod_freesync_add_stream(adev->dm.freesync_module, |
2535 | mod_freesync_add_stream( | 2500 | new_stream, &aconnector->caps); |
2536 | adev->dm.freesync_module, | ||
2537 | new_target->streams[j], &aconnector->caps); | ||
2538 | } | 2501 | } |
2539 | } | 2502 | } |
2540 | 2503 | ||
2541 | /* DC is optimized not to do anything if 'targets' didn't change. */ | 2504 | /* DC is optimized not to do anything if 'streams' didn't change. */ |
2542 | dc_commit_targets(dm->dc, commit_targets, commit_targets_count); | 2505 | dc_commit_streams(dm->dc, commit_streams, commit_streams_count); |
2543 | 2506 | ||
2544 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 2507 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
2545 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | 2508 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
2546 | 2509 | ||
2547 | if (acrtc->target != NULL) | 2510 | if (acrtc->stream != NULL) |
2548 | acrtc->otg_inst = | 2511 | acrtc->otg_inst = |
2549 | dc_target_get_status(acrtc->target)->primary_otg_inst; | 2512 | dc_stream_get_status(acrtc->stream)->primary_otg_inst; |
2550 | } | 2513 | } |
2551 | 2514 | ||
2552 | /* update planes when needed */ | 2515 | /* update planes when needed */ |
@@ -2566,7 +2529,7 @@ int amdgpu_dm_atomic_commit( | |||
2566 | 2529 | ||
2567 | /* Surfaces are created under two scenarios: | 2530 | /* Surfaces are created under two scenarios: |
2568 | * 1. This commit is not a page flip. | 2531 | * 1. This commit is not a page flip. |
2569 | * 2. This commit is a page flip, and targets are created. | 2532 | * 2. This commit is a page flip, and streams are created. |
2570 | */ | 2533 | */ |
2571 | if (!page_flip_needed( | 2534 | if (!page_flip_needed( |
2572 | plane_state, | 2535 | plane_state, |
@@ -2618,13 +2581,9 @@ int amdgpu_dm_atomic_commit( | |||
2618 | */ | 2581 | */ |
2619 | struct amdgpu_crtc *acrtc = new_crtcs[i]; | 2582 | struct amdgpu_crtc *acrtc = new_crtcs[i]; |
2620 | 2583 | ||
2621 | if (adev->dm.freesync_module) { | 2584 | if (adev->dm.freesync_module) |
2622 | for (j = 0; j < acrtc->target->stream_count; j++) | 2585 | mod_freesync_notify_mode_change( |
2623 | mod_freesync_notify_mode_change( | 2586 | adev->dm.freesync_module, &acrtc->stream, 1); |
2624 | adev->dm.freesync_module, | ||
2625 | acrtc->target->streams, | ||
2626 | acrtc->target->stream_count); | ||
2627 | } | ||
2628 | 2587 | ||
2629 | manage_dm_interrupts(adev, acrtc, true); | 2588 | manage_dm_interrupts(adev, acrtc, true); |
2630 | dm_crtc_cursor_reset(&acrtc->base); | 2589 | dm_crtc_cursor_reset(&acrtc->base); |
@@ -2682,20 +2641,19 @@ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector | |||
2682 | struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); | 2641 | struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); |
2683 | struct amdgpu_crtc *disconnected_acrtc; | 2642 | struct amdgpu_crtc *disconnected_acrtc; |
2684 | const struct dc_sink *sink; | 2643 | const struct dc_sink *sink; |
2685 | struct dc_target *commit_targets[6]; | 2644 | const struct dc_stream *commit_streams[MAX_STREAMS]; |
2686 | struct dc_target *current_target; | 2645 | const struct dc_stream *current_stream; |
2687 | uint32_t commit_targets_count = 0; | 2646 | uint32_t commit_streams_count = 0; |
2688 | int i; | ||
2689 | 2647 | ||
2690 | if (!aconnector->dc_sink || !connector->state || !connector->encoder) | 2648 | if (!aconnector->dc_sink || !connector->state || !connector->encoder) |
2691 | return; | 2649 | return; |
2692 | 2650 | ||
2693 | disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); | 2651 | disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); |
2694 | 2652 | ||
2695 | if (!disconnected_acrtc || !disconnected_acrtc->target) | 2653 | if (!disconnected_acrtc || !disconnected_acrtc->stream) |
2696 | return; | 2654 | return; |
2697 | 2655 | ||
2698 | sink = disconnected_acrtc->target->streams[0]->sink; | 2656 | sink = disconnected_acrtc->stream->sink; |
2699 | 2657 | ||
2700 | /* | 2658 | /* |
2701 | * If the previous sink is not released and different from the current, | 2659 | * If the previous sink is not released and different from the current, |
@@ -2706,8 +2664,8 @@ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector | |||
2706 | struct dm_connector_state *dm_state = | 2664 | struct dm_connector_state *dm_state = |
2707 | to_dm_connector_state(aconnector->base.state); | 2665 | to_dm_connector_state(aconnector->base.state); |
2708 | 2666 | ||
2709 | struct dc_target *new_target = | 2667 | struct dc_stream *new_stream = |
2710 | create_target_for_sink( | 2668 | create_stream_for_sink( |
2711 | aconnector, | 2669 | aconnector, |
2712 | &disconnected_acrtc->base.state->mode, | 2670 | &disconnected_acrtc->base.state->mode, |
2713 | dm_state); | 2671 | dm_state); |
@@ -2720,56 +2678,51 @@ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector | |||
2720 | manage_dm_interrupts(adev, disconnected_acrtc, false); | 2678 | manage_dm_interrupts(adev, disconnected_acrtc, false); |
2721 | /* this is the update mode case */ | 2679 | /* this is the update mode case */ |
2722 | 2680 | ||
2723 | current_target = disconnected_acrtc->target; | 2681 | current_stream = disconnected_acrtc->stream; |
2724 | 2682 | ||
2725 | disconnected_acrtc->target = new_target; | 2683 | disconnected_acrtc->stream = new_stream; |
2726 | disconnected_acrtc->enabled = true; | 2684 | disconnected_acrtc->enabled = true; |
2727 | disconnected_acrtc->hw_mode = disconnected_acrtc->base.state->mode; | 2685 | disconnected_acrtc->hw_mode = disconnected_acrtc->base.state->mode; |
2728 | 2686 | ||
2729 | commit_targets_count = 0; | 2687 | commit_streams_count = 0; |
2730 | 2688 | ||
2731 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 2689 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
2732 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | 2690 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
2733 | 2691 | ||
2734 | if (acrtc->target) { | 2692 | if (acrtc->stream) { |
2735 | commit_targets[commit_targets_count] = acrtc->target; | 2693 | commit_streams[commit_streams_count] = acrtc->stream; |
2736 | ++commit_targets_count; | 2694 | ++commit_streams_count; |
2737 | } | 2695 | } |
2738 | } | 2696 | } |
2739 | 2697 | ||
2740 | /* DC is optimized not to do anything if 'targets' didn't change. */ | 2698 | /* DC is optimized not to do anything if 'streams' didn't change. */ |
2741 | if (!dc_commit_targets(dc, commit_targets, | 2699 | if (!dc_commit_streams(dc, commit_streams, |
2742 | commit_targets_count)) { | 2700 | commit_streams_count)) { |
2743 | DRM_INFO("Failed to restore connector state!\n"); | 2701 | DRM_INFO("Failed to restore connector state!\n"); |
2744 | dc_target_release(disconnected_acrtc->target); | 2702 | dc_stream_release(disconnected_acrtc->stream); |
2745 | disconnected_acrtc->target = current_target; | 2703 | disconnected_acrtc->stream = current_stream; |
2746 | manage_dm_interrupts(adev, disconnected_acrtc, true); | 2704 | manage_dm_interrupts(adev, disconnected_acrtc, true); |
2747 | return; | 2705 | return; |
2748 | } | 2706 | } |
2749 | 2707 | ||
2750 | if (adev->dm.freesync_module) { | 2708 | if (adev->dm.freesync_module) { |
2709 | mod_freesync_remove_stream(adev->dm.freesync_module, | ||
2710 | current_stream); | ||
2751 | 2711 | ||
2752 | for (i = 0; i < current_target->stream_count; i++) | 2712 | mod_freesync_add_stream(adev->dm.freesync_module, |
2753 | mod_freesync_remove_stream( | 2713 | new_stream, &aconnector->caps); |
2754 | adev->dm.freesync_module, | ||
2755 | current_target->streams[i]); | ||
2756 | |||
2757 | for (i = 0; i < new_target->stream_count; i++) | ||
2758 | mod_freesync_add_stream( | ||
2759 | adev->dm.freesync_module, | ||
2760 | new_target->streams[i], | ||
2761 | &aconnector->caps); | ||
2762 | } | 2714 | } |
2715 | |||
2763 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 2716 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
2764 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | 2717 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
2765 | 2718 | ||
2766 | if (acrtc->target != NULL) { | 2719 | if (acrtc->stream != NULL) { |
2767 | acrtc->otg_inst = | 2720 | acrtc->otg_inst = |
2768 | dc_target_get_status(acrtc->target)->primary_otg_inst; | 2721 | dc_stream_get_status(acrtc->stream)->primary_otg_inst; |
2769 | } | 2722 | } |
2770 | } | 2723 | } |
2771 | 2724 | ||
2772 | dc_target_release(current_target); | 2725 | dc_stream_release(current_stream); |
2773 | 2726 | ||
2774 | dm_dc_surface_commit(dc, &disconnected_acrtc->base); | 2727 | dm_dc_surface_commit(dc, &disconnected_acrtc->base); |
2775 | 2728 | ||
@@ -2782,13 +2735,13 @@ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector | |||
2782 | static uint32_t add_val_sets_surface( | 2735 | static uint32_t add_val_sets_surface( |
2783 | struct dc_validation_set *val_sets, | 2736 | struct dc_validation_set *val_sets, |
2784 | uint32_t set_count, | 2737 | uint32_t set_count, |
2785 | const struct dc_target *target, | 2738 | const struct dc_stream *stream, |
2786 | const struct dc_surface *surface) | 2739 | const struct dc_surface *surface) |
2787 | { | 2740 | { |
2788 | uint32_t i = 0; | 2741 | uint32_t i = 0; |
2789 | 2742 | ||
2790 | while (i < set_count) { | 2743 | while (i < set_count) { |
2791 | if (val_sets[i].target == target) | 2744 | if (val_sets[i].stream == stream) |
2792 | break; | 2745 | break; |
2793 | ++i; | 2746 | ++i; |
2794 | } | 2747 | } |
@@ -2799,23 +2752,23 @@ static uint32_t add_val_sets_surface( | |||
2799 | return val_sets[i].surface_count; | 2752 | return val_sets[i].surface_count; |
2800 | } | 2753 | } |
2801 | 2754 | ||
2802 | static uint32_t update_in_val_sets_target( | 2755 | static uint32_t update_in_val_sets_stream( |
2803 | struct dc_validation_set *val_sets, | 2756 | struct dc_validation_set *val_sets, |
2804 | struct drm_crtc **crtcs, | 2757 | struct drm_crtc **crtcs, |
2805 | uint32_t set_count, | 2758 | uint32_t set_count, |
2806 | const struct dc_target *old_target, | 2759 | const struct dc_stream *old_stream, |
2807 | const struct dc_target *new_target, | 2760 | const struct dc_stream *new_stream, |
2808 | struct drm_crtc *crtc) | 2761 | struct drm_crtc *crtc) |
2809 | { | 2762 | { |
2810 | uint32_t i = 0; | 2763 | uint32_t i = 0; |
2811 | 2764 | ||
2812 | while (i < set_count) { | 2765 | while (i < set_count) { |
2813 | if (val_sets[i].target == old_target) | 2766 | if (val_sets[i].stream == old_stream) |
2814 | break; | 2767 | break; |
2815 | ++i; | 2768 | ++i; |
2816 | } | 2769 | } |
2817 | 2770 | ||
2818 | val_sets[i].target = new_target; | 2771 | val_sets[i].stream = new_stream; |
2819 | crtcs[i] = crtc; | 2772 | crtcs[i] = crtc; |
2820 | 2773 | ||
2821 | if (i == set_count) { | 2774 | if (i == set_count) { |
@@ -2829,12 +2782,12 @@ static uint32_t update_in_val_sets_target( | |||
2829 | static uint32_t remove_from_val_sets( | 2782 | static uint32_t remove_from_val_sets( |
2830 | struct dc_validation_set *val_sets, | 2783 | struct dc_validation_set *val_sets, |
2831 | uint32_t set_count, | 2784 | uint32_t set_count, |
2832 | const struct dc_target *target) | 2785 | const struct dc_stream *stream) |
2833 | { | 2786 | { |
2834 | int i; | 2787 | int i; |
2835 | 2788 | ||
2836 | for (i = 0; i < set_count; i++) | 2789 | for (i = 0; i < set_count; i++) |
2837 | if (val_sets[i].target == target) | 2790 | if (val_sets[i].stream == stream) |
2838 | break; | 2791 | break; |
2839 | 2792 | ||
2840 | if (i == set_count) { | 2793 | if (i == set_count) { |
@@ -2861,10 +2814,10 @@ int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
2861 | int i, j; | 2814 | int i, j; |
2862 | int ret; | 2815 | int ret; |
2863 | int set_count; | 2816 | int set_count; |
2864 | int new_target_count; | 2817 | int new_stream_count; |
2865 | struct dc_validation_set set[MAX_TARGETS] = {{ 0 }}; | 2818 | struct dc_validation_set set[MAX_STREAMS] = {{ 0 }}; |
2866 | struct dc_target *new_targets[MAX_TARGETS] = { 0 }; | 2819 | struct dc_stream *new_streams[MAX_STREAMS] = { 0 }; |
2867 | struct drm_crtc *crtc_set[MAX_TARGETS] = { 0 }; | 2820 | struct drm_crtc *crtc_set[MAX_STREAMS] = { 0 }; |
2868 | struct amdgpu_device *adev = dev->dev_private; | 2821 | struct amdgpu_device *adev = dev->dev_private; |
2869 | struct dc *dc = adev->dm.dc; | 2822 | struct dc *dc = adev->dm.dc; |
2870 | bool need_to_validate = false; | 2823 | bool need_to_validate = false; |
@@ -2880,14 +2833,14 @@ int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
2880 | ret = -EINVAL; | 2833 | ret = -EINVAL; |
2881 | 2834 | ||
2882 | /* copy existing configuration */ | 2835 | /* copy existing configuration */ |
2883 | new_target_count = 0; | 2836 | new_stream_count = 0; |
2884 | set_count = 0; | 2837 | set_count = 0; |
2885 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 2838 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
2886 | 2839 | ||
2887 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | 2840 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
2888 | 2841 | ||
2889 | if (acrtc->target) { | 2842 | if (acrtc->stream) { |
2890 | set[set_count].target = acrtc->target; | 2843 | set[set_count].stream = acrtc->stream; |
2891 | crtc_set[set_count] = crtc; | 2844 | crtc_set[set_count] = crtc; |
2892 | ++set_count; | 2845 | ++set_count; |
2893 | } | 2846 | } |
@@ -2908,7 +2861,7 @@ int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
2908 | switch (action) { | 2861 | switch (action) { |
2909 | case DM_COMMIT_ACTION_DPMS_ON: | 2862 | case DM_COMMIT_ACTION_DPMS_ON: |
2910 | case DM_COMMIT_ACTION_SET: { | 2863 | case DM_COMMIT_ACTION_SET: { |
2911 | struct dc_target *new_target = NULL; | 2864 | struct dc_stream *new_stream = NULL; |
2912 | struct drm_connector_state *conn_state = NULL; | 2865 | struct drm_connector_state *conn_state = NULL; |
2913 | struct dm_connector_state *dm_state = NULL; | 2866 | struct dm_connector_state *dm_state = NULL; |
2914 | 2867 | ||
@@ -2919,30 +2872,30 @@ int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
2919 | dm_state = to_dm_connector_state(conn_state); | 2872 | dm_state = to_dm_connector_state(conn_state); |
2920 | } | 2873 | } |
2921 | 2874 | ||
2922 | new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state); | 2875 | new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_state); |
2923 | 2876 | ||
2924 | /* | 2877 | /* |
2925 | * we can have no target on ACTION_SET if a display | 2878 | * we can have no stream on ACTION_SET if a display |
2926 | * was disconnected during S3, in this case it not and | 2879 | * was disconnected during S3, in this case it not and |
2927 | * error, the OS will be updated after detection, and | 2880 | * error, the OS will be updated after detection, and |
2928 | * do the right thing on next atomic commit | 2881 | * do the right thing on next atomic commit |
2929 | */ | 2882 | */ |
2930 | if (!new_target) { | 2883 | if (!new_stream) { |
2931 | DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n", | 2884 | DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n", |
2932 | __func__, acrtc->base.base.id); | 2885 | __func__, acrtc->base.base.id); |
2933 | break; | 2886 | break; |
2934 | } | 2887 | } |
2935 | 2888 | ||
2936 | new_targets[new_target_count] = new_target; | 2889 | new_streams[new_stream_count] = new_stream; |
2937 | set_count = update_in_val_sets_target( | 2890 | set_count = update_in_val_sets_stream( |
2938 | set, | 2891 | set, |
2939 | crtc_set, | 2892 | crtc_set, |
2940 | set_count, | 2893 | set_count, |
2941 | acrtc->target, | 2894 | acrtc->stream, |
2942 | new_target, | 2895 | new_stream, |
2943 | crtc); | 2896 | crtc); |
2944 | 2897 | ||
2945 | new_target_count++; | 2898 | new_stream_count++; |
2946 | need_to_validate = true; | 2899 | need_to_validate = true; |
2947 | break; | 2900 | break; |
2948 | } | 2901 | } |
@@ -2952,7 +2905,7 @@ int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
2952 | struct drm_connector_state *conn_state = NULL; | 2905 | struct drm_connector_state *conn_state = NULL; |
2953 | struct dm_connector_state *dm_state = NULL; | 2906 | struct dm_connector_state *dm_state = NULL; |
2954 | struct dm_connector_state *old_dm_state = NULL; | 2907 | struct dm_connector_state *old_dm_state = NULL; |
2955 | struct dc_target *new_target; | 2908 | struct dc_stream *new_stream; |
2956 | 2909 | ||
2957 | if (!aconnector) | 2910 | if (!aconnector) |
2958 | break; | 2911 | break; |
@@ -2970,24 +2923,24 @@ int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
2970 | if (!is_scaling_state_different(dm_state, old_dm_state)) | 2923 | if (!is_scaling_state_different(dm_state, old_dm_state)) |
2971 | break; | 2924 | break; |
2972 | 2925 | ||
2973 | new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state); | 2926 | new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_state); |
2974 | 2927 | ||
2975 | if (!new_target) { | 2928 | if (!new_stream) { |
2976 | DRM_ERROR("%s: Failed to create new target for crtc %d\n", | 2929 | DRM_ERROR("%s: Failed to create new stream for crtc %d\n", |
2977 | __func__, acrtc->base.base.id); | 2930 | __func__, acrtc->base.base.id); |
2978 | break; | 2931 | break; |
2979 | } | 2932 | } |
2980 | 2933 | ||
2981 | new_targets[new_target_count] = new_target; | 2934 | new_streams[new_stream_count] = new_stream; |
2982 | set_count = update_in_val_sets_target( | 2935 | set_count = update_in_val_sets_stream( |
2983 | set, | 2936 | set, |
2984 | crtc_set, | 2937 | crtc_set, |
2985 | set_count, | 2938 | set_count, |
2986 | acrtc->target, | 2939 | acrtc->stream, |
2987 | new_target, | 2940 | new_stream, |
2988 | crtc); | 2941 | crtc); |
2989 | 2942 | ||
2990 | new_target_count++; | 2943 | new_stream_count++; |
2991 | need_to_validate = true; | 2944 | need_to_validate = true; |
2992 | 2945 | ||
2993 | break; | 2946 | break; |
@@ -2995,11 +2948,11 @@ int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
2995 | case DM_COMMIT_ACTION_DPMS_OFF: | 2948 | case DM_COMMIT_ACTION_DPMS_OFF: |
2996 | case DM_COMMIT_ACTION_RESET: | 2949 | case DM_COMMIT_ACTION_RESET: |
2997 | /* i.e. reset mode */ | 2950 | /* i.e. reset mode */ |
2998 | if (acrtc->target) { | 2951 | if (acrtc->stream) { |
2999 | set_count = remove_from_val_sets( | 2952 | set_count = remove_from_val_sets( |
3000 | set, | 2953 | set, |
3001 | set_count, | 2954 | set_count, |
3002 | acrtc->target); | 2955 | acrtc->stream); |
3003 | } | 2956 | } |
3004 | break; | 2957 | break; |
3005 | } | 2958 | } |
@@ -3035,7 +2988,7 @@ int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
3035 | 2988 | ||
3036 | /* Surfaces are created under two scenarios: | 2989 | /* Surfaces are created under two scenarios: |
3037 | * 1. This commit is not a page flip. | 2990 | * 1. This commit is not a page flip. |
3038 | * 2. This commit is a page flip, and targets are created. | 2991 | * 2. This commit is a page flip, and streams are created. |
3039 | */ | 2992 | */ |
3040 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | 2993 | crtc_state = drm_atomic_get_crtc_state(state, crtc); |
3041 | if (!page_flip_needed(plane_state, old_plane_state, | 2994 | if (!page_flip_needed(plane_state, old_plane_state, |
@@ -3080,7 +3033,7 @@ int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
3080 | add_val_sets_surface( | 3033 | add_val_sets_surface( |
3081 | set, | 3034 | set, |
3082 | set_count, | 3035 | set_count, |
3083 | set[i].target, | 3036 | set[i].stream, |
3084 | surface); | 3037 | surface); |
3085 | 3038 | ||
3086 | need_to_validate = true; | 3039 | need_to_validate = true; |
@@ -3097,8 +3050,8 @@ int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
3097 | dc_surface_release(set[i].surfaces[j]); | 3050 | dc_surface_release(set[i].surfaces[j]); |
3098 | } | 3051 | } |
3099 | } | 3052 | } |
3100 | for (i = 0; i < new_target_count; i++) | 3053 | for (i = 0; i < new_stream_count; i++) |
3101 | dc_target_release(new_targets[i]); | 3054 | dc_stream_release(new_streams[i]); |
3102 | 3055 | ||
3103 | if (ret != 0) | 3056 | if (ret != 0) |
3104 | DRM_ERROR("Atomic check failed.\n"); | 3057 | DRM_ERROR("Atomic check failed.\n"); |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h index 4f7bd3bae44e..6ed1480a8bc3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h | |||
@@ -59,7 +59,7 @@ int amdgpu_dm_atomic_commit( | |||
59 | int amdgpu_dm_atomic_check(struct drm_device *dev, | 59 | int amdgpu_dm_atomic_check(struct drm_device *dev, |
60 | struct drm_atomic_state *state); | 60 | struct drm_atomic_state *state); |
61 | 61 | ||
62 | int dm_create_validation_set_for_target( | 62 | int dm_create_validation_set_for_stream( |
63 | struct drm_connector *connector, | 63 | struct drm_connector *connector, |
64 | struct drm_display_mode *mode, | 64 | struct drm_display_mode *mode, |
65 | struct dc_validation_set *val_set); | 65 | struct dc_validation_set *val_set); |
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 26e2b50e4954..2df163bc83e9 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile | |||
@@ -13,7 +13,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI | |||
13 | 13 | ||
14 | include $(AMD_DC) | 14 | include $(AMD_DC) |
15 | 15 | ||
16 | DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_target.o dc_sink.o \ | 16 | DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ |
17 | dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o | 17 | dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o |
18 | 18 | ||
19 | AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE)) | 19 | AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE)) |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index a7348573ebca..7d4299b9ee1f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c | |||
@@ -50,15 +50,6 @@ | |||
50 | #include "mem_input.h" | 50 | #include "mem_input.h" |
51 | 51 | ||
52 | /******************************************************************************* | 52 | /******************************************************************************* |
53 | * Private structures | ||
54 | ******************************************************************************/ | ||
55 | |||
56 | struct dc_target_sync_report { | ||
57 | uint32_t h_count; | ||
58 | uint32_t v_count; | ||
59 | }; | ||
60 | |||
61 | /******************************************************************************* | ||
62 | * Private functions | 53 | * Private functions |
63 | ******************************************************************************/ | 54 | ******************************************************************************/ |
64 | static void destroy_links(struct core_dc *dc) | 55 | static void destroy_links(struct core_dc *dc) |
@@ -221,7 +212,7 @@ static void stream_update_scaling( | |||
221 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream); | 212 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream); |
222 | struct core_dc *core_dc = DC_TO_CORE(dc); | 213 | struct core_dc *core_dc = DC_TO_CORE(dc); |
223 | struct validate_context *cur_ctx = core_dc->current_context; | 214 | struct validate_context *cur_ctx = core_dc->current_context; |
224 | int i, j; | 215 | int i; |
225 | 216 | ||
226 | if (src) | 217 | if (src) |
227 | stream->public.src = *src; | 218 | stream->public.src = *src; |
@@ -229,20 +220,18 @@ static void stream_update_scaling( | |||
229 | if (dst) | 220 | if (dst) |
230 | stream->public.dst = *dst; | 221 | stream->public.dst = *dst; |
231 | 222 | ||
232 | for (i = 0; i < cur_ctx->target_count; i++) { | 223 | for (i = 0; i < cur_ctx->stream_count; i++) { |
233 | struct core_target *target = cur_ctx->targets[i]; | 224 | struct core_stream *cur_stream = cur_ctx->streams[i]; |
234 | struct dc_target_status *status = &cur_ctx->target_status[i]; | ||
235 | 225 | ||
236 | for (j = 0; j < target->public.stream_count; j++) { | 226 | if (stream == cur_stream) { |
237 | if (target->public.streams[j] != dc_stream) | 227 | struct dc_stream_status *status = &cur_ctx->stream_status[i]; |
238 | continue; | ||
239 | 228 | ||
240 | if (status->surface_count) | 229 | if (status->surface_count) |
241 | if (!dc_commit_surfaces_to_target( | 230 | if (!dc_commit_surfaces_to_stream( |
242 | &core_dc->public, | 231 | &core_dc->public, |
243 | status->surfaces, | 232 | status->surfaces, |
244 | status->surface_count, | 233 | status->surface_count, |
245 | &target->public)) | 234 | &cur_stream->public)) |
246 | /* Need to debug validation */ | 235 | /* Need to debug validation */ |
247 | BREAK_TO_DEBUGGER(); | 236 | BREAK_TO_DEBUGGER(); |
248 | 237 | ||
@@ -634,7 +623,7 @@ struct dc *dc_create(const struct dc_init_data *init_params) | |||
634 | full_pipe_count = core_dc->res_pool->pipe_count; | 623 | full_pipe_count = core_dc->res_pool->pipe_count; |
635 | if (core_dc->res_pool->underlay_pipe_index >= 0) | 624 | if (core_dc->res_pool->underlay_pipe_index >= 0) |
636 | full_pipe_count--; | 625 | full_pipe_count--; |
637 | core_dc->public.caps.max_targets = min( | 626 | core_dc->public.caps.max_streams = min( |
638 | full_pipe_count, | 627 | full_pipe_count, |
639 | core_dc->res_pool->stream_enc_count); | 628 | core_dc->res_pool->stream_enc_count); |
640 | 629 | ||
@@ -675,20 +664,20 @@ static bool is_validation_required( | |||
675 | const struct validate_context *context = dc->current_context; | 664 | const struct validate_context *context = dc->current_context; |
676 | int i, j; | 665 | int i, j; |
677 | 666 | ||
678 | if (context->target_count != set_count) | 667 | if (context->stream_count != set_count) |
679 | return true; | 668 | return true; |
680 | 669 | ||
681 | for (i = 0; i < set_count; i++) { | 670 | for (i = 0; i < set_count; i++) { |
682 | 671 | ||
683 | if (set[i].surface_count != context->target_status[i].surface_count) | 672 | if (set[i].surface_count != context->stream_status[i].surface_count) |
684 | return true; | 673 | return true; |
685 | if (!is_target_unchanged(DC_TARGET_TO_CORE(set[i].target), context->targets[i])) | 674 | if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i])) |
686 | return true; | 675 | return true; |
687 | 676 | ||
688 | for (j = 0; j < set[i].surface_count; j++) { | 677 | for (j = 0; j < set[i].surface_count; j++) { |
689 | struct dc_surface temp_surf = { 0 }; | 678 | struct dc_surface temp_surf = { 0 }; |
690 | 679 | ||
691 | temp_surf = *context->target_status[i].surfaces[j]; | 680 | temp_surf = *context->stream_status[i].surfaces[j]; |
692 | temp_surf.clip_rect = set[i].surfaces[j]->clip_rect; | 681 | temp_surf.clip_rect = set[i].surfaces[j]->clip_rect; |
693 | temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x; | 682 | temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x; |
694 | temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y; | 683 | temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y; |
@@ -737,7 +726,7 @@ context_alloc_fail: | |||
737 | 726 | ||
738 | bool dc_validate_guaranteed( | 727 | bool dc_validate_guaranteed( |
739 | const struct dc *dc, | 728 | const struct dc *dc, |
740 | const struct dc_target *dc_target) | 729 | const struct dc_stream *stream) |
741 | { | 730 | { |
742 | struct core_dc *core_dc = DC_TO_CORE(dc); | 731 | struct core_dc *core_dc = DC_TO_CORE(dc); |
743 | enum dc_status result = DC_ERROR_UNEXPECTED; | 732 | enum dc_status result = DC_ERROR_UNEXPECTED; |
@@ -748,7 +737,7 @@ bool dc_validate_guaranteed( | |||
748 | goto context_alloc_fail; | 737 | goto context_alloc_fail; |
749 | 738 | ||
750 | result = core_dc->res_pool->funcs->validate_guaranteed( | 739 | result = core_dc->res_pool->funcs->validate_guaranteed( |
751 | core_dc, dc_target, context); | 740 | core_dc, stream, context); |
752 | 741 | ||
753 | resource_validate_ctx_destruct(context); | 742 | resource_validate_ctx_destruct(context); |
754 | dm_free(context); | 743 | dm_free(context); |
@@ -838,18 +827,18 @@ static void program_timing_sync( | |||
838 | } | 827 | } |
839 | } | 828 | } |
840 | 829 | ||
841 | static bool targets_changed( | 830 | static bool streams_changed( |
842 | struct core_dc *dc, | 831 | struct core_dc *dc, |
843 | struct dc_target *targets[], | 832 | const struct dc_stream *streams[], |
844 | uint8_t target_count) | 833 | uint8_t stream_count) |
845 | { | 834 | { |
846 | uint8_t i; | 835 | uint8_t i; |
847 | 836 | ||
848 | if (target_count != dc->current_context->target_count) | 837 | if (stream_count != dc->current_context->stream_count) |
849 | return true; | 838 | return true; |
850 | 839 | ||
851 | for (i = 0; i < dc->current_context->target_count; i++) { | 840 | for (i = 0; i < dc->current_context->stream_count; i++) { |
852 | if (&dc->current_context->targets[i]->public != targets[i]) | 841 | if (&dc->current_context->streams[i]->public != streams[i]) |
853 | return true; | 842 | return true; |
854 | } | 843 | } |
855 | 844 | ||
@@ -860,74 +849,72 @@ static void fill_display_configs( | |||
860 | const struct validate_context *context, | 849 | const struct validate_context *context, |
861 | struct dm_pp_display_configuration *pp_display_cfg) | 850 | struct dm_pp_display_configuration *pp_display_cfg) |
862 | { | 851 | { |
863 | uint8_t i, j, k; | 852 | int j; |
864 | uint8_t num_cfgs = 0; | 853 | int num_cfgs = 0; |
865 | |||
866 | for (i = 0; i < context->target_count; i++) { | ||
867 | const struct core_target *target = context->targets[i]; | ||
868 | |||
869 | for (j = 0; j < target->public.stream_count; j++) { | ||
870 | const struct core_stream *stream = | ||
871 | DC_STREAM_TO_CORE(target->public.streams[j]); | ||
872 | struct dm_pp_single_disp_config *cfg = | ||
873 | &pp_display_cfg->disp_configs[num_cfgs]; | ||
874 | const struct pipe_ctx *pipe_ctx = NULL; | ||
875 | |||
876 | for (k = 0; k < MAX_PIPES; k++) | ||
877 | if (stream == | ||
878 | context->res_ctx.pipe_ctx[k].stream) { | ||
879 | pipe_ctx = &context->res_ctx.pipe_ctx[k]; | ||
880 | break; | ||
881 | } | ||
882 | 854 | ||
883 | ASSERT(pipe_ctx != NULL); | 855 | for (j = 0; j < context->stream_count; j++) { |
884 | 856 | int k; | |
885 | num_cfgs++; | 857 | |
886 | cfg->signal = pipe_ctx->stream->signal; | 858 | const struct core_stream *stream = context->streams[j]; |
887 | cfg->pipe_idx = pipe_ctx->pipe_idx; | 859 | struct dm_pp_single_disp_config *cfg = |
888 | cfg->src_height = stream->public.src.height; | 860 | &pp_display_cfg->disp_configs[num_cfgs]; |
889 | cfg->src_width = stream->public.src.width; | 861 | const struct pipe_ctx *pipe_ctx = NULL; |
890 | cfg->ddi_channel_mapping = | 862 | |
891 | stream->sink->link->ddi_channel_mapping.raw; | 863 | for (k = 0; k < MAX_PIPES; k++) |
892 | cfg->transmitter = | 864 | if (stream == context->res_ctx.pipe_ctx[k].stream) { |
893 | stream->sink->link->link_enc->transmitter; | 865 | pipe_ctx = &context->res_ctx.pipe_ctx[k]; |
894 | cfg->link_settings.lane_count = stream->sink->link->public.cur_link_settings.lane_count; | 866 | break; |
895 | cfg->link_settings.link_rate = stream->sink->link->public.cur_link_settings.link_rate; | 867 | } |
896 | cfg->link_settings.link_spread = stream->sink->link->public.cur_link_settings.link_spread; | 868 | |
897 | cfg->sym_clock = stream->phy_pix_clk; | 869 | ASSERT(pipe_ctx != NULL); |
898 | /* Round v_refresh*/ | 870 | |
899 | cfg->v_refresh = stream->public.timing.pix_clk_khz * 1000; | 871 | num_cfgs++; |
900 | cfg->v_refresh /= stream->public.timing.h_total; | 872 | cfg->signal = pipe_ctx->stream->signal; |
901 | cfg->v_refresh = (cfg->v_refresh + stream->public.timing.v_total / 2) | 873 | cfg->pipe_idx = pipe_ctx->pipe_idx; |
902 | / stream->public.timing.v_total; | 874 | cfg->src_height = stream->public.src.height; |
903 | } | 875 | cfg->src_width = stream->public.src.width; |
876 | cfg->ddi_channel_mapping = | ||
877 | stream->sink->link->ddi_channel_mapping.raw; | ||
878 | cfg->transmitter = | ||
879 | stream->sink->link->link_enc->transmitter; | ||
880 | cfg->link_settings.lane_count = | ||
881 | stream->sink->link->public.cur_link_settings.lane_count; | ||
882 | cfg->link_settings.link_rate = | ||
883 | stream->sink->link->public.cur_link_settings.link_rate; | ||
884 | cfg->link_settings.link_spread = | ||
885 | stream->sink->link->public.cur_link_settings.link_spread; | ||
886 | cfg->sym_clock = stream->phy_pix_clk; | ||
887 | /* Round v_refresh*/ | ||
888 | cfg->v_refresh = stream->public.timing.pix_clk_khz * 1000; | ||
889 | cfg->v_refresh /= stream->public.timing.h_total; | ||
890 | cfg->v_refresh = (cfg->v_refresh + stream->public.timing.v_total / 2) | ||
891 | / stream->public.timing.v_total; | ||
904 | } | 892 | } |
893 | |||
905 | pp_display_cfg->display_count = num_cfgs; | 894 | pp_display_cfg->display_count = num_cfgs; |
906 | } | 895 | } |
907 | 896 | ||
908 | static uint32_t get_min_vblank_time_us(const struct validate_context *context) | 897 | static uint32_t get_min_vblank_time_us(const struct validate_context *context) |
909 | { | 898 | { |
910 | uint8_t i, j; | 899 | uint8_t j; |
911 | uint32_t min_vertical_blank_time = -1; | 900 | uint32_t min_vertical_blank_time = -1; |
912 | 901 | ||
913 | for (i = 0; i < context->target_count; i++) { | 902 | for (j = 0; j < context->stream_count; j++) { |
914 | const struct core_target *target = context->targets[i]; | 903 | const struct dc_stream *stream = &context->streams[j]->public; |
915 | |||
916 | for (j = 0; j < target->public.stream_count; j++) { | ||
917 | const struct dc_stream *stream = | ||
918 | target->public.streams[j]; | ||
919 | uint32_t vertical_blank_in_pixels = 0; | 904 | uint32_t vertical_blank_in_pixels = 0; |
920 | uint32_t vertical_blank_time = 0; | 905 | uint32_t vertical_blank_time = 0; |
921 | 906 | ||
922 | vertical_blank_in_pixels = stream->timing.h_total * | 907 | vertical_blank_in_pixels = stream->timing.h_total * |
923 | (stream->timing.v_total | 908 | (stream->timing.v_total |
924 | - stream->timing.v_addressable); | 909 | - stream->timing.v_addressable); |
910 | |||
925 | vertical_blank_time = vertical_blank_in_pixels | 911 | vertical_blank_time = vertical_blank_in_pixels |
926 | * 1000 / stream->timing.pix_clk_khz; | 912 | * 1000 / stream->timing.pix_clk_khz; |
913 | |||
927 | if (min_vertical_blank_time > vertical_blank_time) | 914 | if (min_vertical_blank_time > vertical_blank_time) |
928 | min_vertical_blank_time = vertical_blank_time; | 915 | min_vertical_blank_time = vertical_blank_time; |
929 | } | 916 | } |
930 | } | 917 | |
931 | return min_vertical_blank_time; | 918 | return min_vertical_blank_time; |
932 | } | 919 | } |
933 | 920 | ||
@@ -995,7 +982,7 @@ void pplib_apply_display_requirements( | |||
995 | /* TODO: is this still applicable?*/ | 982 | /* TODO: is this still applicable?*/ |
996 | if (pp_display_cfg->display_count == 1) { | 983 | if (pp_display_cfg->display_count == 1) { |
997 | const struct dc_crtc_timing *timing = | 984 | const struct dc_crtc_timing *timing = |
998 | &context->targets[0]->public.streams[0]->timing; | 985 | &context->streams[0]->public.timing; |
999 | 986 | ||
1000 | pp_display_cfg->crtc_index = | 987 | pp_display_cfg->crtc_index = |
1001 | pp_display_cfg->disp_configs[0].pipe_idx; | 988 | pp_display_cfg->disp_configs[0].pipe_idx; |
@@ -1011,34 +998,32 @@ void pplib_apply_display_requirements( | |||
1011 | 998 | ||
1012 | } | 999 | } |
1013 | 1000 | ||
1014 | bool dc_commit_targets( | 1001 | bool dc_commit_streams( |
1015 | struct dc *dc, | 1002 | struct dc *dc, |
1016 | struct dc_target *targets[], | 1003 | const struct dc_stream *streams[], |
1017 | uint8_t target_count) | 1004 | uint8_t stream_count) |
1018 | { | 1005 | { |
1019 | struct core_dc *core_dc = DC_TO_CORE(dc); | 1006 | struct core_dc *core_dc = DC_TO_CORE(dc); |
1020 | struct dc_bios *dcb = core_dc->ctx->dc_bios; | 1007 | struct dc_bios *dcb = core_dc->ctx->dc_bios; |
1021 | enum dc_status result = DC_ERROR_UNEXPECTED; | 1008 | enum dc_status result = DC_ERROR_UNEXPECTED; |
1022 | struct validate_context *context; | 1009 | struct validate_context *context; |
1023 | struct dc_validation_set set[MAX_TARGETS]; | 1010 | struct dc_validation_set set[MAX_STREAMS]; |
1024 | int i, j, k; | 1011 | int i, j, k; |
1025 | 1012 | ||
1026 | if (false == targets_changed(core_dc, targets, target_count)) | 1013 | if (false == streams_changed(core_dc, streams, stream_count)) |
1027 | return DC_OK; | 1014 | return DC_OK; |
1028 | 1015 | ||
1029 | dm_logger_write(core_dc->ctx->logger, LOG_DC, | 1016 | dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n", |
1030 | "%s: %d targets\n", | 1017 | __func__, stream_count); |
1031 | __func__, | ||
1032 | target_count); | ||
1033 | 1018 | ||
1034 | for (i = 0; i < target_count; i++) { | 1019 | for (i = 0; i < stream_count; i++) { |
1035 | struct dc_target *target = targets[i]; | 1020 | const struct dc_stream *stream = streams[i]; |
1036 | 1021 | ||
1037 | dc_target_log(target, | 1022 | dc_stream_log(stream, |
1038 | core_dc->ctx->logger, | 1023 | core_dc->ctx->logger, |
1039 | LOG_DC); | 1024 | LOG_DC); |
1040 | 1025 | ||
1041 | set[i].target = targets[i]; | 1026 | set[i].stream = stream; |
1042 | set[i].surface_count = 0; | 1027 | set[i].surface_count = 0; |
1043 | 1028 | ||
1044 | } | 1029 | } |
@@ -1047,7 +1032,7 @@ bool dc_commit_targets( | |||
1047 | if (context == NULL) | 1032 | if (context == NULL) |
1048 | goto context_alloc_fail; | 1033 | goto context_alloc_fail; |
1049 | 1034 | ||
1050 | result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, target_count, context); | 1035 | result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, stream_count, context); |
1051 | if (result != DC_OK){ | 1036 | if (result != DC_OK){ |
1052 | dm_logger_write(core_dc->ctx->logger, LOG_ERROR, | 1037 | dm_logger_write(core_dc->ctx->logger, LOG_ERROR, |
1053 | "%s: Context validation failed! dc_status:%d\n", | 1038 | "%s: Context validation failed! dc_status:%d\n", |
@@ -1068,13 +1053,12 @@ bool dc_commit_targets( | |||
1068 | 1053 | ||
1069 | program_timing_sync(core_dc, context); | 1054 | program_timing_sync(core_dc, context); |
1070 | 1055 | ||
1071 | for (i = 0; i < context->target_count; i++) { | 1056 | for (i = 0; i < context->stream_count; i++) { |
1072 | struct dc_target *dc_target = &context->targets[i]->public; | 1057 | const struct core_sink *sink = context->streams[i]->sink; |
1073 | struct core_sink *sink = DC_SINK_TO_CORE(dc_target->streams[0]->sink); | ||
1074 | 1058 | ||
1075 | for (j = 0; j < context->target_status[i].surface_count; j++) { | 1059 | for (j = 0; j < context->stream_status[i].surface_count; j++) { |
1076 | const struct dc_surface *dc_surface = | 1060 | const struct dc_surface *dc_surface = |
1077 | context->target_status[i].surfaces[j]; | 1061 | context->stream_status[i].surfaces[j]; |
1078 | 1062 | ||
1079 | for (k = 0; k < context->res_ctx.pool->pipe_count; k++) { | 1063 | for (k = 0; k < context->res_ctx.pool->pipe_count; k++) { |
1080 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[k]; | 1064 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[k]; |
@@ -1088,11 +1072,11 @@ bool dc_commit_targets( | |||
1088 | } | 1072 | } |
1089 | 1073 | ||
1090 | CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}", | 1074 | CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}", |
1091 | dc_target->streams[0]->timing.h_addressable, | 1075 | context->streams[i]->public.timing.h_addressable, |
1092 | dc_target->streams[0]->timing.v_addressable, | 1076 | context->streams[i]->public.timing.v_addressable, |
1093 | dc_target->streams[0]->timing.h_total, | 1077 | context->streams[i]->public.timing.h_total, |
1094 | dc_target->streams[0]->timing.v_total, | 1078 | context->streams[i]->public.timing.v_total, |
1095 | dc_target->streams[0]->timing.pix_clk_khz); | 1079 | context->streams[i]->public.timing.pix_clk_khz); |
1096 | } | 1080 | } |
1097 | 1081 | ||
1098 | pplib_apply_display_requirements(core_dc, | 1082 | pplib_apply_display_requirements(core_dc, |
@@ -1116,43 +1100,42 @@ context_alloc_fail: | |||
1116 | return (result == DC_OK); | 1100 | return (result == DC_OK); |
1117 | } | 1101 | } |
1118 | 1102 | ||
1119 | bool dc_pre_update_surfaces_to_target( | 1103 | bool dc_pre_update_surfaces_to_stream( |
1120 | struct dc *dc, | 1104 | struct dc *dc, |
1121 | const struct dc_surface *const *new_surfaces, | 1105 | const struct dc_surface *const *new_surfaces, |
1122 | uint8_t new_surface_count, | 1106 | uint8_t new_surface_count, |
1123 | struct dc_target *dc_target) | 1107 | const struct dc_stream *dc_stream) |
1124 | { | 1108 | { |
1125 | int i, j; | 1109 | int i, j; |
1126 | struct core_dc *core_dc = DC_TO_CORE(dc); | 1110 | struct core_dc *core_dc = DC_TO_CORE(dc); |
1127 | uint32_t prev_disp_clk = core_dc->current_context->bw_results.dispclk_khz; | 1111 | uint32_t prev_disp_clk = core_dc->current_context->bw_results.dispclk_khz; |
1128 | struct core_target *target = DC_TARGET_TO_CORE(dc_target); | 1112 | struct dc_stream_status *stream_status = NULL; |
1129 | struct dc_target_status *target_status = NULL; | ||
1130 | struct validate_context *context; | 1113 | struct validate_context *context; |
1131 | struct validate_context *temp_context; | 1114 | struct validate_context *temp_context; |
1132 | bool ret = true; | 1115 | bool ret = true; |
1133 | 1116 | ||
1134 | pre_surface_trace(dc, new_surfaces, new_surface_count); | 1117 | pre_surface_trace(dc, new_surfaces, new_surface_count); |
1135 | 1118 | ||
1136 | if (core_dc->current_context->target_count == 0) | 1119 | if (core_dc->current_context->stream_count == 0) |
1137 | return false; | 1120 | return false; |
1138 | 1121 | ||
1139 | /* Cannot commit surface to a target that is not commited */ | 1122 | /* Cannot commit surface to a stream that is not commited */ |
1140 | for (i = 0; i < core_dc->current_context->target_count; i++) | 1123 | for (i = 0; i < core_dc->current_context->stream_count; i++) |
1141 | if (target == core_dc->current_context->targets[i]) | 1124 | if (dc_stream == &core_dc->current_context->streams[i]->public) |
1142 | break; | 1125 | break; |
1143 | 1126 | ||
1144 | if (i == core_dc->current_context->target_count) | 1127 | if (i == core_dc->current_context->stream_count) |
1145 | return false; | 1128 | return false; |
1146 | 1129 | ||
1147 | target_status = &core_dc->current_context->target_status[i]; | 1130 | stream_status = &core_dc->current_context->stream_status[i]; |
1148 | 1131 | ||
1149 | if (new_surface_count == target_status->surface_count) { | 1132 | if (new_surface_count == stream_status->surface_count) { |
1150 | bool skip_pre = true; | 1133 | bool skip_pre = true; |
1151 | 1134 | ||
1152 | for (i = 0; i < target_status->surface_count; i++) { | 1135 | for (i = 0; i < stream_status->surface_count; i++) { |
1153 | struct dc_surface temp_surf = { 0 }; | 1136 | struct dc_surface temp_surf = { 0 }; |
1154 | 1137 | ||
1155 | temp_surf = *target_status->surfaces[i]; | 1138 | temp_surf = *stream_status->surfaces[i]; |
1156 | temp_surf.clip_rect = new_surfaces[i]->clip_rect; | 1139 | temp_surf.clip_rect = new_surfaces[i]->clip_rect; |
1157 | temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x; | 1140 | temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x; |
1158 | temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y; | 1141 | temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y; |
@@ -1178,13 +1161,13 @@ bool dc_pre_update_surfaces_to_target( | |||
1178 | resource_validate_ctx_copy_construct(core_dc->current_context, context); | 1161 | resource_validate_ctx_copy_construct(core_dc->current_context, context); |
1179 | 1162 | ||
1180 | dm_logger_write(core_dc->ctx->logger, LOG_DC, | 1163 | dm_logger_write(core_dc->ctx->logger, LOG_DC, |
1181 | "%s: commit %d surfaces to target 0x%x\n", | 1164 | "%s: commit %d surfaces to stream 0x%x\n", |
1182 | __func__, | 1165 | __func__, |
1183 | new_surface_count, | 1166 | new_surface_count, |
1184 | dc_target); | 1167 | dc_stream); |
1185 | 1168 | ||
1186 | if (!resource_attach_surfaces_to_context( | 1169 | if (!resource_attach_surfaces_to_context( |
1187 | new_surfaces, new_surface_count, dc_target, context)) { | 1170 | new_surfaces, new_surface_count, dc_stream, context)) { |
1188 | BREAK_TO_DEBUGGER(); | 1171 | BREAK_TO_DEBUGGER(); |
1189 | ret = false; | 1172 | ret = false; |
1190 | goto unexpected_fail; | 1173 | goto unexpected_fail; |
@@ -1256,7 +1239,7 @@ val_ctx_fail: | |||
1256 | return ret; | 1239 | return ret; |
1257 | } | 1240 | } |
1258 | 1241 | ||
1259 | bool dc_post_update_surfaces_to_target(struct dc *dc) | 1242 | bool dc_post_update_surfaces_to_stream(struct dc *dc) |
1260 | { | 1243 | { |
1261 | struct core_dc *core_dc = DC_TO_CORE(dc); | 1244 | struct core_dc *core_dc = DC_TO_CORE(dc); |
1262 | int i; | 1245 | int i; |
@@ -1282,22 +1265,27 @@ bool dc_post_update_surfaces_to_target(struct dc *dc) | |||
1282 | return true; | 1265 | return true; |
1283 | } | 1266 | } |
1284 | 1267 | ||
1285 | bool dc_commit_surfaces_to_target( | 1268 | bool dc_commit_surfaces_to_stream( |
1286 | struct dc *dc, | 1269 | struct dc *dc, |
1287 | const struct dc_surface **new_surfaces, | 1270 | const struct dc_surface **new_surfaces, |
1288 | uint8_t new_surface_count, | 1271 | uint8_t new_surface_count, |
1289 | struct dc_target *dc_target) | 1272 | const struct dc_stream *dc_stream) |
1290 | { | 1273 | { |
1291 | struct dc_surface_update updates[MAX_SURFACES] = { 0 }; | 1274 | struct dc_surface_update updates[MAX_SURFACES]; |
1292 | struct dc_flip_addrs flip_addr[MAX_SURFACES] = { 0 }; | 1275 | struct dc_flip_addrs flip_addr[MAX_SURFACES]; |
1293 | struct dc_plane_info plane_info[MAX_SURFACES] = { 0 }; | 1276 | struct dc_plane_info plane_info[MAX_SURFACES]; |
1294 | struct dc_scaling_info scaling_info[MAX_SURFACES] = { 0 }; | 1277 | struct dc_scaling_info scaling_info[MAX_SURFACES]; |
1295 | int i; | 1278 | int i; |
1296 | 1279 | ||
1297 | if (!dc_pre_update_surfaces_to_target( | 1280 | if (!dc_pre_update_surfaces_to_stream( |
1298 | dc, new_surfaces, new_surface_count, dc_target)) | 1281 | dc, new_surfaces, new_surface_count, dc_stream)) |
1299 | return false; | 1282 | return false; |
1300 | 1283 | ||
1284 | memset(updates, 0, sizeof(updates)); | ||
1285 | memset(flip_addr, 0, sizeof(flip_addr)); | ||
1286 | memset(plane_info, 0, sizeof(plane_info)); | ||
1287 | memset(scaling_info, 0, sizeof(scaling_info)); | ||
1288 | |||
1301 | for (i = 0; i < new_surface_count; i++) { | 1289 | for (i = 0; i < new_surface_count; i++) { |
1302 | updates[i].surface = new_surfaces[i]; | 1290 | updates[i].surface = new_surfaces[i]; |
1303 | updates[i].gamma = | 1291 | updates[i].gamma = |
@@ -1321,13 +1309,13 @@ bool dc_commit_surfaces_to_target( | |||
1321 | updates[i].plane_info = &plane_info[i]; | 1309 | updates[i].plane_info = &plane_info[i]; |
1322 | updates[i].scaling_info = &scaling_info[i]; | 1310 | updates[i].scaling_info = &scaling_info[i]; |
1323 | } | 1311 | } |
1324 | dc_update_surfaces_for_target(dc, updates, new_surface_count, dc_target); | 1312 | dc_update_surfaces_for_stream(dc, updates, new_surface_count, dc_stream); |
1325 | 1313 | ||
1326 | return dc_post_update_surfaces_to_target(dc); | 1314 | return dc_post_update_surfaces_to_stream(dc); |
1327 | } | 1315 | } |
1328 | 1316 | ||
1329 | void dc_update_surfaces_for_target(struct dc *dc, struct dc_surface_update *updates, | 1317 | void dc_update_surfaces_for_stream(struct dc *dc, struct dc_surface_update *updates, |
1330 | int surface_count, struct dc_target *dc_target) | 1318 | int surface_count, const struct dc_stream *dc_stream) |
1331 | { | 1319 | { |
1332 | struct core_dc *core_dc = DC_TO_CORE(dc); | 1320 | struct core_dc *core_dc = DC_TO_CORE(dc); |
1333 | struct validate_context *context = core_dc->temp_flip_context; | 1321 | struct validate_context *context = core_dc->temp_flip_context; |
@@ -1377,21 +1365,21 @@ void dc_update_surfaces_for_target(struct dc *dc, struct dc_surface_update *upda | |||
1377 | can_skip_context_building = false; | 1365 | can_skip_context_building = false; |
1378 | } | 1366 | } |
1379 | 1367 | ||
1380 | if (!can_skip_context_building && dc_target) { | 1368 | if (!can_skip_context_building && dc_stream) { |
1381 | struct core_target *target = DC_TARGET_TO_CORE(dc_target); | 1369 | const struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream); |
1382 | 1370 | ||
1383 | if (core_dc->current_context->target_count == 0) | 1371 | if (core_dc->current_context->stream_count == 0) |
1384 | return; | 1372 | return; |
1385 | 1373 | ||
1386 | /* Cannot commit surface to a target that is not commited */ | 1374 | /* Cannot commit surface to a stream that is not commited */ |
1387 | for (i = 0; i < core_dc->current_context->target_count; i++) | 1375 | for (i = 0; i < core_dc->current_context->stream_count; i++) |
1388 | if (target == core_dc->current_context->targets[i]) | 1376 | if (stream == core_dc->current_context->streams[i]) |
1389 | break; | 1377 | break; |
1390 | if (i == core_dc->current_context->target_count) | 1378 | if (i == core_dc->current_context->stream_count) |
1391 | return; | 1379 | return; |
1392 | 1380 | ||
1393 | if (!resource_attach_surfaces_to_context( | 1381 | if (!resource_attach_surfaces_to_context( |
1394 | new_surfaces, surface_count, dc_target, context)) { | 1382 | new_surfaces, surface_count, dc_stream, context)) { |
1395 | BREAK_TO_DEBUGGER(); | 1383 | BREAK_TO_DEBUGGER(); |
1396 | return; | 1384 | return; |
1397 | } | 1385 | } |
@@ -1578,17 +1566,17 @@ void dc_update_surfaces_for_target(struct dc *dc, struct dc_surface_update *upda | |||
1578 | core_dc->current_context = context; | 1566 | core_dc->current_context = context; |
1579 | } | 1567 | } |
1580 | 1568 | ||
1581 | uint8_t dc_get_current_target_count(const struct dc *dc) | 1569 | uint8_t dc_get_current_stream_count(const struct dc *dc) |
1582 | { | 1570 | { |
1583 | struct core_dc *core_dc = DC_TO_CORE(dc); | 1571 | struct core_dc *core_dc = DC_TO_CORE(dc); |
1584 | return core_dc->current_context->target_count; | 1572 | return core_dc->current_context->stream_count; |
1585 | } | 1573 | } |
1586 | 1574 | ||
1587 | struct dc_target *dc_get_target_at_index(const struct dc *dc, uint8_t i) | 1575 | struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i) |
1588 | { | 1576 | { |
1589 | struct core_dc *core_dc = DC_TO_CORE(dc); | 1577 | struct core_dc *core_dc = DC_TO_CORE(dc); |
1590 | if (i < core_dc->current_context->target_count) | 1578 | if (i < core_dc->current_context->stream_count) |
1591 | return &(core_dc->current_context->targets[i]->public); | 1579 | return &(core_dc->current_context->streams[i]->public); |
1592 | return NULL; | 1580 | return NULL; |
1593 | } | 1581 | } |
1594 | 1582 | ||
@@ -1687,8 +1675,8 @@ void dc_set_power_state( | |||
1687 | core_dc->hwss.init_hw(core_dc); | 1675 | core_dc->hwss.init_hw(core_dc); |
1688 | break; | 1676 | break; |
1689 | default: | 1677 | default: |
1690 | /* NULL means "reset/release all DC targets" */ | 1678 | /* NULL means "reset/release all DC streams" */ |
1691 | dc_commit_targets(dc, NULL, 0); | 1679 | dc_commit_streams(dc, NULL, 0); |
1692 | 1680 | ||
1693 | core_dc->hwss.power_down(core_dc); | 1681 | core_dc->hwss.power_down(core_dc); |
1694 | 1682 | ||
@@ -1882,11 +1870,3 @@ void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink | |||
1882 | } | 1870 | } |
1883 | } | 1871 | } |
1884 | 1872 | ||
1885 | const struct dc_stream_status *dc_stream_get_status( | ||
1886 | const struct dc_stream *dc_stream) | ||
1887 | { | ||
1888 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream); | ||
1889 | |||
1890 | return &stream->status; | ||
1891 | } | ||
1892 | |||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 4bb6b1d9c970..1f87b948678b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c | |||
@@ -591,12 +591,12 @@ enum dc_status resource_build_scaling_params_for_context( | |||
591 | return DC_OK; | 591 | return DC_OK; |
592 | } | 592 | } |
593 | 593 | ||
594 | static void detach_surfaces_for_target( | 594 | static void detach_surfaces_for_stream( |
595 | struct validate_context *context, | 595 | struct validate_context *context, |
596 | const struct dc_target *dc_target) | 596 | const struct dc_stream *dc_stream) |
597 | { | 597 | { |
598 | int i; | 598 | int i; |
599 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]); | 599 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream); |
600 | 600 | ||
601 | for (i = 0; i < context->res_ctx.pool->pipe_count; i++) { | 601 | for (i = 0; i < context->res_ctx.pool->pipe_count; i++) { |
602 | struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; | 602 | struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; |
@@ -646,15 +646,15 @@ struct pipe_ctx *resource_get_head_pipe_for_stream( | |||
646 | } | 646 | } |
647 | 647 | ||
648 | /* | 648 | /* |
649 | * A free_pipe for a target is defined here as a pipe with a stream that belongs | 649 | * A free_pipe for a stream is defined here as a pipe |
650 | * to the target but has no surface attached yet | 650 | * that has no surface attached yet |
651 | */ | 651 | */ |
652 | static struct pipe_ctx *acquire_free_pipe_for_target( | 652 | static struct pipe_ctx *acquire_free_pipe_for_stream( |
653 | struct resource_context *res_ctx, | 653 | struct resource_context *res_ctx, |
654 | const struct dc_target *dc_target) | 654 | const struct dc_stream *dc_stream) |
655 | { | 655 | { |
656 | int i; | 656 | int i; |
657 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]); | 657 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream); |
658 | 658 | ||
659 | struct pipe_ctx *head_pipe = NULL; | 659 | struct pipe_ctx *head_pipe = NULL; |
660 | 660 | ||
@@ -688,12 +688,12 @@ static struct pipe_ctx *acquire_free_pipe_for_target( | |||
688 | 688 | ||
689 | } | 689 | } |
690 | 690 | ||
691 | static void release_free_pipes_for_target( | 691 | static void release_free_pipes_for_stream( |
692 | struct resource_context *res_ctx, | 692 | struct resource_context *res_ctx, |
693 | const struct dc_target *dc_target) | 693 | const struct dc_stream *dc_stream) |
694 | { | 694 | { |
695 | int i; | 695 | int i; |
696 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]); | 696 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream); |
697 | 697 | ||
698 | for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) { | 698 | for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) { |
699 | if (res_ctx->pipe_ctx[i].stream == stream && | 699 | if (res_ctx->pipe_ctx[i].stream == stream && |
@@ -706,12 +706,12 @@ static void release_free_pipes_for_target( | |||
706 | bool resource_attach_surfaces_to_context( | 706 | bool resource_attach_surfaces_to_context( |
707 | const struct dc_surface * const *surfaces, | 707 | const struct dc_surface * const *surfaces, |
708 | int surface_count, | 708 | int surface_count, |
709 | const struct dc_target *dc_target, | 709 | const struct dc_stream *dc_stream, |
710 | struct validate_context *context) | 710 | struct validate_context *context) |
711 | { | 711 | { |
712 | int i; | 712 | int i; |
713 | struct pipe_ctx *tail_pipe; | 713 | struct pipe_ctx *tail_pipe; |
714 | struct dc_target_status *target_status = NULL; | 714 | struct dc_stream_status *stream_status = NULL; |
715 | 715 | ||
716 | 716 | ||
717 | if (surface_count > MAX_SURFACE_NUM) { | 717 | if (surface_count > MAX_SURFACE_NUM) { |
@@ -720,13 +720,13 @@ bool resource_attach_surfaces_to_context( | |||
720 | return false; | 720 | return false; |
721 | } | 721 | } |
722 | 722 | ||
723 | for (i = 0; i < context->target_count; i++) | 723 | for (i = 0; i < context->stream_count; i++) |
724 | if (&context->targets[i]->public == dc_target) { | 724 | if (&context->streams[i]->public == dc_stream) { |
725 | target_status = &context->target_status[i]; | 725 | stream_status = &context->stream_status[i]; |
726 | break; | 726 | break; |
727 | } | 727 | } |
728 | if (target_status == NULL) { | 728 | if (stream_status == NULL) { |
729 | dm_error("Existing target not found; failed to attach surfaces\n"); | 729 | dm_error("Existing stream not found; failed to attach surfaces\n"); |
730 | return false; | 730 | return false; |
731 | } | 731 | } |
732 | 732 | ||
@@ -734,16 +734,16 @@ bool resource_attach_surfaces_to_context( | |||
734 | for (i = 0; i < surface_count; i++) | 734 | for (i = 0; i < surface_count; i++) |
735 | dc_surface_retain(surfaces[i]); | 735 | dc_surface_retain(surfaces[i]); |
736 | 736 | ||
737 | detach_surfaces_for_target(context, dc_target); | 737 | detach_surfaces_for_stream(context, dc_stream); |
738 | 738 | ||
739 | /* release existing surfaces*/ | 739 | /* release existing surfaces*/ |
740 | for (i = 0; i < target_status->surface_count; i++) | 740 | for (i = 0; i < stream_status->surface_count; i++) |
741 | dc_surface_release(target_status->surfaces[i]); | 741 | dc_surface_release(stream_status->surfaces[i]); |
742 | 742 | ||
743 | for (i = surface_count; i < target_status->surface_count; i++) | 743 | for (i = surface_count; i < stream_status->surface_count; i++) |
744 | target_status->surfaces[i] = NULL; | 744 | stream_status->surfaces[i] = NULL; |
745 | 745 | ||
746 | target_status->surface_count = 0; | 746 | stream_status->surface_count = 0; |
747 | 747 | ||
748 | if (surface_count == 0) | 748 | if (surface_count == 0) |
749 | return true; | 749 | return true; |
@@ -751,11 +751,11 @@ bool resource_attach_surfaces_to_context( | |||
751 | tail_pipe = NULL; | 751 | tail_pipe = NULL; |
752 | for (i = 0; i < surface_count; i++) { | 752 | for (i = 0; i < surface_count; i++) { |
753 | struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]); | 753 | struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]); |
754 | struct pipe_ctx *free_pipe = acquire_free_pipe_for_target( | 754 | struct pipe_ctx *free_pipe = acquire_free_pipe_for_stream( |
755 | &context->res_ctx, dc_target); | 755 | &context->res_ctx, dc_stream); |
756 | 756 | ||
757 | if (!free_pipe) { | 757 | if (!free_pipe) { |
758 | target_status->surfaces[i] = NULL; | 758 | stream_status->surfaces[i] = NULL; |
759 | return false; | 759 | return false; |
760 | } | 760 | } |
761 | 761 | ||
@@ -769,13 +769,13 @@ bool resource_attach_surfaces_to_context( | |||
769 | tail_pipe = free_pipe; | 769 | tail_pipe = free_pipe; |
770 | } | 770 | } |
771 | 771 | ||
772 | release_free_pipes_for_target(&context->res_ctx, dc_target); | 772 | release_free_pipes_for_stream(&context->res_ctx, dc_stream); |
773 | 773 | ||
774 | /* assign new surfaces*/ | 774 | /* assign new surfaces*/ |
775 | for (i = 0; i < surface_count; i++) | 775 | for (i = 0; i < surface_count; i++) |
776 | target_status->surfaces[i] = surfaces[i]; | 776 | stream_status->surfaces[i] = surfaces[i]; |
777 | 777 | ||
778 | target_status->surface_count = surface_count; | 778 | stream_status->surface_count = surface_count; |
779 | 779 | ||
780 | return true; | 780 | return true; |
781 | } | 781 | } |
@@ -819,25 +819,14 @@ static bool are_stream_backends_same( | |||
819 | return true; | 819 | return true; |
820 | } | 820 | } |
821 | 821 | ||
822 | bool is_target_unchanged( | 822 | bool is_stream_unchanged( |
823 | const struct core_target *old_target, const struct core_target *target) | 823 | const struct core_stream *old_stream, const struct core_stream *stream) |
824 | { | 824 | { |
825 | int i; | 825 | if (old_stream == stream) |
826 | |||
827 | if (old_target == target) | ||
828 | return true; | 826 | return true; |
829 | if (old_target->public.stream_count != target->public.stream_count) | ||
830 | return false; | ||
831 | |||
832 | for (i = 0; i < old_target->public.stream_count; i++) { | ||
833 | const struct core_stream *old_stream = DC_STREAM_TO_CORE( | ||
834 | old_target->public.streams[i]); | ||
835 | const struct core_stream *stream = DC_STREAM_TO_CORE( | ||
836 | target->public.streams[i]); | ||
837 | 827 | ||
838 | if (!are_stream_backends_same(old_stream, stream)) | 828 | if (!are_stream_backends_same(old_stream, stream)) |
839 | return false; | 829 | return false; |
840 | } | ||
841 | 830 | ||
842 | return true; | 831 | return true; |
843 | } | 832 | } |
@@ -851,23 +840,23 @@ bool resource_validate_attach_surfaces( | |||
851 | int i, j; | 840 | int i, j; |
852 | 841 | ||
853 | for (i = 0; i < set_count; i++) { | 842 | for (i = 0; i < set_count; i++) { |
854 | for (j = 0; j < old_context->target_count; j++) | 843 | for (j = 0; j < old_context->stream_count; j++) |
855 | if (is_target_unchanged( | 844 | if (is_stream_unchanged( |
856 | old_context->targets[j], | 845 | old_context->streams[j], |
857 | context->targets[i])) { | 846 | context->streams[i])) { |
858 | if (!resource_attach_surfaces_to_context( | 847 | if (!resource_attach_surfaces_to_context( |
859 | old_context->target_status[j].surfaces, | 848 | old_context->stream_status[j].surfaces, |
860 | old_context->target_status[j].surface_count, | 849 | old_context->stream_status[j].surface_count, |
861 | &context->targets[i]->public, | 850 | &context->streams[i]->public, |
862 | context)) | 851 | context)) |
863 | return false; | 852 | return false; |
864 | context->target_status[i] = old_context->target_status[j]; | 853 | context->stream_status[i] = old_context->stream_status[j]; |
865 | } | 854 | } |
866 | if (set[i].surface_count != 0) | 855 | if (set[i].surface_count != 0) |
867 | if (!resource_attach_surfaces_to_context( | 856 | if (!resource_attach_surfaces_to_context( |
868 | set[i].surfaces, | 857 | set[i].surfaces, |
869 | set[i].surface_count, | 858 | set[i].surface_count, |
870 | &context->targets[i]->public, | 859 | &context->streams[i]->public, |
871 | context)) | 860 | context)) |
872 | return false; | 861 | return false; |
873 | 862 | ||
@@ -1001,20 +990,15 @@ static void update_stream_signal(struct core_stream *stream) | |||
1001 | } | 990 | } |
1002 | 991 | ||
1003 | bool resource_is_stream_unchanged( | 992 | bool resource_is_stream_unchanged( |
1004 | const struct validate_context *old_context, struct core_stream *stream) | 993 | const struct validate_context *old_context, const struct core_stream *stream) |
1005 | { | 994 | { |
1006 | int i, j; | 995 | int i; |
1007 | |||
1008 | for (i = 0; i < old_context->target_count; i++) { | ||
1009 | struct core_target *old_target = old_context->targets[i]; | ||
1010 | 996 | ||
1011 | for (j = 0; j < old_target->public.stream_count; j++) { | 997 | for (i = 0; i < old_context->stream_count; i++) { |
1012 | struct core_stream *old_stream = | 998 | const struct core_stream *old_stream = old_context->streams[i]; |
1013 | DC_STREAM_TO_CORE(old_target->public.streams[j]); | ||
1014 | 999 | ||
1015 | if (are_stream_backends_same(old_stream, stream)) | 1000 | if (are_stream_backends_same(old_stream, stream)) |
1016 | return true; | 1001 | return true; |
1017 | } | ||
1018 | } | 1002 | } |
1019 | 1003 | ||
1020 | return false; | 1004 | return false; |
@@ -1036,23 +1020,19 @@ static struct core_stream *find_pll_sharable_stream( | |||
1036 | const struct core_stream *stream_needs_pll, | 1020 | const struct core_stream *stream_needs_pll, |
1037 | struct validate_context *context) | 1021 | struct validate_context *context) |
1038 | { | 1022 | { |
1039 | int i, j; | 1023 | int i; |
1040 | 1024 | ||
1041 | for (i = 0; i < context->target_count; i++) { | 1025 | for (i = 0; i < context->stream_count; i++) { |
1042 | struct core_target *target = context->targets[i]; | 1026 | struct core_stream *stream_has_pll = context->streams[i]; |
1043 | 1027 | ||
1044 | for (j = 0; j < target->public.stream_count; j++) { | 1028 | /* We are looking for non dp, non virtual stream */ |
1045 | struct core_stream *stream_has_pll = | 1029 | if (resource_are_streams_timing_synchronizable( |
1046 | DC_STREAM_TO_CORE(target->public.streams[j]); | 1030 | stream_needs_pll, stream_has_pll) |
1031 | && !dc_is_dp_signal(stream_has_pll->signal) | ||
1032 | && stream_has_pll->sink->link->public.connector_signal | ||
1033 | != SIGNAL_TYPE_VIRTUAL) | ||
1034 | return stream_has_pll; | ||
1047 | 1035 | ||
1048 | /* We are looking for non dp, non virtual stream */ | ||
1049 | if (resource_are_streams_timing_synchronizable( | ||
1050 | stream_needs_pll, stream_has_pll) | ||
1051 | && !dc_is_dp_signal(stream_has_pll->signal) | ||
1052 | && stream_has_pll->sink->link->public.connector_signal | ||
1053 | != SIGNAL_TYPE_VIRTUAL) | ||
1054 | return stream_has_pll; | ||
1055 | } | ||
1056 | } | 1036 | } |
1057 | 1037 | ||
1058 | return NULL; | 1038 | return NULL; |
@@ -1091,25 +1071,20 @@ static void calculate_phy_pix_clks( | |||
1091 | const struct core_dc *dc, | 1071 | const struct core_dc *dc, |
1092 | struct validate_context *context) | 1072 | struct validate_context *context) |
1093 | { | 1073 | { |
1094 | int i, j; | 1074 | int i; |
1095 | |||
1096 | for (i = 0; i < context->target_count; i++) { | ||
1097 | struct core_target *target = context->targets[i]; | ||
1098 | 1075 | ||
1099 | for (j = 0; j < target->public.stream_count; j++) { | 1076 | for (i = 0; i < context->stream_count; i++) { |
1100 | struct core_stream *stream = | 1077 | struct core_stream *stream = context->streams[i]; |
1101 | DC_STREAM_TO_CORE(target->public.streams[j]); | ||
1102 | 1078 | ||
1103 | update_stream_signal(stream); | 1079 | update_stream_signal(stream); |
1104 | 1080 | ||
1105 | /* update actual pixel clock on all streams */ | 1081 | /* update actual pixel clock on all streams */ |
1106 | if (dc_is_hdmi_signal(stream->signal)) | 1082 | if (dc_is_hdmi_signal(stream->signal)) |
1107 | stream->phy_pix_clk = get_norm_pix_clk( | 1083 | stream->phy_pix_clk = get_norm_pix_clk( |
1108 | &stream->public.timing); | 1084 | &stream->public.timing); |
1109 | else | 1085 | else |
1110 | stream->phy_pix_clk = | 1086 | stream->phy_pix_clk = |
1111 | stream->public.timing.pix_clk_khz; | 1087 | stream->public.timing.pix_clk_khz; |
1112 | } | ||
1113 | } | 1088 | } |
1114 | } | 1089 | } |
1115 | 1090 | ||
@@ -1117,136 +1092,122 @@ enum dc_status resource_map_pool_resources( | |||
1117 | const struct core_dc *dc, | 1092 | const struct core_dc *dc, |
1118 | struct validate_context *context) | 1093 | struct validate_context *context) |
1119 | { | 1094 | { |
1120 | int i, j, k; | 1095 | int i, j; |
1121 | 1096 | ||
1122 | calculate_phy_pix_clks(dc, context); | 1097 | calculate_phy_pix_clks(dc, context); |
1123 | 1098 | ||
1124 | for (i = 0; i < context->target_count; i++) { | 1099 | for (i = 0; i < context->stream_count; i++) { |
1125 | struct core_target *target = context->targets[i]; | 1100 | struct core_stream *stream = context->streams[i]; |
1126 | |||
1127 | for (j = 0; j < target->public.stream_count; j++) { | ||
1128 | struct core_stream *stream = | ||
1129 | DC_STREAM_TO_CORE(target->public.streams[j]); | ||
1130 | |||
1131 | if (!resource_is_stream_unchanged(dc->current_context, stream)) | ||
1132 | continue; | ||
1133 | |||
1134 | /* mark resources used for stream that is already active */ | ||
1135 | for (k = 0; k < MAX_PIPES; k++) { | ||
1136 | struct pipe_ctx *pipe_ctx = | ||
1137 | &context->res_ctx.pipe_ctx[k]; | ||
1138 | const struct pipe_ctx *old_pipe_ctx = | ||
1139 | &dc->current_context->res_ctx.pipe_ctx[k]; | ||
1140 | 1101 | ||
1141 | if (!are_stream_backends_same(old_pipe_ctx->stream, stream)) | 1102 | if (!resource_is_stream_unchanged(dc->current_context, stream)) |
1142 | continue; | 1103 | continue; |
1143 | 1104 | ||
1144 | pipe_ctx->stream = stream; | 1105 | /* mark resources used for stream that is already active */ |
1145 | copy_pipe_ctx(old_pipe_ctx, pipe_ctx); | 1106 | for (j = 0; j < MAX_PIPES; j++) { |
1107 | struct pipe_ctx *pipe_ctx = | ||
1108 | &context->res_ctx.pipe_ctx[j]; | ||
1109 | const struct pipe_ctx *old_pipe_ctx = | ||
1110 | &dc->current_context->res_ctx.pipe_ctx[j]; | ||
1146 | 1111 | ||
1147 | /* Split pipe resource, do not acquire back end */ | 1112 | if (!are_stream_backends_same(old_pipe_ctx->stream, stream)) |
1148 | if (!pipe_ctx->stream_enc) | 1113 | continue; |
1149 | continue; | ||
1150 | 1114 | ||
1151 | set_stream_engine_in_use( | 1115 | pipe_ctx->stream = stream; |
1152 | &context->res_ctx, | 1116 | copy_pipe_ctx(old_pipe_ctx, pipe_ctx); |
1153 | pipe_ctx->stream_enc); | ||
1154 | |||
1155 | /* Switch to dp clock source only if there is | ||
1156 | * no non dp stream that shares the same timing | ||
1157 | * with the dp stream. | ||
1158 | */ | ||
1159 | if (dc_is_dp_signal(pipe_ctx->stream->signal) && | ||
1160 | !find_pll_sharable_stream(stream, context)) | ||
1161 | pipe_ctx->clock_source = | ||
1162 | context->res_ctx.pool->dp_clock_source; | ||
1163 | 1117 | ||
1164 | resource_reference_clock_source( | 1118 | /* Split pipe resource, do not acquire back end */ |
1165 | &context->res_ctx, | 1119 | if (!pipe_ctx->stream_enc) |
1166 | pipe_ctx->clock_source); | 1120 | continue; |
1167 | 1121 | ||
1168 | set_audio_in_use(&context->res_ctx, | 1122 | set_stream_engine_in_use( |
1169 | pipe_ctx->audio); | 1123 | &context->res_ctx, |
1170 | } | 1124 | pipe_ctx->stream_enc); |
1125 | |||
1126 | /* Switch to dp clock source only if there is | ||
1127 | * no non dp stream that shares the same timing | ||
1128 | * with the dp stream. | ||
1129 | */ | ||
1130 | if (dc_is_dp_signal(pipe_ctx->stream->signal) && | ||
1131 | !find_pll_sharable_stream(stream, context)) | ||
1132 | pipe_ctx->clock_source = | ||
1133 | context->res_ctx.pool->dp_clock_source; | ||
1134 | |||
1135 | resource_reference_clock_source( | ||
1136 | &context->res_ctx, | ||
1137 | pipe_ctx->clock_source); | ||
1138 | |||
1139 | set_audio_in_use(&context->res_ctx, | ||
1140 | pipe_ctx->audio); | ||
1171 | } | 1141 | } |
1172 | } | 1142 | } |
1173 | 1143 | ||
1174 | for (i = 0; i < context->target_count; i++) { | 1144 | for (i = 0; i < context->stream_count; i++) { |
1175 | struct core_target *target = context->targets[i]; | 1145 | struct core_stream *stream = context->streams[i]; |
1176 | 1146 | struct pipe_ctx *pipe_ctx = NULL; | |
1177 | for (j = 0; j < target->public.stream_count; j++) { | 1147 | int pipe_idx = -1; |
1178 | struct core_stream *stream = | ||
1179 | DC_STREAM_TO_CORE(target->public.streams[j]); | ||
1180 | struct pipe_ctx *pipe_ctx = NULL; | ||
1181 | int pipe_idx = -1; | ||
1182 | |||
1183 | if (resource_is_stream_unchanged(dc->current_context, stream)) | ||
1184 | continue; | ||
1185 | /* acquire new resources */ | ||
1186 | pipe_idx = acquire_first_free_pipe( | ||
1187 | &context->res_ctx, stream); | ||
1188 | if (pipe_idx < 0) | ||
1189 | return DC_NO_CONTROLLER_RESOURCE; | ||
1190 | |||
1191 | |||
1192 | pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; | ||
1193 | 1148 | ||
1194 | pipe_ctx->stream_enc = | 1149 | if (resource_is_stream_unchanged(dc->current_context, stream)) |
1195 | find_first_free_match_stream_enc_for_link( | 1150 | continue; |
1196 | &context->res_ctx, stream); | 1151 | /* acquire new resources */ |
1197 | 1152 | pipe_idx = acquire_first_free_pipe(&context->res_ctx, stream); | |
1198 | if (!pipe_ctx->stream_enc) | 1153 | if (pipe_idx < 0) |
1199 | return DC_NO_STREAM_ENG_RESOURCE; | 1154 | return DC_NO_CONTROLLER_RESOURCE; |
1200 | 1155 | ||
1201 | set_stream_engine_in_use( | 1156 | |
1157 | pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; | ||
1158 | |||
1159 | pipe_ctx->stream_enc = | ||
1160 | find_first_free_match_stream_enc_for_link( | ||
1161 | &context->res_ctx, stream); | ||
1162 | |||
1163 | if (!pipe_ctx->stream_enc) | ||
1164 | return DC_NO_STREAM_ENG_RESOURCE; | ||
1165 | |||
1166 | set_stream_engine_in_use( | ||
1167 | &context->res_ctx, | ||
1168 | pipe_ctx->stream_enc); | ||
1169 | |||
1170 | /* TODO: Add check if ASIC support and EDID audio */ | ||
1171 | if (!stream->sink->converter_disable_audio && | ||
1172 | dc_is_audio_capable_signal(pipe_ctx->stream->signal) && | ||
1173 | stream->public.audio_info.mode_count) { | ||
1174 | pipe_ctx->audio = find_first_free_audio( | ||
1175 | &context->res_ctx); | ||
1176 | |||
1177 | /* | ||
1178 | * Audio assigned in order first come first get. | ||
1179 | * There are asics which has number of audio | ||
1180 | * resources less then number of pipes | ||
1181 | */ | ||
1182 | if (pipe_ctx->audio) | ||
1183 | set_audio_in_use( | ||
1202 | &context->res_ctx, | 1184 | &context->res_ctx, |
1203 | pipe_ctx->stream_enc); | 1185 | pipe_ctx->audio); |
1204 | |||
1205 | /* TODO: Add check if ASIC support and EDID audio */ | ||
1206 | if (!stream->sink->converter_disable_audio && | ||
1207 | dc_is_audio_capable_signal(pipe_ctx->stream->signal) && | ||
1208 | stream->public.audio_info.mode_count) { | ||
1209 | pipe_ctx->audio = find_first_free_audio( | ||
1210 | &context->res_ctx); | ||
1211 | |||
1212 | /* | ||
1213 | * Audio assigned in order first come first get. | ||
1214 | * There are asics which has number of audio | ||
1215 | * resources less then number of pipes | ||
1216 | */ | ||
1217 | if (pipe_ctx->audio) | ||
1218 | set_audio_in_use( | ||
1219 | &context->res_ctx, | ||
1220 | pipe_ctx->audio); | ||
1221 | } | ||
1222 | |||
1223 | if (j == 0) { | ||
1224 | context->target_status[i].primary_otg_inst = | ||
1225 | pipe_ctx->tg->inst; | ||
1226 | } | ||
1227 | } | 1186 | } |
1187 | |||
1188 | context->stream_status[i].primary_otg_inst = pipe_ctx->tg->inst; | ||
1228 | } | 1189 | } |
1229 | 1190 | ||
1230 | return DC_OK; | 1191 | return DC_OK; |
1231 | } | 1192 | } |
1232 | 1193 | ||
1233 | /* first target in the context is used to populate the rest */ | 1194 | /* first stream in the context is used to populate the rest */ |
1234 | void validate_guaranteed_copy_target( | 1195 | void validate_guaranteed_copy_streams( |
1235 | struct validate_context *context, | 1196 | struct validate_context *context, |
1236 | int max_targets) | 1197 | int max_streams) |
1237 | { | 1198 | { |
1238 | int i; | 1199 | int i; |
1239 | 1200 | ||
1240 | for (i = 1; i < max_targets; i++) { | 1201 | for (i = 1; i < max_streams; i++) { |
1241 | context->targets[i] = context->targets[0]; | 1202 | context->streams[i] = context->streams[0]; |
1242 | 1203 | ||
1243 | copy_pipe_ctx(&context->res_ctx.pipe_ctx[0], | 1204 | copy_pipe_ctx(&context->res_ctx.pipe_ctx[0], |
1244 | &context->res_ctx.pipe_ctx[i]); | 1205 | &context->res_ctx.pipe_ctx[i]); |
1245 | context->res_ctx.pipe_ctx[i].stream = | 1206 | context->res_ctx.pipe_ctx[i].stream = |
1246 | context->res_ctx.pipe_ctx[0].stream; | 1207 | context->res_ctx.pipe_ctx[0].stream; |
1247 | 1208 | ||
1248 | dc_target_retain(&context->targets[i]->public); | 1209 | dc_stream_retain(&context->streams[i]->public); |
1249 | context->target_count++; | 1210 | context->stream_count++; |
1250 | } | 1211 | } |
1251 | } | 1212 | } |
1252 | 1213 | ||
@@ -1875,18 +1836,19 @@ void resource_validate_ctx_destruct(struct validate_context *context) | |||
1875 | { | 1836 | { |
1876 | int i, j; | 1837 | int i, j; |
1877 | 1838 | ||
1878 | for (i = 0; i < context->target_count; i++) { | 1839 | for (i = 0; i < context->stream_count; i++) { |
1879 | for (j = 0; j < context->target_status[i].surface_count; j++) | 1840 | for (j = 0; j < context->stream_status[i].surface_count; j++) |
1880 | dc_surface_release( | 1841 | dc_surface_release( |
1881 | context->target_status[i].surfaces[j]); | 1842 | context->stream_status[i].surfaces[j]); |
1882 | 1843 | ||
1883 | context->target_status[i].surface_count = 0; | 1844 | context->stream_status[i].surface_count = 0; |
1884 | dc_target_release(&context->targets[i]->public); | 1845 | dc_stream_release(&context->streams[i]->public); |
1846 | context->streams[i] = NULL; | ||
1885 | } | 1847 | } |
1886 | } | 1848 | } |
1887 | 1849 | ||
1888 | /* | 1850 | /* |
1889 | * Copy src_ctx into dst_ctx and retain all surfaces and targets referenced | 1851 | * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced |
1890 | * by the src_ctx | 1852 | * by the src_ctx |
1891 | */ | 1853 | */ |
1892 | void resource_validate_ctx_copy_construct( | 1854 | void resource_validate_ctx_copy_construct( |
@@ -1908,11 +1870,11 @@ void resource_validate_ctx_copy_construct( | |||
1908 | 1870 | ||
1909 | } | 1871 | } |
1910 | 1872 | ||
1911 | for (i = 0; i < dst_ctx->target_count; i++) { | 1873 | for (i = 0; i < dst_ctx->stream_count; i++) { |
1912 | dc_target_retain(&dst_ctx->targets[i]->public); | 1874 | dc_stream_retain(&dst_ctx->streams[i]->public); |
1913 | for (j = 0; j < dst_ctx->target_status[i].surface_count; j++) | 1875 | for (j = 0; j < dst_ctx->stream_status[i].surface_count; j++) |
1914 | dc_surface_retain( | 1876 | dc_surface_retain( |
1915 | dst_ctx->target_status[i].surfaces[j]); | 1877 | dst_ctx->stream_status[i].surfaces[j]); |
1916 | } | 1878 | } |
1917 | } | 1879 | } |
1918 | 1880 | ||
@@ -1968,53 +1930,48 @@ enum dc_status resource_map_clock_resources( | |||
1968 | const struct core_dc *dc, | 1930 | const struct core_dc *dc, |
1969 | struct validate_context *context) | 1931 | struct validate_context *context) |
1970 | { | 1932 | { |
1971 | int i, j, k; | 1933 | int i, j; |
1972 | 1934 | ||
1973 | /* acquire new resources */ | 1935 | /* acquire new resources */ |
1974 | for (i = 0; i < context->target_count; i++) { | 1936 | for (i = 0; i < context->stream_count; i++) { |
1975 | struct core_target *target = context->targets[i]; | 1937 | const struct core_stream *stream = context->streams[i]; |
1976 | 1938 | ||
1977 | for (j = 0; j < target->public.stream_count; j++) { | 1939 | if (resource_is_stream_unchanged(dc->current_context, stream)) |
1978 | struct core_stream *stream = | 1940 | continue; |
1979 | DC_STREAM_TO_CORE(target->public.streams[j]); | 1941 | |
1942 | for (j = 0; j < MAX_PIPES; j++) { | ||
1943 | struct pipe_ctx *pipe_ctx = | ||
1944 | &context->res_ctx.pipe_ctx[j]; | ||
1980 | 1945 | ||
1981 | if (resource_is_stream_unchanged(dc->current_context, stream)) | 1946 | if (context->res_ctx.pipe_ctx[j].stream != stream) |
1982 | continue; | 1947 | continue; |
1983 | 1948 | ||
1984 | for (k = 0; k < MAX_PIPES; k++) { | 1949 | if (dc_is_dp_signal(pipe_ctx->stream->signal) |
1985 | struct pipe_ctx *pipe_ctx = | 1950 | || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL) |
1986 | &context->res_ctx.pipe_ctx[k]; | 1951 | pipe_ctx->clock_source = |
1952 | context->res_ctx.pool->dp_clock_source; | ||
1953 | else { | ||
1954 | pipe_ctx->clock_source = NULL; | ||
1987 | 1955 | ||
1988 | if (context->res_ctx.pipe_ctx[k].stream != stream) | 1956 | if (!dc->public.config.disable_disp_pll_sharing) |
1989 | continue; | 1957 | resource_find_used_clk_src_for_sharing( |
1958 | &context->res_ctx, | ||
1959 | pipe_ctx); | ||
1990 | 1960 | ||
1991 | if (dc_is_dp_signal(pipe_ctx->stream->signal) | 1961 | if (pipe_ctx->clock_source == NULL) |
1992 | || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL) | ||
1993 | pipe_ctx->clock_source = | 1962 | pipe_ctx->clock_source = |
1994 | context->res_ctx.pool->dp_clock_source; | 1963 | dc_resource_find_first_free_pll(&context->res_ctx); |
1995 | else { | 1964 | } |
1996 | pipe_ctx->clock_source = NULL; | ||
1997 | |||
1998 | if (!dc->public.config.disable_disp_pll_sharing) | ||
1999 | resource_find_used_clk_src_for_sharing( | ||
2000 | &context->res_ctx, | ||
2001 | pipe_ctx); | ||
2002 | |||
2003 | if (pipe_ctx->clock_source == NULL) | ||
2004 | pipe_ctx->clock_source = | ||
2005 | dc_resource_find_first_free_pll(&context->res_ctx); | ||
2006 | } | ||
2007 | 1965 | ||
2008 | if (pipe_ctx->clock_source == NULL) | 1966 | if (pipe_ctx->clock_source == NULL) |
2009 | return DC_NO_CLOCK_SOURCE_RESOURCE; | 1967 | return DC_NO_CLOCK_SOURCE_RESOURCE; |
2010 | 1968 | ||
2011 | resource_reference_clock_source( | 1969 | resource_reference_clock_source( |
2012 | &context->res_ctx, | 1970 | &context->res_ctx, |
2013 | pipe_ctx->clock_source); | 1971 | pipe_ctx->clock_source); |
2014 | 1972 | ||
2015 | /* only one cs per stream regardless of mpo */ | 1973 | /* only one cs per stream regardless of mpo */ |
2016 | break; | 1974 | break; |
2017 | } | ||
2018 | } | 1975 | } |
2019 | } | 1976 | } |
2020 | 1977 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index cda67a78dbfd..bc1f387d1992 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include "dc.h" | 27 | #include "dc.h" |
28 | #include "core_types.h" | 28 | #include "core_types.h" |
29 | #include "resource.h" | 29 | #include "resource.h" |
30 | #include "ipp.h" | ||
31 | #include "timing_generator.h" | ||
30 | 32 | ||
31 | /******************************************************************************* | 33 | /******************************************************************************* |
32 | * Private definitions | 34 | * Private definitions |
@@ -146,3 +148,184 @@ construct_fail: | |||
146 | alloc_fail: | 148 | alloc_fail: |
147 | return NULL; | 149 | return NULL; |
148 | } | 150 | } |
151 | |||
152 | const struct dc_stream_status *dc_stream_get_status( | ||
153 | const struct dc_stream *dc_stream) | ||
154 | { | ||
155 | uint8_t i; | ||
156 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream); | ||
157 | struct core_dc *dc = DC_TO_CORE(stream->ctx->dc); | ||
158 | |||
159 | for (i = 0; i < dc->current_context->stream_count; i++) | ||
160 | if (stream == dc->current_context->streams[i]) | ||
161 | return &dc->current_context->stream_status[i]; | ||
162 | |||
163 | return NULL; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * Update the cursor attributes and set cursor surface address | ||
168 | */ | ||
169 | bool dc_stream_set_cursor_attributes( | ||
170 | const struct dc_stream *dc_stream, | ||
171 | const struct dc_cursor_attributes *attributes) | ||
172 | { | ||
173 | int i; | ||
174 | struct core_stream *stream; | ||
175 | struct core_dc *core_dc; | ||
176 | struct resource_context *res_ctx; | ||
177 | bool ret = false; | ||
178 | |||
179 | if (NULL == dc_stream) { | ||
180 | dm_error("DC: dc_stream is NULL!\n"); | ||
181 | return false; | ||
182 | } | ||
183 | if (NULL == attributes) { | ||
184 | dm_error("DC: attributes is NULL!\n"); | ||
185 | return false; | ||
186 | } | ||
187 | |||
188 | stream = DC_STREAM_TO_CORE(dc_stream); | ||
189 | core_dc = DC_TO_CORE(stream->ctx->dc); | ||
190 | res_ctx = &core_dc->current_context->res_ctx; | ||
191 | |||
192 | for (i = 0; i < MAX_PIPES; i++) { | ||
193 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; | ||
194 | |||
195 | if (pipe_ctx->stream == stream) { | ||
196 | struct input_pixel_processor *ipp = pipe_ctx->ipp; | ||
197 | |||
198 | if (ipp->funcs->ipp_cursor_set_attributes( | ||
199 | ipp, attributes)) | ||
200 | ret = true; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | return ret; | ||
205 | } | ||
206 | |||
207 | bool dc_stream_set_cursor_position( | ||
208 | const struct dc_stream *dc_stream, | ||
209 | const struct dc_cursor_position *position) | ||
210 | { | ||
211 | int i; | ||
212 | struct core_stream *stream; | ||
213 | struct core_dc *core_dc; | ||
214 | struct resource_context *res_ctx; | ||
215 | bool ret = false; | ||
216 | |||
217 | if (NULL == dc_stream) { | ||
218 | dm_error("DC: dc_stream is NULL!\n"); | ||
219 | return false; | ||
220 | } | ||
221 | |||
222 | if (NULL == position) { | ||
223 | dm_error("DC: cursor position is NULL!\n"); | ||
224 | return false; | ||
225 | } | ||
226 | |||
227 | stream = DC_STREAM_TO_CORE(dc_stream); | ||
228 | core_dc = DC_TO_CORE(stream->ctx->dc); | ||
229 | res_ctx = &core_dc->current_context->res_ctx; | ||
230 | |||
231 | for (i = 0; i < MAX_PIPES; i++) { | ||
232 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; | ||
233 | |||
234 | if (pipe_ctx->stream == stream) { | ||
235 | struct input_pixel_processor *ipp = pipe_ctx->ipp; | ||
236 | struct dc_cursor_mi_param param = { | ||
237 | .pixel_clk_khz = dc_stream->timing.pix_clk_khz, | ||
238 | .ref_clk_khz = 48000,/*todo refclk*/ | ||
239 | .viewport_x_start = pipe_ctx->scl_data.viewport.x, | ||
240 | .viewport_width = pipe_ctx->scl_data.viewport.width, | ||
241 | .h_scale_ratio = pipe_ctx->scl_data.ratios.horz, | ||
242 | }; | ||
243 | |||
244 | ipp->funcs->ipp_cursor_set_position(ipp, position, ¶m); | ||
245 | ret = true; | ||
246 | } | ||
247 | } | ||
248 | |||
249 | return ret; | ||
250 | } | ||
251 | |||
252 | uint32_t dc_stream_get_vblank_counter(const struct dc_stream *dc_stream) | ||
253 | { | ||
254 | uint8_t i; | ||
255 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream); | ||
256 | struct core_dc *core_dc = DC_TO_CORE(stream->ctx->dc); | ||
257 | struct resource_context *res_ctx = | ||
258 | &core_dc->current_context->res_ctx; | ||
259 | |||
260 | for (i = 0; i < MAX_PIPES; i++) { | ||
261 | struct timing_generator *tg = res_ctx->pipe_ctx[i].tg; | ||
262 | |||
263 | if (res_ctx->pipe_ctx[i].stream != stream) | ||
264 | continue; | ||
265 | |||
266 | return tg->funcs->get_frame_count(tg); | ||
267 | } | ||
268 | |||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | uint32_t dc_stream_get_scanoutpos( | ||
273 | const struct dc_stream *dc_stream, | ||
274 | uint32_t *vbl, | ||
275 | uint32_t *position) | ||
276 | { | ||
277 | uint8_t i; | ||
278 | struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream); | ||
279 | struct core_dc *core_dc = DC_TO_CORE(stream->ctx->dc); | ||
280 | struct resource_context *res_ctx = | ||
281 | &core_dc->current_context->res_ctx; | ||
282 | |||
283 | for (i = 0; i < MAX_PIPES; i++) { | ||
284 | struct timing_generator *tg = res_ctx->pipe_ctx[i].tg; | ||
285 | |||
286 | if (res_ctx->pipe_ctx[i].stream != stream) | ||
287 | continue; | ||
288 | |||
289 | return tg->funcs->get_scanoutpos(tg, vbl, position); | ||
290 | } | ||
291 | |||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | |||
296 | void dc_stream_log( | ||
297 | const struct dc_stream *stream, | ||
298 | struct dal_logger *dm_logger, | ||
299 | enum dc_log_type log_type) | ||
300 | { | ||
301 | const struct core_stream *core_stream = | ||
302 | DC_STREAM_TO_CORE(stream); | ||
303 | |||
304 | dm_logger_write(dm_logger, | ||
305 | log_type, | ||
306 | "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d;\n", | ||
307 | core_stream, | ||
308 | core_stream->public.src.x, | ||
309 | core_stream->public.src.y, | ||
310 | core_stream->public.src.width, | ||
311 | core_stream->public.src.height, | ||
312 | core_stream->public.dst.x, | ||
313 | core_stream->public.dst.y, | ||
314 | core_stream->public.dst.width, | ||
315 | core_stream->public.dst.height); | ||
316 | dm_logger_write(dm_logger, | ||
317 | log_type, | ||
318 | "\tpix_clk_khz: %d, h_total: %d, v_total: %d\n", | ||
319 | core_stream->public.timing.pix_clk_khz, | ||
320 | core_stream->public.timing.h_total, | ||
321 | core_stream->public.timing.v_total); | ||
322 | dm_logger_write(dm_logger, | ||
323 | log_type, | ||
324 | "\tsink name: %s, serial: %d\n", | ||
325 | core_stream->sink->public.edid_caps.display_name, | ||
326 | core_stream->sink->public.edid_caps.serial_number); | ||
327 | dm_logger_write(dm_logger, | ||
328 | log_type, | ||
329 | "\tlink: %d\n", | ||
330 | core_stream->sink->link->public.link_index); | ||
331 | } | ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_target.c b/drivers/gpu/drm/amd/display/dc/core/dc_target.c deleted file mode 100644 index 2d25b00b4bff..000000000000 --- a/drivers/gpu/drm/amd/display/dc/core/dc_target.c +++ /dev/null | |||
@@ -1,333 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2012-15 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include "dm_services.h" | ||
27 | #include "core_types.h" | ||
28 | #include "hw_sequencer.h" | ||
29 | #include "resource.h" | ||
30 | #include "ipp.h" | ||
31 | #include "timing_generator.h" | ||
32 | |||
33 | struct target { | ||
34 | struct core_target protected; | ||
35 | int ref_count; | ||
36 | }; | ||
37 | |||
38 | #define DC_TARGET_TO_TARGET(dc_target) \ | ||
39 | container_of(dc_target, struct target, protected.public) | ||
40 | #define CORE_TARGET_TO_TARGET(core_target) \ | ||
41 | container_of(core_target, struct target, protected) | ||
42 | |||
43 | static void construct( | ||
44 | struct core_target *target, | ||
45 | struct dc_context *ctx, | ||
46 | struct dc_stream *dc_streams[], | ||
47 | uint8_t stream_count) | ||
48 | { | ||
49 | uint8_t i; | ||
50 | for (i = 0; i < stream_count; i++) { | ||
51 | target->public.streams[i] = dc_streams[i]; | ||
52 | dc_stream_retain(dc_streams[i]); | ||
53 | } | ||
54 | |||
55 | target->ctx = ctx; | ||
56 | target->public.stream_count = stream_count; | ||
57 | } | ||
58 | |||
59 | static void destruct(struct core_target *core_target) | ||
60 | { | ||
61 | int i; | ||
62 | |||
63 | for (i = 0; i < core_target->public.stream_count; i++) { | ||
64 | dc_stream_release( | ||
65 | (struct dc_stream *)core_target->public.streams[i]); | ||
66 | core_target->public.streams[i] = NULL; | ||
67 | } | ||
68 | } | ||
69 | |||
70 | void dc_target_retain(const struct dc_target *dc_target) | ||
71 | { | ||
72 | struct target *target = DC_TARGET_TO_TARGET(dc_target); | ||
73 | |||
74 | ASSERT(target->ref_count > 0); | ||
75 | target->ref_count++; | ||
76 | } | ||
77 | |||
78 | void dc_target_release(const struct dc_target *dc_target) | ||
79 | { | ||
80 | struct target *target = DC_TARGET_TO_TARGET(dc_target); | ||
81 | struct core_target *protected = DC_TARGET_TO_CORE(dc_target); | ||
82 | |||
83 | ASSERT(target->ref_count > 0); | ||
84 | target->ref_count--; | ||
85 | |||
86 | if (target->ref_count == 0) { | ||
87 | destruct(protected); | ||
88 | dm_free(target); | ||
89 | } | ||
90 | } | ||
91 | |||
92 | const struct dc_target_status *dc_target_get_status( | ||
93 | const struct dc_target* dc_target) | ||
94 | { | ||
95 | uint8_t i; | ||
96 | struct core_target* target = DC_TARGET_TO_CORE(dc_target); | ||
97 | struct core_dc *dc = DC_TO_CORE(target->ctx->dc); | ||
98 | |||
99 | for (i = 0; i < dc->current_context->target_count; i++) | ||
100 | if (target == dc->current_context->targets[i]) | ||
101 | return &dc->current_context->target_status[i]; | ||
102 | |||
103 | return NULL; | ||
104 | } | ||
105 | |||
106 | struct dc_target *dc_create_target_for_streams( | ||
107 | struct dc_stream *dc_streams[], | ||
108 | uint8_t stream_count) | ||
109 | { | ||
110 | struct core_stream *stream; | ||
111 | struct target *target; | ||
112 | |||
113 | if (0 == stream_count) | ||
114 | goto target_alloc_fail; | ||
115 | |||
116 | stream = DC_STREAM_TO_CORE(dc_streams[0]); | ||
117 | |||
118 | target = dm_alloc(sizeof(struct target)); | ||
119 | |||
120 | if (NULL == target) | ||
121 | goto target_alloc_fail; | ||
122 | |||
123 | construct(&target->protected, stream->ctx, dc_streams, stream_count); | ||
124 | |||
125 | target->ref_count++; | ||
126 | |||
127 | return &target->protected.public; | ||
128 | |||
129 | target_alloc_fail: | ||
130 | return NULL; | ||
131 | } | ||
132 | |||
133 | bool dc_target_is_connected_to_sink( | ||
134 | const struct dc_target * dc_target, | ||
135 | const struct dc_sink *dc_sink) | ||
136 | { | ||
137 | struct core_target *target = DC_TARGET_TO_CORE(dc_target); | ||
138 | uint8_t i; | ||
139 | for (i = 0; i < target->public.stream_count; i++) { | ||
140 | if (target->public.streams[i]->sink == dc_sink) | ||
141 | return true; | ||
142 | } | ||
143 | return false; | ||
144 | } | ||
145 | |||
146 | /** | ||
147 | * Update the cursor attributes and set cursor surface address | ||
148 | */ | ||
149 | bool dc_target_set_cursor_attributes( | ||
150 | struct dc_target *dc_target, | ||
151 | const struct dc_cursor_attributes *attributes) | ||
152 | { | ||
153 | int i, j; | ||
154 | struct core_target *target; | ||
155 | struct core_dc *core_dc; | ||
156 | struct resource_context *res_ctx; | ||
157 | bool ret = false; | ||
158 | |||
159 | if (NULL == dc_target) { | ||
160 | dm_error("DC: dc_target is NULL!\n"); | ||
161 | return false; | ||
162 | } | ||
163 | if (NULL == attributes) { | ||
164 | dm_error("DC: attributes is NULL!\n"); | ||
165 | return false; | ||
166 | } | ||
167 | |||
168 | target = DC_TARGET_TO_CORE(dc_target); | ||
169 | core_dc = DC_TO_CORE(target->ctx->dc); | ||
170 | res_ctx = &core_dc->current_context->res_ctx; | ||
171 | |||
172 | for (i = 0; i < dc_target->stream_count; i++) { | ||
173 | const struct dc_stream *stream = dc_target->streams[i]; | ||
174 | |||
175 | for (j = 0; j < MAX_PIPES; j++) { | ||
176 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[j]; | ||
177 | |||
178 | if (&pipe_ctx->stream->public == stream) { | ||
179 | struct input_pixel_processor *ipp = pipe_ctx->ipp; | ||
180 | |||
181 | if (ipp->funcs->ipp_cursor_set_attributes( | ||
182 | ipp, attributes)) | ||
183 | ret = true; | ||
184 | } | ||
185 | } | ||
186 | } | ||
187 | |||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | bool dc_target_set_cursor_position( | ||
192 | struct dc_target *dc_target, | ||
193 | const struct dc_cursor_position *position) | ||
194 | { | ||
195 | int i, j; | ||
196 | struct core_target *target = DC_TARGET_TO_CORE(dc_target); | ||
197 | struct core_dc *core_dc = DC_TO_CORE(target->ctx->dc); | ||
198 | struct resource_context *res_ctx = &core_dc->current_context->res_ctx; | ||
199 | bool ret = false; | ||
200 | |||
201 | if (NULL == dc_target) { | ||
202 | dm_error("DC: dc_target is NULL!\n"); | ||
203 | return false; | ||
204 | } | ||
205 | |||
206 | if (NULL == position) { | ||
207 | dm_error("DC: cursor position is NULL!\n"); | ||
208 | return false; | ||
209 | } | ||
210 | |||
211 | for (i = 0; i < dc_target->stream_count; i++) { | ||
212 | const struct dc_stream *stream = dc_target->streams[i]; | ||
213 | |||
214 | for (j = 0; j < MAX_PIPES; j++) { | ||
215 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[j]; | ||
216 | |||
217 | if (&pipe_ctx->stream->public == stream) { | ||
218 | struct input_pixel_processor *ipp = pipe_ctx->ipp; | ||
219 | struct dc_cursor_mi_param param = { | ||
220 | .pixel_clk_khz = stream->timing.pix_clk_khz, | ||
221 | .ref_clk_khz = 48000,/*todo refclk*/ | ||
222 | .viewport_x_start = pipe_ctx->scl_data.viewport.x, | ||
223 | .viewport_width = pipe_ctx->scl_data.viewport.width, | ||
224 | .h_scale_ratio = pipe_ctx->scl_data.ratios.horz, | ||
225 | }; | ||
226 | |||
227 | ipp->funcs->ipp_cursor_set_position(ipp, position, ¶m); | ||
228 | ret = true; | ||
229 | } | ||
230 | } | ||
231 | } | ||
232 | |||
233 | return ret; | ||
234 | } | ||
235 | |||
236 | uint32_t dc_target_get_vblank_counter(const struct dc_target *dc_target) | ||
237 | { | ||
238 | uint8_t i, j; | ||
239 | struct core_target *target = DC_TARGET_TO_CORE(dc_target); | ||
240 | struct core_dc *core_dc = DC_TO_CORE(target->ctx->dc); | ||
241 | struct resource_context *res_ctx = | ||
242 | &core_dc->current_context->res_ctx; | ||
243 | |||
244 | for (i = 0; i < target->public.stream_count; i++) { | ||
245 | for (j = 0; j < MAX_PIPES; j++) { | ||
246 | struct timing_generator *tg = res_ctx->pipe_ctx[j].tg; | ||
247 | |||
248 | if (res_ctx->pipe_ctx[j].stream != | ||
249 | DC_STREAM_TO_CORE(target->public.streams[i])) | ||
250 | continue; | ||
251 | |||
252 | return tg->funcs->get_frame_count(tg); | ||
253 | } | ||
254 | } | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | uint32_t dc_target_get_scanoutpos( | ||
260 | const struct dc_target *dc_target, | ||
261 | uint32_t *vbl, | ||
262 | uint32_t *position) | ||
263 | { | ||
264 | uint8_t i, j; | ||
265 | struct core_target *target = DC_TARGET_TO_CORE(dc_target); | ||
266 | struct core_dc *core_dc = DC_TO_CORE(target->ctx->dc); | ||
267 | struct resource_context *res_ctx = | ||
268 | &core_dc->current_context->res_ctx; | ||
269 | |||
270 | for (i = 0; i < target->public.stream_count; i++) { | ||
271 | for (j = 0; j < MAX_PIPES; j++) { | ||
272 | struct timing_generator *tg = res_ctx->pipe_ctx[j].tg; | ||
273 | |||
274 | if (res_ctx->pipe_ctx[j].stream != | ||
275 | DC_STREAM_TO_CORE(target->public.streams[i])) | ||
276 | continue; | ||
277 | |||
278 | return tg->funcs->get_scanoutpos(tg, vbl, position); | ||
279 | } | ||
280 | } | ||
281 | |||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | void dc_target_log( | ||
286 | const struct dc_target *dc_target, | ||
287 | struct dal_logger *dm_logger, | ||
288 | enum dc_log_type log_type) | ||
289 | { | ||
290 | int i; | ||
291 | |||
292 | const struct core_target *core_target = | ||
293 | CONST_DC_TARGET_TO_CORE(dc_target); | ||
294 | |||
295 | dm_logger_write(dm_logger, | ||
296 | log_type, | ||
297 | "core_target 0x%x: stream_count=%d\n", | ||
298 | core_target, | ||
299 | core_target->public.stream_count); | ||
300 | |||
301 | for (i = 0; i < core_target->public.stream_count; i++) { | ||
302 | const struct core_stream *core_stream = | ||
303 | DC_STREAM_TO_CORE(core_target->public.streams[i]); | ||
304 | |||
305 | dm_logger_write(dm_logger, | ||
306 | log_type, | ||
307 | "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d;\n", | ||
308 | core_stream, | ||
309 | core_stream->public.src.x, | ||
310 | core_stream->public.src.y, | ||
311 | core_stream->public.src.width, | ||
312 | core_stream->public.src.height, | ||
313 | core_stream->public.dst.x, | ||
314 | core_stream->public.dst.y, | ||
315 | core_stream->public.dst.width, | ||
316 | core_stream->public.dst.height); | ||
317 | dm_logger_write(dm_logger, | ||
318 | log_type, | ||
319 | "\tpix_clk_khz: %d, h_total: %d, v_total: %d\n", | ||
320 | core_stream->public.timing.pix_clk_khz, | ||
321 | core_stream->public.timing.h_total, | ||
322 | core_stream->public.timing.v_total); | ||
323 | dm_logger_write(dm_logger, | ||
324 | log_type, | ||
325 | "\tsink name: %s, serial: %d\n", | ||
326 | core_stream->sink->public.edid_caps.display_name, | ||
327 | core_stream->sink->public.edid_caps.serial_number); | ||
328 | dm_logger_write(dm_logger, | ||
329 | log_type, | ||
330 | "\tlink: %d\n", | ||
331 | core_stream->sink->link->public.link_index); | ||
332 | } | ||
333 | } | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index e8cb7a4dee80..b814e7b76bbc 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h | |||
@@ -32,8 +32,8 @@ | |||
32 | #include "gpio_types.h" | 32 | #include "gpio_types.h" |
33 | #include "link_service_types.h" | 33 | #include "link_service_types.h" |
34 | 34 | ||
35 | #define MAX_TARGETS 6 | ||
36 | #define MAX_SURFACES 3 | 35 | #define MAX_SURFACES 3 |
36 | #define MAX_STREAMS 6 | ||
37 | #define MAX_SINKS_PER_LINK 4 | 37 | #define MAX_SINKS_PER_LINK 4 |
38 | 38 | ||
39 | /******************************************************************************* | 39 | /******************************************************************************* |
@@ -41,7 +41,7 @@ | |||
41 | ******************************************************************************/ | 41 | ******************************************************************************/ |
42 | 42 | ||
43 | struct dc_caps { | 43 | struct dc_caps { |
44 | uint32_t max_targets; | 44 | uint32_t max_streams; |
45 | uint32_t max_links; | 45 | uint32_t max_links; |
46 | uint32_t max_audios; | 46 | uint32_t max_audios; |
47 | uint32_t max_slave_planes; | 47 | uint32_t max_slave_planes; |
@@ -139,7 +139,6 @@ struct dc_config { | |||
139 | struct dc_debug { | 139 | struct dc_debug { |
140 | bool surface_visual_confirm; | 140 | bool surface_visual_confirm; |
141 | bool max_disp_clk; | 141 | bool max_disp_clk; |
142 | bool target_trace; | ||
143 | bool surface_trace; | 142 | bool surface_trace; |
144 | bool timing_trace; | 143 | bool timing_trace; |
145 | bool validation_trace; | 144 | bool validation_trace; |
@@ -351,95 +350,91 @@ void dc_flip_surface_addrs(struct dc *dc, | |||
351 | uint32_t count); | 350 | uint32_t count); |
352 | 351 | ||
353 | /* | 352 | /* |
354 | * Set up surface attributes and associate to a target | 353 | * Set up surface attributes and associate to a stream |
355 | * The surfaces parameter is an absolute set of all surface active for the target. | 354 | * The surfaces parameter is an absolute set of all surface active for the stream. |
356 | * If no surfaces are provided, the target will be blanked; no memory read. | 355 | * If no surfaces are provided, the stream will be blanked; no memory read. |
357 | * Any flip related attribute changes must be done through this interface. | 356 | * Any flip related attribute changes must be done through this interface. |
358 | * | 357 | * |
359 | * After this call: | 358 | * After this call: |
360 | * Surfaces attributes are programmed and configured to be composed into target. | 359 | * Surfaces attributes are programmed and configured to be composed into stream. |
361 | * This does not trigger a flip. No surface address is programmed. | 360 | * This does not trigger a flip. No surface address is programmed. |
362 | */ | 361 | */ |
363 | 362 | ||
364 | bool dc_commit_surfaces_to_target( | 363 | bool dc_commit_surfaces_to_stream( |
365 | struct dc *dc, | 364 | struct dc *dc, |
366 | const struct dc_surface **dc_surfaces, | 365 | const struct dc_surface **dc_surfaces, |
367 | uint8_t surface_count, | 366 | uint8_t surface_count, |
368 | struct dc_target *dc_target); | 367 | const struct dc_stream *stream); |
369 | 368 | ||
370 | bool dc_pre_update_surfaces_to_target( | 369 | bool dc_pre_update_surfaces_to_stream( |
371 | struct dc *dc, | 370 | struct dc *dc, |
372 | const struct dc_surface *const *new_surfaces, | 371 | const struct dc_surface *const *new_surfaces, |
373 | uint8_t new_surface_count, | 372 | uint8_t new_surface_count, |
374 | struct dc_target *dc_target); | 373 | const struct dc_stream *stream); |
375 | 374 | ||
376 | bool dc_post_update_surfaces_to_target( | 375 | bool dc_post_update_surfaces_to_stream( |
377 | struct dc *dc); | 376 | struct dc *dc); |
378 | 377 | ||
379 | void dc_update_surfaces_for_target(struct dc *dc, struct dc_surface_update *updates, | 378 | void dc_update_surfaces_for_stream(struct dc *dc, struct dc_surface_update *updates, |
380 | int surface_count, struct dc_target *dc_target); | 379 | int surface_count, const struct dc_stream *stream); |
381 | 380 | ||
382 | /******************************************************************************* | 381 | /******************************************************************************* |
383 | * Target Interfaces | 382 | * Stream Interfaces |
384 | ******************************************************************************/ | 383 | ******************************************************************************/ |
385 | #define MAX_STREAM_NUM 1 | 384 | struct dc_stream { |
385 | const struct dc_sink *sink; | ||
386 | struct dc_crtc_timing timing; | ||
386 | 387 | ||
387 | struct dc_target { | 388 | enum dc_color_space output_color_space; |
388 | uint8_t stream_count; | ||
389 | const struct dc_stream *streams[MAX_STREAM_NUM]; | ||
390 | }; | ||
391 | 389 | ||
392 | /* | 390 | struct rect src; /* composition area */ |
393 | * Target status is returned from dc_target_get_status in order to get the | 391 | struct rect dst; /* stream addressable area */ |
394 | * the IRQ source, current frame counter and currently attached surfaces. | ||
395 | */ | ||
396 | struct dc_target_status { | ||
397 | int primary_otg_inst; | ||
398 | int cur_frame_count; | ||
399 | int surface_count; | ||
400 | const struct dc_surface *surfaces[MAX_SURFACE_NUM]; | ||
401 | }; | ||
402 | 392 | ||
403 | struct dc_target *dc_create_target_for_streams( | 393 | struct audio_info audio_info; |
404 | struct dc_stream *dc_streams[], | 394 | |
405 | uint8_t stream_count); | 395 | bool ignore_msa_timing_param; |
396 | |||
397 | struct freesync_context freesync_ctx; | ||
398 | |||
399 | const struct dc_transfer_func *out_transfer_func; | ||
400 | struct colorspace_transform gamut_remap_matrix; | ||
401 | struct csc_transform csc_color_matrix; | ||
402 | |||
403 | /* TODO: dithering */ | ||
404 | /* TODO: custom INFO packets */ | ||
405 | /* TODO: ABM info (DMCU) */ | ||
406 | /* TODO: PSR info */ | ||
407 | /* TODO: CEA VIC */ | ||
408 | }; | ||
406 | 409 | ||
407 | /* | 410 | /* |
408 | * Get the current target status. | 411 | * Log the current stream state. |
409 | */ | 412 | */ |
410 | const struct dc_target_status *dc_target_get_status( | 413 | void dc_stream_log( |
411 | const struct dc_target* dc_target); | 414 | const struct dc_stream *stream, |
412 | |||
413 | void dc_target_retain(const struct dc_target *dc_target); | ||
414 | void dc_target_release(const struct dc_target *dc_target); | ||
415 | void dc_target_log( | ||
416 | const struct dc_target *dc_target, | ||
417 | struct dal_logger *dc_logger, | 415 | struct dal_logger *dc_logger, |
418 | enum dc_log_type log_type); | 416 | enum dc_log_type log_type); |
419 | 417 | ||
420 | uint8_t dc_get_current_target_count(const struct dc *dc); | 418 | uint8_t dc_get_current_stream_count(const struct dc *dc); |
421 | struct dc_target *dc_get_target_at_index(const struct dc *dc, uint8_t i); | 419 | struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i); |
422 | 420 | ||
423 | bool dc_target_is_connected_to_sink( | 421 | /* |
424 | const struct dc_target *dc_target, | 422 | * Return the current frame counter. |
425 | const struct dc_sink *dc_sink); | 423 | */ |
426 | 424 | uint32_t dc_stream_get_vblank_counter(const struct dc_stream *stream); | |
427 | uint32_t dc_target_get_vblank_counter(const struct dc_target *dc_target); | ||
428 | 425 | ||
429 | /* TODO: Return parsed values rather than direct register read | 426 | /* TODO: Return parsed values rather than direct register read |
430 | * This has a dependency on the caller (amdgpu_get_crtc_scanoutpos) | 427 | * This has a dependency on the caller (amdgpu_get_crtc_scanoutpos) |
431 | * being refactored properly to be dce-specific | 428 | * being refactored properly to be dce-specific |
432 | */ | 429 | */ |
433 | uint32_t dc_target_get_scanoutpos( | 430 | uint32_t dc_stream_get_scanoutpos( |
434 | const struct dc_target *dc_target, | 431 | const struct dc_stream *stream, uint32_t *vbl, uint32_t *position); |
435 | uint32_t *vbl, | ||
436 | uint32_t *position); | ||
437 | 432 | ||
438 | /* | 433 | /* |
439 | * Structure to store surface/target associations for validation | 434 | * Structure to store surface/stream associations for validation |
440 | */ | 435 | */ |
441 | struct dc_validation_set { | 436 | struct dc_validation_set { |
442 | const struct dc_target *target; | 437 | const struct dc_stream *stream; |
443 | const struct dc_surface *surfaces[MAX_SURFACES]; | 438 | const struct dc_surface *surfaces[MAX_SURFACES]; |
444 | uint8_t surface_count; | 439 | uint8_t surface_count; |
445 | }; | 440 | }; |
@@ -456,8 +451,8 @@ bool dc_validate_resources( | |||
456 | uint8_t set_count); | 451 | uint8_t set_count); |
457 | 452 | ||
458 | /* | 453 | /* |
459 | * This function takes a target and checks if it is guaranteed to be supported. | 454 | * This function takes a stream and checks if it is guaranteed to be supported. |
460 | * Guaranteed means that MAX_COFUNC*target is supported. | 455 | * Guaranteed means that MAX_COFUNC similar streams are supported. |
461 | * | 456 | * |
462 | * After this call: | 457 | * After this call: |
463 | * No hardware is programmed for call. Only validation is done. | 458 | * No hardware is programmed for call. Only validation is done. |
@@ -465,49 +460,20 @@ bool dc_validate_resources( | |||
465 | 460 | ||
466 | bool dc_validate_guaranteed( | 461 | bool dc_validate_guaranteed( |
467 | const struct dc *dc, | 462 | const struct dc *dc, |
468 | const struct dc_target *dc_target); | 463 | const struct dc_stream *stream); |
469 | 464 | ||
470 | /* | 465 | /* |
471 | * Set up streams and links associated to targets to drive sinks | 466 | * Set up streams and links associated to drive sinks |
472 | * The targets parameter is an absolute set of all active targets. | 467 | * The streams parameter is an absolute set of all active streams. |
473 | * | 468 | * |
474 | * After this call: | 469 | * After this call: |
475 | * Phy, Encoder, Timing Generator are programmed and enabled. | 470 | * Phy, Encoder, Timing Generator are programmed and enabled. |
476 | * New targets are enabled with blank stream; no memory read. | 471 | * New streams are enabled with blank stream; no memory read. |
477 | */ | 472 | */ |
478 | bool dc_commit_targets( | 473 | bool dc_commit_streams( |
479 | struct dc *dc, | 474 | struct dc *dc, |
480 | struct dc_target *targets[], | 475 | const struct dc_stream *streams[], |
481 | uint8_t target_count); | 476 | uint8_t stream_count); |
482 | |||
483 | /******************************************************************************* | ||
484 | * Stream Interfaces | ||
485 | ******************************************************************************/ | ||
486 | struct dc_stream { | ||
487 | const struct dc_sink *sink; | ||
488 | struct dc_crtc_timing timing; | ||
489 | |||
490 | enum dc_color_space output_color_space; | ||
491 | |||
492 | struct rect src; /* viewport in target space*/ | ||
493 | struct rect dst; /* stream addressable area */ | ||
494 | |||
495 | struct audio_info audio_info; | ||
496 | |||
497 | bool ignore_msa_timing_param; | ||
498 | |||
499 | struct freesync_context freesync_ctx; | ||
500 | |||
501 | const struct dc_transfer_func *out_transfer_func; | ||
502 | struct colorspace_transform gamut_remap_matrix; | ||
503 | struct csc_transform csc_color_matrix; | ||
504 | |||
505 | /* TODO: dithering */ | ||
506 | /* TODO: custom INFO packets */ | ||
507 | /* TODO: ABM info (DMCU) */ | ||
508 | /* TODO: PSR info */ | ||
509 | /* TODO: CEA VIC */ | ||
510 | }; | ||
511 | 477 | ||
512 | /** | 478 | /** |
513 | * Create a new default stream for the requested sink | 479 | * Create a new default stream for the requested sink |
@@ -518,6 +484,10 @@ void dc_stream_retain(const struct dc_stream *dc_stream); | |||
518 | void dc_stream_release(const struct dc_stream *dc_stream); | 484 | void dc_stream_release(const struct dc_stream *dc_stream); |
519 | 485 | ||
520 | struct dc_stream_status { | 486 | struct dc_stream_status { |
487 | int primary_otg_inst; | ||
488 | int surface_count; | ||
489 | const struct dc_surface *surfaces[MAX_SURFACE_NUM]; | ||
490 | |||
521 | /* | 491 | /* |
522 | * link this stream passes through | 492 | * link this stream passes through |
523 | */ | 493 | */ |
@@ -691,15 +661,15 @@ struct dc_sink_init_data { | |||
691 | struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params); | 661 | struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params); |
692 | 662 | ||
693 | /******************************************************************************* | 663 | /******************************************************************************* |
694 | * Cursor interfaces - To manages the cursor within a target | 664 | * Cursor interfaces - To manages the cursor within a stream |
695 | ******************************************************************************/ | 665 | ******************************************************************************/ |
696 | /* TODO: Deprecated once we switch to dc_set_cursor_position */ | 666 | /* TODO: Deprecated once we switch to dc_set_cursor_position */ |
697 | bool dc_target_set_cursor_attributes( | 667 | bool dc_stream_set_cursor_attributes( |
698 | struct dc_target *dc_target, | 668 | const struct dc_stream *stream, |
699 | const struct dc_cursor_attributes *attributes); | 669 | const struct dc_cursor_attributes *attributes); |
700 | 670 | ||
701 | bool dc_target_set_cursor_position( | 671 | bool dc_stream_set_cursor_position( |
702 | struct dc_target *dc_target, | 672 | const struct dc_stream *stream, |
703 | const struct dc_cursor_position *position); | 673 | const struct dc_cursor_position *position); |
704 | 674 | ||
705 | /* Newer interfaces */ | 675 | /* Newer interfaces */ |
@@ -708,36 +678,6 @@ struct dc_cursor { | |||
708 | struct dc_cursor_attributes attributes; | 678 | struct dc_cursor_attributes attributes; |
709 | }; | 679 | }; |
710 | 680 | ||
711 | /* | ||
712 | * Create a new cursor with default values for a given target. | ||
713 | */ | ||
714 | struct dc_cursor *dc_create_cursor_for_target( | ||
715 | const struct dc *dc, | ||
716 | struct dc_target *dc_target); | ||
717 | |||
718 | /** | ||
719 | * Commit cursor attribute changes such as pixel format and dimensions and | ||
720 | * surface address. | ||
721 | * | ||
722 | * After this call: | ||
723 | * Cursor address and format is programmed to the new values. | ||
724 | * Cursor position is unmodified. | ||
725 | */ | ||
726 | bool dc_commit_cursor( | ||
727 | const struct dc *dc, | ||
728 | struct dc_cursor *cursor); | ||
729 | |||
730 | /* | ||
731 | * Optimized cursor position update | ||
732 | * | ||
733 | * After this call: | ||
734 | * Cursor position will be programmed as well as enable/disable bit. | ||
735 | */ | ||
736 | bool dc_set_cursor_position( | ||
737 | const struct dc *dc, | ||
738 | struct dc_cursor *cursor, | ||
739 | struct dc_cursor_position *pos); | ||
740 | |||
741 | /******************************************************************************* | 681 | /******************************************************************************* |
742 | * Interrupt interfaces | 682 | * Interrupt interfaces |
743 | ******************************************************************************/ | 683 | ******************************************************************************/ |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index ae9fcca121e6..242dd7b3b6b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h | |||
@@ -34,7 +34,6 @@ | |||
34 | 34 | ||
35 | /* forward declarations */ | 35 | /* forward declarations */ |
36 | struct dc_surface; | 36 | struct dc_surface; |
37 | struct dc_target; | ||
38 | struct dc_stream; | 37 | struct dc_stream; |
39 | struct dc_link; | 38 | struct dc_link; |
40 | struct dc_sink; | 39 | struct dc_sink; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 082f1f053a3a..ae0e7eac2c9d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | |||
@@ -741,53 +741,48 @@ static enum dc_status validate_mapped_resource( | |||
741 | struct validate_context *context) | 741 | struct validate_context *context) |
742 | { | 742 | { |
743 | enum dc_status status = DC_OK; | 743 | enum dc_status status = DC_OK; |
744 | uint8_t i, j, k; | 744 | uint8_t i, j; |
745 | 745 | ||
746 | for (i = 0; i < context->target_count; i++) { | 746 | for (i = 0; i < context->stream_count; i++) { |
747 | struct core_target *target = context->targets[i]; | 747 | struct core_stream *stream = context->streams[i]; |
748 | struct core_link *link = stream->sink->link; | ||
748 | 749 | ||
749 | for (j = 0; j < target->public.stream_count; j++) { | 750 | if (resource_is_stream_unchanged(dc->current_context, stream)) |
750 | struct core_stream *stream = | 751 | continue; |
751 | DC_STREAM_TO_CORE(target->public.streams[j]); | ||
752 | struct core_link *link = stream->sink->link; | ||
753 | |||
754 | if (resource_is_stream_unchanged(dc->current_context, stream)) | ||
755 | continue; | ||
756 | 752 | ||
757 | for (k = 0; k < MAX_PIPES; k++) { | 753 | for (j = 0; j < MAX_PIPES; j++) { |
758 | struct pipe_ctx *pipe_ctx = | 754 | struct pipe_ctx *pipe_ctx = |
759 | &context->res_ctx.pipe_ctx[k]; | 755 | &context->res_ctx.pipe_ctx[j]; |
760 | 756 | ||
761 | if (context->res_ctx.pipe_ctx[k].stream != stream) | 757 | if (context->res_ctx.pipe_ctx[j].stream != stream) |
762 | continue; | 758 | continue; |
763 | 759 | ||
764 | if (!pipe_ctx->tg->funcs->validate_timing( | 760 | if (!pipe_ctx->tg->funcs->validate_timing( |
765 | pipe_ctx->tg, &stream->public.timing)) | 761 | pipe_ctx->tg, &stream->public.timing)) |
766 | return DC_FAIL_CONTROLLER_VALIDATE; | 762 | return DC_FAIL_CONTROLLER_VALIDATE; |
767 | 763 | ||
768 | status = dce110_resource_build_pipe_hw_param(pipe_ctx); | 764 | status = dce110_resource_build_pipe_hw_param(pipe_ctx); |
769 | 765 | ||
770 | if (status != DC_OK) | 766 | if (status != DC_OK) |
771 | return status; | 767 | return status; |
772 | 768 | ||
773 | if (!link->link_enc->funcs->validate_output_with_stream( | 769 | if (!link->link_enc->funcs->validate_output_with_stream( |
774 | link->link_enc, | 770 | link->link_enc, |
775 | pipe_ctx)) | 771 | pipe_ctx)) |
776 | return DC_FAIL_ENC_VALIDATE; | 772 | return DC_FAIL_ENC_VALIDATE; |
777 | 773 | ||
778 | /* TODO: validate audio ASIC caps, encoder */ | 774 | /* TODO: validate audio ASIC caps, encoder */ |
779 | status = dc_link_validate_mode_timing(stream, | 775 | status = dc_link_validate_mode_timing(stream, |
780 | link, | 776 | link, |
781 | &stream->public.timing); | 777 | &stream->public.timing); |
782 | 778 | ||
783 | if (status != DC_OK) | 779 | if (status != DC_OK) |
784 | return status; | 780 | return status; |
785 | 781 | ||
786 | resource_build_info_frame(pipe_ctx); | 782 | resource_build_info_frame(pipe_ctx); |
787 | 783 | ||
788 | /* do not need to validate non root pipes */ | 784 | /* do not need to validate non root pipes */ |
789 | break; | 785 | break; |
790 | } | ||
791 | } | 786 | } |
792 | } | 787 | } |
793 | 788 | ||
@@ -818,9 +813,9 @@ static bool dce100_validate_surface_sets( | |||
818 | return false; | 813 | return false; |
819 | 814 | ||
820 | if (set[i].surfaces[0]->clip_rect.width | 815 | if (set[i].surfaces[0]->clip_rect.width |
821 | != set[i].target->streams[0]->src.width | 816 | != set[i].stream->src.width |
822 | || set[i].surfaces[0]->clip_rect.height | 817 | || set[i].surfaces[0]->clip_rect.height |
823 | != set[i].target->streams[0]->src.height) | 818 | != set[i].stream->src.height) |
824 | return false; | 819 | return false; |
825 | if (set[i].surfaces[0]->format | 820 | if (set[i].surfaces[0]->format |
826 | >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) | 821 | >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) |
@@ -846,9 +841,9 @@ enum dc_status dce100_validate_with_context( | |||
846 | context->res_ctx.pool = dc->res_pool; | 841 | context->res_ctx.pool = dc->res_pool; |
847 | 842 | ||
848 | for (i = 0; i < set_count; i++) { | 843 | for (i = 0; i < set_count; i++) { |
849 | context->targets[i] = DC_TARGET_TO_CORE(set[i].target); | 844 | context->streams[i] = DC_STREAM_TO_CORE(set[i].stream); |
850 | dc_target_retain(&context->targets[i]->public); | 845 | dc_stream_retain(&context->streams[i]->public); |
851 | context->target_count++; | 846 | context->stream_count++; |
852 | } | 847 | } |
853 | 848 | ||
854 | result = resource_map_pool_resources(dc, context); | 849 | result = resource_map_pool_resources(dc, context); |
@@ -858,7 +853,7 @@ enum dc_status dce100_validate_with_context( | |||
858 | 853 | ||
859 | if (!resource_validate_attach_surfaces( | 854 | if (!resource_validate_attach_surfaces( |
860 | set, set_count, dc->current_context, context)) { | 855 | set, set_count, dc->current_context, context)) { |
861 | DC_ERROR("Failed to attach surface to target!\n"); | 856 | DC_ERROR("Failed to attach surface to stream!\n"); |
862 | return DC_FAIL_ATTACH_SURFACES; | 857 | return DC_FAIL_ATTACH_SURFACES; |
863 | } | 858 | } |
864 | 859 | ||
@@ -876,16 +871,16 @@ enum dc_status dce100_validate_with_context( | |||
876 | 871 | ||
877 | enum dc_status dce100_validate_guaranteed( | 872 | enum dc_status dce100_validate_guaranteed( |
878 | const struct core_dc *dc, | 873 | const struct core_dc *dc, |
879 | const struct dc_target *dc_target, | 874 | const struct dc_stream *dc_stream, |
880 | struct validate_context *context) | 875 | struct validate_context *context) |
881 | { | 876 | { |
882 | enum dc_status result = DC_ERROR_UNEXPECTED; | 877 | enum dc_status result = DC_ERROR_UNEXPECTED; |
883 | 878 | ||
884 | context->res_ctx.pool = dc->res_pool; | 879 | context->res_ctx.pool = dc->res_pool; |
885 | 880 | ||
886 | context->targets[0] = DC_TARGET_TO_CORE(dc_target); | 881 | context->streams[0] = DC_STREAM_TO_CORE(dc_stream); |
887 | dc_target_retain(&context->targets[0]->public); | 882 | dc_stream_retain(&context->streams[0]->public); |
888 | context->target_count++; | 883 | context->stream_count++; |
889 | 884 | ||
890 | result = resource_map_pool_resources(dc, context); | 885 | result = resource_map_pool_resources(dc, context); |
891 | 886 | ||
@@ -896,8 +891,8 @@ enum dc_status dce100_validate_guaranteed( | |||
896 | result = validate_mapped_resource(dc, context); | 891 | result = validate_mapped_resource(dc, context); |
897 | 892 | ||
898 | if (result == DC_OK) { | 893 | if (result == DC_OK) { |
899 | validate_guaranteed_copy_target( | 894 | validate_guaranteed_copy_streams( |
900 | context, dc->public.caps.max_targets); | 895 | context, dc->public.caps.max_streams); |
901 | result = resource_build_scaling_params_for_context(dc, context); | 896 | result = resource_build_scaling_params_for_context(dc, context); |
902 | } | 897 | } |
903 | 898 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 751dbb88c265..415b12accd2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
@@ -753,7 +753,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( | |||
753 | stream->public.timing.h_total, | 753 | stream->public.timing.h_total, |
754 | stream->public.timing.v_total, | 754 | stream->public.timing.v_total, |
755 | stream->public.timing.pix_clk_khz, | 755 | stream->public.timing.pix_clk_khz, |
756 | context->target_count); | 756 | context->stream_count); |
757 | 757 | ||
758 | return DC_OK; | 758 | return DC_OK; |
759 | } | 759 | } |
@@ -1055,7 +1055,7 @@ static void reset_single_pipe_hw_ctx( | |||
1055 | } | 1055 | } |
1056 | pipe_ctx->tg->funcs->disable_crtc(pipe_ctx->tg); | 1056 | pipe_ctx->tg->funcs->disable_crtc(pipe_ctx->tg); |
1057 | pipe_ctx->mi->funcs->free_mem_input( | 1057 | pipe_ctx->mi->funcs->free_mem_input( |
1058 | pipe_ctx->mi, context->target_count); | 1058 | pipe_ctx->mi, context->stream_count); |
1059 | resource_unreference_clock_source( | 1059 | resource_unreference_clock_source( |
1060 | &context->res_ctx, &pipe_ctx->clock_source); | 1060 | &context->res_ctx, &pipe_ctx->clock_source); |
1061 | 1061 | ||
@@ -1254,7 +1254,7 @@ enum dc_status dce110_apply_ctx_to_hw( | |||
1254 | dc->hwss.reset_hw_ctx_wrap(dc, context); | 1254 | dc->hwss.reset_hw_ctx_wrap(dc, context); |
1255 | 1255 | ||
1256 | /* Skip applying if no targets */ | 1256 | /* Skip applying if no targets */ |
1257 | if (context->target_count <= 0) | 1257 | if (context->stream_count <= 0) |
1258 | return DC_OK; | 1258 | return DC_OK; |
1259 | 1259 | ||
1260 | if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { | 1260 | if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { |
@@ -1761,7 +1761,7 @@ static void dce110_power_on_pipe_if_needed( | |||
1761 | pipe_ctx->stream->public.timing.h_total, | 1761 | pipe_ctx->stream->public.timing.h_total, |
1762 | pipe_ctx->stream->public.timing.v_total, | 1762 | pipe_ctx->stream->public.timing.v_total, |
1763 | pipe_ctx->stream->public.timing.pix_clk_khz, | 1763 | pipe_ctx->stream->public.timing.pix_clk_khz, |
1764 | context->target_count); | 1764 | context->stream_count); |
1765 | 1765 | ||
1766 | /* TODO unhardcode*/ | 1766 | /* TODO unhardcode*/ |
1767 | color_space_to_black_color(dc, | 1767 | color_space_to_black_color(dc, |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 968ee99003fc..cfbb4ef21f8b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | |||
@@ -817,58 +817,53 @@ static enum dc_status validate_mapped_resource( | |||
817 | struct validate_context *context) | 817 | struct validate_context *context) |
818 | { | 818 | { |
819 | enum dc_status status = DC_OK; | 819 | enum dc_status status = DC_OK; |
820 | uint8_t i, j, k; | 820 | uint8_t i, j; |
821 | 821 | ||
822 | for (i = 0; i < context->target_count; i++) { | 822 | for (i = 0; i < context->stream_count; i++) { |
823 | struct core_target *target = context->targets[i]; | 823 | struct core_stream *stream = context->streams[i]; |
824 | struct core_link *link = stream->sink->link; | ||
824 | 825 | ||
825 | for (j = 0; j < target->public.stream_count; j++) { | 826 | if (resource_is_stream_unchanged(dc->current_context, stream)) |
826 | struct core_stream *stream = | 827 | continue; |
827 | DC_STREAM_TO_CORE(target->public.streams[j]); | ||
828 | struct core_link *link = stream->sink->link; | ||
829 | |||
830 | if (resource_is_stream_unchanged(dc->current_context, stream)) | ||
831 | continue; | ||
832 | 828 | ||
833 | for (k = 0; k < MAX_PIPES; k++) { | 829 | for (j = 0; j < MAX_PIPES; j++) { |
834 | struct pipe_ctx *pipe_ctx = | 830 | struct pipe_ctx *pipe_ctx = |
835 | &context->res_ctx.pipe_ctx[k]; | 831 | &context->res_ctx.pipe_ctx[j]; |
836 | 832 | ||
837 | if (context->res_ctx.pipe_ctx[k].stream != stream) | 833 | if (context->res_ctx.pipe_ctx[j].stream != stream) |
838 | continue; | 834 | continue; |
839 | 835 | ||
840 | if (!is_surface_pixel_format_supported(pipe_ctx, | 836 | if (!is_surface_pixel_format_supported(pipe_ctx, |
841 | context->res_ctx.pool->underlay_pipe_index)) | 837 | context->res_ctx.pool->underlay_pipe_index)) |
842 | return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED; | 838 | return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED; |
843 | 839 | ||
844 | if (!pipe_ctx->tg->funcs->validate_timing( | 840 | if (!pipe_ctx->tg->funcs->validate_timing( |
845 | pipe_ctx->tg, &stream->public.timing)) | 841 | pipe_ctx->tg, &stream->public.timing)) |
846 | return DC_FAIL_CONTROLLER_VALIDATE; | 842 | return DC_FAIL_CONTROLLER_VALIDATE; |
847 | 843 | ||
848 | status = dce110_resource_build_pipe_hw_param(pipe_ctx); | 844 | status = dce110_resource_build_pipe_hw_param(pipe_ctx); |
849 | 845 | ||
850 | if (status != DC_OK) | 846 | if (status != DC_OK) |
851 | return status; | 847 | return status; |
852 | 848 | ||
853 | if (!link->link_enc->funcs->validate_output_with_stream( | 849 | if (!link->link_enc->funcs->validate_output_with_stream( |
854 | link->link_enc, | 850 | link->link_enc, |
855 | pipe_ctx)) | 851 | pipe_ctx)) |
856 | return DC_FAIL_ENC_VALIDATE; | 852 | return DC_FAIL_ENC_VALIDATE; |
857 | 853 | ||
858 | /* TODO: validate audio ASIC caps, encoder */ | 854 | /* TODO: validate audio ASIC caps, encoder */ |
859 | 855 | ||
860 | status = dc_link_validate_mode_timing(stream, | 856 | status = dc_link_validate_mode_timing(stream, |
861 | link, | 857 | link, |
862 | &stream->public.timing); | 858 | &stream->public.timing); |
863 | 859 | ||
864 | if (status != DC_OK) | 860 | if (status != DC_OK) |
865 | return status; | 861 | return status; |
866 | 862 | ||
867 | resource_build_info_frame(pipe_ctx); | 863 | resource_build_info_frame(pipe_ctx); |
868 | 864 | ||
869 | /* do not need to validate non root pipes */ | 865 | /* do not need to validate non root pipes */ |
870 | break; | 866 | break; |
871 | } | ||
872 | } | 867 | } |
873 | } | 868 | } |
874 | 869 | ||
@@ -901,9 +896,9 @@ enum dc_status dce110_validate_bandwidth( | |||
901 | dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION, | 896 | dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION, |
902 | "%s: %dx%d@%d Bandwidth validation failed!\n", | 897 | "%s: %dx%d@%d Bandwidth validation failed!\n", |
903 | __func__, | 898 | __func__, |
904 | context->targets[0]->public.streams[0]->timing.h_addressable, | 899 | context->streams[0]->public.timing.h_addressable, |
905 | context->targets[0]->public.streams[0]->timing.v_addressable, | 900 | context->streams[0]->public.timing.v_addressable, |
906 | context->targets[0]->public.streams[0]->timing.pix_clk_khz); | 901 | context->streams[0]->public.timing.pix_clk_khz); |
907 | 902 | ||
908 | if (memcmp(&dc->current_context->bw_results, | 903 | if (memcmp(&dc->current_context->bw_results, |
909 | &context->bw_results, sizeof(context->bw_results))) { | 904 | &context->bw_results, sizeof(context->bw_results))) { |
@@ -972,9 +967,9 @@ static bool dce110_validate_surface_sets( | |||
972 | return false; | 967 | return false; |
973 | 968 | ||
974 | if (set[i].surfaces[0]->src_rect.width | 969 | if (set[i].surfaces[0]->src_rect.width |
975 | != set[i].target->streams[0]->src.width | 970 | != set[i].stream->src.width |
976 | || set[i].surfaces[0]->src_rect.height | 971 | || set[i].surfaces[0]->src_rect.height |
977 | != set[i].target->streams[0]->src.height) | 972 | != set[i].stream->src.height) |
978 | return false; | 973 | return false; |
979 | if (set[i].surfaces[0]->format | 974 | if (set[i].surfaces[0]->format |
980 | >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) | 975 | >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) |
@@ -988,7 +983,7 @@ static bool dce110_validate_surface_sets( | |||
988 | || set[i].surfaces[1]->src_rect.height > 1080) | 983 | || set[i].surfaces[1]->src_rect.height > 1080) |
989 | return false; | 984 | return false; |
990 | 985 | ||
991 | if (set[i].target->streams[0]->timing.pixel_encoding != PIXEL_ENCODING_RGB) | 986 | if (set[i].stream->timing.pixel_encoding != PIXEL_ENCODING_RGB) |
992 | return false; | 987 | return false; |
993 | } | 988 | } |
994 | } | 989 | } |
@@ -1012,9 +1007,9 @@ enum dc_status dce110_validate_with_context( | |||
1012 | context->res_ctx.pool = dc->res_pool; | 1007 | context->res_ctx.pool = dc->res_pool; |
1013 | 1008 | ||
1014 | for (i = 0; i < set_count; i++) { | 1009 | for (i = 0; i < set_count; i++) { |
1015 | context->targets[i] = DC_TARGET_TO_CORE(set[i].target); | 1010 | context->streams[i] = DC_STREAM_TO_CORE(set[i].stream); |
1016 | dc_target_retain(&context->targets[i]->public); | 1011 | dc_stream_retain(&context->streams[i]->public); |
1017 | context->target_count++; | 1012 | context->stream_count++; |
1018 | } | 1013 | } |
1019 | 1014 | ||
1020 | result = resource_map_pool_resources(dc, context); | 1015 | result = resource_map_pool_resources(dc, context); |
@@ -1024,7 +1019,7 @@ enum dc_status dce110_validate_with_context( | |||
1024 | 1019 | ||
1025 | if (!resource_validate_attach_surfaces( | 1020 | if (!resource_validate_attach_surfaces( |
1026 | set, set_count, dc->current_context, context)) { | 1021 | set, set_count, dc->current_context, context)) { |
1027 | DC_ERROR("Failed to attach surface to target!\n"); | 1022 | DC_ERROR("Failed to attach surface to stream!\n"); |
1028 | return DC_FAIL_ATTACH_SURFACES; | 1023 | return DC_FAIL_ATTACH_SURFACES; |
1029 | } | 1024 | } |
1030 | 1025 | ||
@@ -1042,16 +1037,16 @@ enum dc_status dce110_validate_with_context( | |||
1042 | 1037 | ||
1043 | enum dc_status dce110_validate_guaranteed( | 1038 | enum dc_status dce110_validate_guaranteed( |
1044 | const struct core_dc *dc, | 1039 | const struct core_dc *dc, |
1045 | const struct dc_target *dc_target, | 1040 | const struct dc_stream *dc_stream, |
1046 | struct validate_context *context) | 1041 | struct validate_context *context) |
1047 | { | 1042 | { |
1048 | enum dc_status result = DC_ERROR_UNEXPECTED; | 1043 | enum dc_status result = DC_ERROR_UNEXPECTED; |
1049 | 1044 | ||
1050 | context->res_ctx.pool = dc->res_pool; | 1045 | context->res_ctx.pool = dc->res_pool; |
1051 | 1046 | ||
1052 | context->targets[0] = DC_TARGET_TO_CORE(dc_target); | 1047 | context->streams[0] = DC_STREAM_TO_CORE(dc_stream); |
1053 | dc_target_retain(&context->targets[0]->public); | 1048 | dc_stream_retain(&context->streams[0]->public); |
1054 | context->target_count++; | 1049 | context->stream_count++; |
1055 | 1050 | ||
1056 | result = resource_map_pool_resources(dc, context); | 1051 | result = resource_map_pool_resources(dc, context); |
1057 | 1052 | ||
@@ -1062,8 +1057,8 @@ enum dc_status dce110_validate_guaranteed( | |||
1062 | result = validate_mapped_resource(dc, context); | 1057 | result = validate_mapped_resource(dc, context); |
1063 | 1058 | ||
1064 | if (result == DC_OK) { | 1059 | if (result == DC_OK) { |
1065 | validate_guaranteed_copy_target( | 1060 | validate_guaranteed_copy_streams( |
1066 | context, dc->public.caps.max_targets); | 1061 | context, dc->public.caps.max_streams); |
1067 | result = resource_build_scaling_params_for_context(dc, context); | 1062 | result = resource_build_scaling_params_for_context(dc, context); |
1068 | } | 1063 | } |
1069 | 1064 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 7fca2eb188cf..64fae91dd5eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | |||
@@ -779,54 +779,49 @@ static enum dc_status validate_mapped_resource( | |||
779 | struct validate_context *context) | 779 | struct validate_context *context) |
780 | { | 780 | { |
781 | enum dc_status status = DC_OK; | 781 | enum dc_status status = DC_OK; |
782 | uint8_t i, j, k; | 782 | uint8_t i, j; |
783 | 783 | ||
784 | for (i = 0; i < context->target_count; i++) { | 784 | for (i = 0; i < context->stream_count; i++) { |
785 | struct core_target *target = context->targets[i]; | 785 | struct core_stream *stream = context->streams[i]; |
786 | struct core_link *link = stream->sink->link; | ||
786 | 787 | ||
787 | for (j = 0; j < target->public.stream_count; j++) { | 788 | if (resource_is_stream_unchanged(dc->current_context, stream)) |
788 | struct core_stream *stream = | 789 | continue; |
789 | DC_STREAM_TO_CORE(target->public.streams[j]); | ||
790 | struct core_link *link = stream->sink->link; | ||
791 | |||
792 | if (resource_is_stream_unchanged(dc->current_context, stream)) | ||
793 | continue; | ||
794 | 790 | ||
795 | for (k = 0; k < MAX_PIPES; k++) { | 791 | for (j = 0; j < MAX_PIPES; j++) { |
796 | struct pipe_ctx *pipe_ctx = | 792 | struct pipe_ctx *pipe_ctx = |
797 | &context->res_ctx.pipe_ctx[k]; | 793 | &context->res_ctx.pipe_ctx[j]; |
798 | 794 | ||
799 | if (context->res_ctx.pipe_ctx[k].stream != stream) | 795 | if (context->res_ctx.pipe_ctx[j].stream != stream) |
800 | continue; | 796 | continue; |
801 | 797 | ||
802 | if (!pipe_ctx->tg->funcs->validate_timing( | 798 | if (!pipe_ctx->tg->funcs->validate_timing( |
803 | pipe_ctx->tg, &stream->public.timing)) | 799 | pipe_ctx->tg, &stream->public.timing)) |
804 | return DC_FAIL_CONTROLLER_VALIDATE; | 800 | return DC_FAIL_CONTROLLER_VALIDATE; |
805 | 801 | ||
806 | status = dce110_resource_build_pipe_hw_param(pipe_ctx); | 802 | status = dce110_resource_build_pipe_hw_param(pipe_ctx); |
807 | 803 | ||
808 | if (status != DC_OK) | 804 | if (status != DC_OK) |
809 | return status; | 805 | return status; |
810 | 806 | ||
811 | if (!link->link_enc->funcs->validate_output_with_stream( | 807 | if (!link->link_enc->funcs->validate_output_with_stream( |
812 | link->link_enc, | 808 | link->link_enc, |
813 | pipe_ctx)) | 809 | pipe_ctx)) |
814 | return DC_FAIL_ENC_VALIDATE; | 810 | return DC_FAIL_ENC_VALIDATE; |
815 | 811 | ||
816 | /* TODO: validate audio ASIC caps, encoder */ | 812 | /* TODO: validate audio ASIC caps, encoder */ |
817 | 813 | ||
818 | status = dc_link_validate_mode_timing(stream, | 814 | status = dc_link_validate_mode_timing(stream, |
819 | link, | 815 | link, |
820 | &stream->public.timing); | 816 | &stream->public.timing); |
821 | 817 | ||
822 | if (status != DC_OK) | 818 | if (status != DC_OK) |
823 | return status; | 819 | return status; |
824 | 820 | ||
825 | resource_build_info_frame(pipe_ctx); | 821 | resource_build_info_frame(pipe_ctx); |
826 | 822 | ||
827 | /* do not need to validate non root pipes */ | 823 | /* do not need to validate non root pipes */ |
828 | break; | 824 | break; |
829 | } | ||
830 | } | 825 | } |
831 | } | 826 | } |
832 | 827 | ||
@@ -917,45 +912,40 @@ enum dc_status resource_map_phy_clock_resources( | |||
917 | const struct core_dc *dc, | 912 | const struct core_dc *dc, |
918 | struct validate_context *context) | 913 | struct validate_context *context) |
919 | { | 914 | { |
920 | uint8_t i, j, k; | 915 | uint8_t i, j; |
921 | 916 | ||
922 | /* acquire new resources */ | 917 | /* acquire new resources */ |
923 | for (i = 0; i < context->target_count; i++) { | 918 | for (i = 0; i < context->stream_count; i++) { |
924 | struct core_target *target = context->targets[i]; | 919 | struct core_stream *stream = context->streams[i]; |
925 | 920 | ||
926 | for (j = 0; j < target->public.stream_count; j++) { | 921 | if (resource_is_stream_unchanged(dc->current_context, stream)) |
927 | struct core_stream *stream = | 922 | continue; |
928 | DC_STREAM_TO_CORE(target->public.streams[j]); | ||
929 | |||
930 | if (resource_is_stream_unchanged(dc->current_context, stream)) | ||
931 | continue; | ||
932 | 923 | ||
933 | for (k = 0; k < MAX_PIPES; k++) { | 924 | for (j = 0; j < MAX_PIPES; j++) { |
934 | struct pipe_ctx *pipe_ctx = | 925 | struct pipe_ctx *pipe_ctx = |
935 | &context->res_ctx.pipe_ctx[k]; | 926 | &context->res_ctx.pipe_ctx[j]; |
936 | 927 | ||
937 | if (context->res_ctx.pipe_ctx[k].stream != stream) | 928 | if (context->res_ctx.pipe_ctx[j].stream != stream) |
938 | continue; | 929 | continue; |
939 | 930 | ||
940 | if (dc_is_dp_signal(pipe_ctx->stream->signal) | 931 | if (dc_is_dp_signal(pipe_ctx->stream->signal) |
941 | || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL) | 932 | || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL) |
942 | pipe_ctx->clock_source = | 933 | pipe_ctx->clock_source = |
943 | context->res_ctx.pool->dp_clock_source; | 934 | context->res_ctx.pool->dp_clock_source; |
944 | else | 935 | else |
945 | pipe_ctx->clock_source = | 936 | pipe_ctx->clock_source = |
946 | find_matching_pll(&context->res_ctx, | 937 | find_matching_pll(&context->res_ctx, |
947 | stream); | 938 | stream); |
948 | 939 | ||
949 | if (pipe_ctx->clock_source == NULL) | 940 | if (pipe_ctx->clock_source == NULL) |
950 | return DC_NO_CLOCK_SOURCE_RESOURCE; | 941 | return DC_NO_CLOCK_SOURCE_RESOURCE; |
951 | 942 | ||
952 | resource_reference_clock_source( | 943 | resource_reference_clock_source( |
953 | &context->res_ctx, | 944 | &context->res_ctx, |
954 | pipe_ctx->clock_source); | 945 | pipe_ctx->clock_source); |
955 | 946 | ||
956 | /* only one cs per stream regardless of mpo */ | 947 | /* only one cs per stream regardless of mpo */ |
957 | break; | 948 | break; |
958 | } | ||
959 | } | 949 | } |
960 | } | 950 | } |
961 | 951 | ||
@@ -976,9 +966,9 @@ static bool dce112_validate_surface_sets( | |||
976 | return false; | 966 | return false; |
977 | 967 | ||
978 | if (set[i].surfaces[0]->clip_rect.width | 968 | if (set[i].surfaces[0]->clip_rect.width |
979 | != set[i].target->streams[0]->src.width | 969 | != set[i].stream->src.width |
980 | || set[i].surfaces[0]->clip_rect.height | 970 | || set[i].surfaces[0]->clip_rect.height |
981 | != set[i].target->streams[0]->src.height) | 971 | != set[i].stream->src.height) |
982 | return false; | 972 | return false; |
983 | if (set[i].surfaces[0]->format | 973 | if (set[i].surfaces[0]->format |
984 | >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) | 974 | >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) |
@@ -1004,9 +994,9 @@ enum dc_status dce112_validate_with_context( | |||
1004 | context->res_ctx.pool = dc->res_pool; | 994 | context->res_ctx.pool = dc->res_pool; |
1005 | 995 | ||
1006 | for (i = 0; i < set_count; i++) { | 996 | for (i = 0; i < set_count; i++) { |
1007 | context->targets[i] = DC_TARGET_TO_CORE(set[i].target); | 997 | context->streams[i] = DC_STREAM_TO_CORE(set[i].stream); |
1008 | dc_target_retain(&context->targets[i]->public); | 998 | dc_stream_retain(&context->streams[i]->public); |
1009 | context->target_count++; | 999 | context->stream_count++; |
1010 | } | 1000 | } |
1011 | 1001 | ||
1012 | result = resource_map_pool_resources(dc, context); | 1002 | result = resource_map_pool_resources(dc, context); |
@@ -1016,7 +1006,7 @@ enum dc_status dce112_validate_with_context( | |||
1016 | 1006 | ||
1017 | if (!resource_validate_attach_surfaces( | 1007 | if (!resource_validate_attach_surfaces( |
1018 | set, set_count, dc->current_context, context)) { | 1008 | set, set_count, dc->current_context, context)) { |
1019 | DC_ERROR("Failed to attach surface to target!\n"); | 1009 | DC_ERROR("Failed to attach surface to stream!\n"); |
1020 | return DC_FAIL_ATTACH_SURFACES; | 1010 | return DC_FAIL_ATTACH_SURFACES; |
1021 | } | 1011 | } |
1022 | 1012 | ||
@@ -1034,16 +1024,16 @@ enum dc_status dce112_validate_with_context( | |||
1034 | 1024 | ||
1035 | enum dc_status dce112_validate_guaranteed( | 1025 | enum dc_status dce112_validate_guaranteed( |
1036 | const struct core_dc *dc, | 1026 | const struct core_dc *dc, |
1037 | const struct dc_target *dc_target, | 1027 | const struct dc_stream *dc_stream, |
1038 | struct validate_context *context) | 1028 | struct validate_context *context) |
1039 | { | 1029 | { |
1040 | enum dc_status result = DC_ERROR_UNEXPECTED; | 1030 | enum dc_status result = DC_ERROR_UNEXPECTED; |
1041 | 1031 | ||
1042 | context->res_ctx.pool = dc->res_pool; | 1032 | context->res_ctx.pool = dc->res_pool; |
1043 | 1033 | ||
1044 | context->targets[0] = DC_TARGET_TO_CORE(dc_target); | 1034 | context->streams[0] = DC_STREAM_TO_CORE(dc_stream); |
1045 | dc_target_retain(&context->targets[0]->public); | 1035 | dc_stream_retain(&context->streams[0]->public); |
1046 | context->target_count++; | 1036 | context->stream_count++; |
1047 | 1037 | ||
1048 | result = resource_map_pool_resources(dc, context); | 1038 | result = resource_map_pool_resources(dc, context); |
1049 | 1039 | ||
@@ -1054,8 +1044,8 @@ enum dc_status dce112_validate_guaranteed( | |||
1054 | result = validate_mapped_resource(dc, context); | 1044 | result = validate_mapped_resource(dc, context); |
1055 | 1045 | ||
1056 | if (result == DC_OK) { | 1046 | if (result == DC_OK) { |
1057 | validate_guaranteed_copy_target( | 1047 | validate_guaranteed_copy_streams( |
1058 | context, dc->public.caps.max_targets); | 1048 | context, dc->public.caps.max_streams); |
1059 | result = resource_build_scaling_params_for_context(dc, context); | 1049 | result = resource_build_scaling_params_for_context(dc, context); |
1060 | } | 1050 | } |
1061 | 1051 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h index f21eb57857d4..faa8c45a3544 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h | |||
@@ -43,7 +43,7 @@ enum dc_status dce112_validate_with_context( | |||
43 | 43 | ||
44 | enum dc_status dce112_validate_guaranteed( | 44 | enum dc_status dce112_validate_guaranteed( |
45 | const struct core_dc *dc, | 45 | const struct core_dc *dc, |
46 | const struct dc_target *dc_target, | 46 | const struct dc_stream *dc_stream, |
47 | struct validate_context *context); | 47 | struct validate_context *context); |
48 | 48 | ||
49 | enum dc_status dce112_validate_bandwidth( | 49 | enum dc_status dce112_validate_bandwidth( |
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index e2bfa7efce1c..bee3a41ffe9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | |||
@@ -731,54 +731,49 @@ static enum dc_status validate_mapped_resource( | |||
731 | struct validate_context *context) | 731 | struct validate_context *context) |
732 | { | 732 | { |
733 | enum dc_status status = DC_OK; | 733 | enum dc_status status = DC_OK; |
734 | uint8_t i, j, k; | 734 | uint8_t i, j; |
735 | 735 | ||
736 | for (i = 0; i < context->target_count; i++) { | 736 | for (i = 0; i < context->stream_count; i++) { |
737 | struct core_target *target = context->targets[i]; | 737 | struct core_stream *stream = context->streams[i]; |
738 | struct core_link *link = stream->sink->link; | ||
738 | 739 | ||
739 | for (j = 0; j < target->public.stream_count; j++) { | 740 | if (resource_is_stream_unchanged(dc->current_context, stream)) |
740 | struct core_stream *stream = | 741 | continue; |
741 | DC_STREAM_TO_CORE(target->public.streams[j]); | ||
742 | struct core_link *link = stream->sink->link; | ||
743 | |||
744 | if (resource_is_stream_unchanged(dc->current_context, stream)) | ||
745 | continue; | ||
746 | 742 | ||
747 | for (k = 0; k < MAX_PIPES; k++) { | 743 | for (j = 0; j < MAX_PIPES; j++) { |
748 | struct pipe_ctx *pipe_ctx = | 744 | struct pipe_ctx *pipe_ctx = |
749 | &context->res_ctx.pipe_ctx[k]; | 745 | &context->res_ctx.pipe_ctx[j]; |
750 | 746 | ||
751 | if (context->res_ctx.pipe_ctx[k].stream != stream) | 747 | if (context->res_ctx.pipe_ctx[j].stream != stream) |
752 | continue; | 748 | continue; |
753 | 749 | ||
754 | if (!pipe_ctx->tg->funcs->validate_timing( | 750 | if (!pipe_ctx->tg->funcs->validate_timing( |
755 | pipe_ctx->tg, &stream->public.timing)) | 751 | pipe_ctx->tg, &stream->public.timing)) |
756 | return DC_FAIL_CONTROLLER_VALIDATE; | 752 | return DC_FAIL_CONTROLLER_VALIDATE; |
757 | 753 | ||
758 | status = dce110_resource_build_pipe_hw_param(pipe_ctx); | 754 | status = dce110_resource_build_pipe_hw_param(pipe_ctx); |
759 | 755 | ||
760 | if (status != DC_OK) | 756 | if (status != DC_OK) |
761 | return status; | 757 | return status; |
762 | 758 | ||
763 | if (!link->link_enc->funcs->validate_output_with_stream( | 759 | if (!link->link_enc->funcs->validate_output_with_stream( |
764 | link->link_enc, | 760 | link->link_enc, |
765 | pipe_ctx)) | 761 | pipe_ctx)) |
766 | return DC_FAIL_ENC_VALIDATE; | 762 | return DC_FAIL_ENC_VALIDATE; |
767 | 763 | ||
768 | /* TODO: validate audio ASIC caps, encoder */ | 764 | /* TODO: validate audio ASIC caps, encoder */ |
769 | 765 | ||
770 | status = dc_link_validate_mode_timing(stream, | 766 | status = dc_link_validate_mode_timing(stream, |
771 | link, | 767 | link, |
772 | &stream->public.timing); | 768 | &stream->public.timing); |
773 | 769 | ||
774 | if (status != DC_OK) | 770 | if (status != DC_OK) |
775 | return status; | 771 | return status; |
776 | 772 | ||
777 | resource_build_info_frame(pipe_ctx); | 773 | resource_build_info_frame(pipe_ctx); |
778 | 774 | ||
779 | /* do not need to validate non root pipes */ | 775 | /* do not need to validate non root pipes */ |
780 | break; | 776 | break; |
781 | } | ||
782 | } | 777 | } |
783 | } | 778 | } |
784 | 779 | ||
@@ -810,9 +805,9 @@ static bool dce80_validate_surface_sets( | |||
810 | return false; | 805 | return false; |
811 | 806 | ||
812 | if (set[i].surfaces[0]->clip_rect.width | 807 | if (set[i].surfaces[0]->clip_rect.width |
813 | != set[i].target->streams[0]->src.width | 808 | != set[i].stream->src.width |
814 | || set[i].surfaces[0]->clip_rect.height | 809 | || set[i].surfaces[0]->clip_rect.height |
815 | != set[i].target->streams[0]->src.height) | 810 | != set[i].stream->src.height) |
816 | return false; | 811 | return false; |
817 | if (set[i].surfaces[0]->format | 812 | if (set[i].surfaces[0]->format |
818 | >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) | 813 | >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) |
@@ -838,9 +833,9 @@ enum dc_status dce80_validate_with_context( | |||
838 | context->res_ctx.pool = dc->res_pool; | 833 | context->res_ctx.pool = dc->res_pool; |
839 | 834 | ||
840 | for (i = 0; i < set_count; i++) { | 835 | for (i = 0; i < set_count; i++) { |
841 | context->targets[i] = DC_TARGET_TO_CORE(set[i].target); | 836 | context->streams[i] = DC_STREAM_TO_CORE(set[i].stream); |
842 | dc_target_retain(&context->targets[i]->public); | 837 | dc_stream_retain(&context->streams[i]->public); |
843 | context->target_count++; | 838 | context->stream_count++; |
844 | } | 839 | } |
845 | 840 | ||
846 | result = resource_map_pool_resources(dc, context); | 841 | result = resource_map_pool_resources(dc, context); |
@@ -850,7 +845,7 @@ enum dc_status dce80_validate_with_context( | |||
850 | 845 | ||
851 | if (!resource_validate_attach_surfaces( | 846 | if (!resource_validate_attach_surfaces( |
852 | set, set_count, dc->current_context, context)) { | 847 | set, set_count, dc->current_context, context)) { |
853 | DC_ERROR("Failed to attach surface to target!\n"); | 848 | DC_ERROR("Failed to attach surface to stream!\n"); |
854 | return DC_FAIL_ATTACH_SURFACES; | 849 | return DC_FAIL_ATTACH_SURFACES; |
855 | } | 850 | } |
856 | 851 | ||
@@ -868,16 +863,16 @@ enum dc_status dce80_validate_with_context( | |||
868 | 863 | ||
869 | enum dc_status dce80_validate_guaranteed( | 864 | enum dc_status dce80_validate_guaranteed( |
870 | const struct core_dc *dc, | 865 | const struct core_dc *dc, |
871 | const struct dc_target *dc_target, | 866 | const struct dc_stream *dc_stream, |
872 | struct validate_context *context) | 867 | struct validate_context *context) |
873 | { | 868 | { |
874 | enum dc_status result = DC_ERROR_UNEXPECTED; | 869 | enum dc_status result = DC_ERROR_UNEXPECTED; |
875 | 870 | ||
876 | context->res_ctx.pool = dc->res_pool; | 871 | context->res_ctx.pool = dc->res_pool; |
877 | 872 | ||
878 | context->targets[0] = DC_TARGET_TO_CORE(dc_target); | 873 | context->streams[0] = DC_STREAM_TO_CORE(dc_stream); |
879 | dc_target_retain(&context->targets[0]->public); | 874 | dc_stream_retain(&context->streams[0]->public); |
880 | context->target_count++; | 875 | context->stream_count++; |
881 | 876 | ||
882 | result = resource_map_pool_resources(dc, context); | 877 | result = resource_map_pool_resources(dc, context); |
883 | 878 | ||
@@ -888,8 +883,8 @@ enum dc_status dce80_validate_guaranteed( | |||
888 | result = validate_mapped_resource(dc, context); | 883 | result = validate_mapped_resource(dc, context); |
889 | 884 | ||
890 | if (result == DC_OK) { | 885 | if (result == DC_OK) { |
891 | validate_guaranteed_copy_target( | 886 | validate_guaranteed_copy_streams( |
892 | context, dc->public.caps.max_targets); | 887 | context, dc->public.caps.max_streams); |
893 | result = resource_build_scaling_params_for_context(dc, context); | 888 | result = resource_build_scaling_params_for_context(dc, context); |
894 | } | 889 | } |
895 | 890 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_dc.h b/drivers/gpu/drm/amd/display/dc/inc/core_dc.h index b5a5207a4df0..7a6444dc2957 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_dc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_dc.h | |||
@@ -21,7 +21,6 @@ struct core_dc { | |||
21 | uint8_t link_count; | 21 | uint8_t link_count; |
22 | struct core_link *links[MAX_PIPES * 2]; | 22 | struct core_link *links[MAX_PIPES * 2]; |
23 | 23 | ||
24 | /* TODO: determine max number of targets*/ | ||
25 | struct validate_context *current_context; | 24 | struct validate_context *current_context; |
26 | struct validate_context *temp_flip_context; | 25 | struct validate_context *temp_flip_context; |
27 | struct validate_context *scratch_val_ctx; | 26 | struct validate_context *scratch_val_ctx; |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index c2d35c2c28bf..66bfcdb57c4c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h | |||
@@ -32,21 +32,10 @@ | |||
32 | #include "dc_bios_types.h" | 32 | #include "dc_bios_types.h" |
33 | 33 | ||
34 | struct core_stream; | 34 | struct core_stream; |
35 | /********* core_target *************/ | ||
36 | |||
37 | #define CONST_DC_TARGET_TO_CORE(dc_target) \ | ||
38 | container_of(dc_target, const struct core_target, public) | ||
39 | #define DC_TARGET_TO_CORE(dc_target) \ | ||
40 | container_of(dc_target, struct core_target, public) | ||
41 | 35 | ||
42 | #define MAX_PIPES 6 | 36 | #define MAX_PIPES 6 |
43 | #define MAX_CLOCK_SOURCES 7 | 37 | #define MAX_CLOCK_SOURCES 7 |
44 | 38 | ||
45 | struct core_target { | ||
46 | struct dc_target public; | ||
47 | |||
48 | struct dc_context *ctx; | ||
49 | }; | ||
50 | 39 | ||
51 | /********* core_surface **********/ | 40 | /********* core_surface **********/ |
52 | #define DC_SURFACE_TO_CORE(dc_surface) \ | 41 | #define DC_SURFACE_TO_CORE(dc_surface) \ |
@@ -215,7 +204,7 @@ struct resource_funcs { | |||
215 | 204 | ||
216 | enum dc_status (*validate_guaranteed)( | 205 | enum dc_status (*validate_guaranteed)( |
217 | const struct core_dc *dc, | 206 | const struct core_dc *dc, |
218 | const struct dc_target *dc_target, | 207 | const struct dc_stream *stream, |
219 | struct validate_context *context); | 208 | struct validate_context *context); |
220 | 209 | ||
221 | enum dc_status (*validate_bandwidth)( | 210 | enum dc_status (*validate_bandwidth)( |
@@ -312,9 +301,9 @@ struct resource_context { | |||
312 | }; | 301 | }; |
313 | 302 | ||
314 | struct validate_context { | 303 | struct validate_context { |
315 | struct core_target *targets[MAX_PIPES]; | 304 | struct core_stream *streams[MAX_PIPES]; |
316 | struct dc_target_status target_status[MAX_PIPES]; | 305 | struct dc_stream_status stream_status[MAX_PIPES]; |
317 | uint8_t target_count; | 306 | uint8_t stream_count; |
318 | 307 | ||
319 | struct resource_context res_ctx; | 308 | struct resource_context res_ctx; |
320 | 309 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index adf297ec33b6..d96c64bb0a70 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h | |||
@@ -118,25 +118,26 @@ struct pipe_ctx *resource_get_head_pipe_for_stream( | |||
118 | bool resource_attach_surfaces_to_context( | 118 | bool resource_attach_surfaces_to_context( |
119 | const struct dc_surface *const *surfaces, | 119 | const struct dc_surface *const *surfaces, |
120 | int surface_count, | 120 | int surface_count, |
121 | const struct dc_target *dc_target, | 121 | const struct dc_stream *dc_stream, |
122 | struct validate_context *context); | 122 | struct validate_context *context); |
123 | 123 | ||
124 | struct pipe_ctx *find_idle_secondary_pipe(struct resource_context *res_ctx); | 124 | struct pipe_ctx *find_idle_secondary_pipe(struct resource_context *res_ctx); |
125 | 125 | ||
126 | bool resource_is_stream_unchanged( | 126 | bool resource_is_stream_unchanged( |
127 | const struct validate_context *old_context, struct core_stream *stream); | 127 | const struct validate_context *old_context, const struct core_stream *stream); |
128 | |||
129 | bool is_stream_unchanged( | ||
130 | const struct core_stream *old_stream, const struct core_stream *stream); | ||
128 | 131 | ||
129 | bool is_target_unchanged( | ||
130 | const struct core_target *old_target, const struct core_target *target); | ||
131 | bool resource_validate_attach_surfaces( | 132 | bool resource_validate_attach_surfaces( |
132 | const struct dc_validation_set set[], | 133 | const struct dc_validation_set set[], |
133 | int set_count, | 134 | int set_count, |
134 | const struct validate_context *old_context, | 135 | const struct validate_context *old_context, |
135 | struct validate_context *context); | 136 | struct validate_context *context); |
136 | 137 | ||
137 | void validate_guaranteed_copy_target( | 138 | void validate_guaranteed_copy_streams( |
138 | struct validate_context *context, | 139 | struct validate_context *context, |
139 | int max_targets); | 140 | int max_streams); |
140 | 141 | ||
141 | void resource_validate_ctx_update_pointer_after_copy( | 142 | void resource_validate_ctx_update_pointer_after_copy( |
142 | const struct validate_context *src_ctx, | 143 | const struct validate_context *src_ctx, |