Diffstat (limited to 'drivers/gpu/drm/amd/display')
66 files changed, 2718 insertions, 2092 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index dd688cfed6aa..aa43bb253ea2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -76,6 +76,16 @@ | |||
76 | #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" | 76 | #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" |
77 | MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); | 77 | MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); |
78 | 78 | ||
79 | /** | ||
80 | * DOC: overview | ||
81 | * | ||
82 | * The AMDgpu display manager, **amdgpu_dm** (or even simpler, | ||
83 | * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM | ||
84 | * requests into DC requests, and DC responses into DRM responses. | ||
85 | * | ||
86 | * The root control structure is &struct amdgpu_display_manager. | ||
87 | */ | ||
88 | |||
79 | /* basic init/fini API */ | 89 | /* basic init/fini API */ |
80 | static int amdgpu_dm_init(struct amdgpu_device *adev); | 90 | static int amdgpu_dm_init(struct amdgpu_device *adev); |
81 | static void amdgpu_dm_fini(struct amdgpu_device *adev); | 91 | static void amdgpu_dm_fini(struct amdgpu_device *adev); |
@@ -95,7 +105,7 @@ static void | |||
95 | amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector); | 105 | amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector); |
96 | 106 | ||
97 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, | 107 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, |
98 | struct amdgpu_plane *aplane, | 108 | struct drm_plane *plane, |
99 | unsigned long possible_crtcs); | 109 | unsigned long possible_crtcs); |
100 | static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, | 110 | static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, |
101 | struct drm_plane *plane, | 111 | struct drm_plane *plane, |
@@ -379,11 +389,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector) | |||
379 | 389 | ||
380 | } | 390 | } |
381 | 391 | ||
382 | /* | ||
383 | * Init display KMS | ||
384 | * | ||
385 | * Returns 0 on success | ||
386 | */ | ||
387 | static int amdgpu_dm_init(struct amdgpu_device *adev) | 392 | static int amdgpu_dm_init(struct amdgpu_device *adev) |
388 | { | 393 | { |
389 | struct dc_init_data init_data; | 394 | struct dc_init_data init_data; |
@@ -429,6 +434,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) | |||
429 | adev->asic_type < CHIP_RAVEN) | 434 | adev->asic_type < CHIP_RAVEN) |
430 | init_data.flags.gpu_vm_support = true; | 435 | init_data.flags.gpu_vm_support = true; |
431 | 436 | ||
437 | if (amdgpu_dc_feature_mask & DC_FBC_MASK) | ||
438 | init_data.flags.fbc_support = true; | ||
439 | |||
432 | /* Display Core create. */ | 440 | /* Display Core create. */ |
433 | adev->dm.dc = dc_create(&init_data); | 441 | adev->dm.dc = dc_create(&init_data); |
434 | 442 | ||
@@ -660,6 +668,26 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) | |||
660 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | 668 | drm_modeset_unlock(&dev->mode_config.connection_mutex); |
661 | } | 669 | } |
662 | 670 | ||
671 | /** | ||
672 | * dm_hw_init() - Initialize DC device | ||
673 | * @handle: The base driver device containing the amdgpu_dm device. | ||
674 | * | ||
675 | * Initialize the &struct amdgpu_display_manager device. This involves calling | ||
676 | * the initializers of each DM component, then populating the struct with them. | ||
677 | * | ||
678 | * Although the function implies hardware initialization, both hardware and | ||
679 | * software are initialized here. Splitting them out to their relevant init | ||
680 | * hooks is a future TODO item. | ||
681 | * | ||
682 | * Some notable things that are initialized here: | ||
683 | * | ||
684 | * - Display Core, both software and hardware | ||
685 | * - DC modules that we need (freesync and color management) | ||
686 | * - DRM software states | ||
687 | * - Interrupt sources and handlers | ||
688 | * - Vblank support | ||
689 | * - Debug FS entries, if enabled | ||
690 | */ | ||
663 | static int dm_hw_init(void *handle) | 691 | static int dm_hw_init(void *handle) |
664 | { | 692 | { |
665 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 693 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -670,6 +698,14 @@ static int dm_hw_init(void *handle) | |||
670 | return 0; | 698 | return 0; |
671 | } | 699 | } |
672 | 700 | ||
701 | /** | ||
702 | * dm_hw_fini() - Teardown DC device | ||
703 | * @handle: The base driver device containing the amdgpu_dm device. | ||
704 | * | ||
705 | * Tear down components within &struct amdgpu_display_manager that require | ||
706 | * cleanup. This involves cleaning up the DRM device, DC, and any modules that | ||
707 | * were loaded. Also flush IRQ workqueues and disable them. | ||
708 | */ | ||
673 | static int dm_hw_fini(void *handle) | 709 | static int dm_hw_fini(void *handle) |
674 | { | 710 | { |
675 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 711 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -895,6 +931,16 @@ static int dm_resume(void *handle) | |||
895 | return ret; | 931 | return ret; |
896 | } | 932 | } |
897 | 933 | ||
934 | /** | ||
935 | * DOC: DM Lifecycle | ||
936 | * | ||
937 | * DM (and consequently DC) is registered in the amdgpu base driver as an IP | ||
938 | * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to | ||
939 | * the base driver's device list to be initialized and torn down accordingly. | ||
940 | * | ||
941 | * The functions to do so are provided as hooks in &struct amd_ip_funcs. | ||
942 | */ | ||
943 | |||
898 | static const struct amd_ip_funcs amdgpu_dm_funcs = { | 944 | static const struct amd_ip_funcs amdgpu_dm_funcs = { |
899 | .name = "dm", | 945 | .name = "dm", |
900 | .early_init = dm_early_init, | 946 | .early_init = dm_early_init, |
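For context, a minimal sketch of the lifecycle wiring described in the DM Lifecycle note above: the amd_ip_funcs table is wrapped in an IP block version and added to the base driver's IP block list by the SoC-specific setup code. The dm_ip_block name and AMD_IP_BLOCK_TYPE_DCE type follow the existing driver; the exact call site shown here is an assumption for illustration, not part of this patch.

	/* Sketch: the DM IP block wraps amdgpu_dm_funcs so the base driver can
	 * run its init/fini, hw_init/hw_fini and suspend/resume hooks at the
	 * right points in the device lifecycle. */
	const struct amdgpu_ip_block_version dm_ip_block = {
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_dm_funcs,
	};

	/* SoC setup code then adds it to the device's IP block list, e.g.: */
	amdgpu_device_ip_block_add(adev, &dm_ip_block);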
@@ -962,6 +1008,12 @@ dm_atomic_state_alloc_free(struct drm_atomic_state *state) | |||
962 | kfree(dm_state); | 1008 | kfree(dm_state); |
963 | } | 1009 | } |
964 | 1010 | ||
1011 | /** | ||
1012 | * DOC: atomic | ||
1013 | * | ||
1014 | * *WIP* | ||
1015 | */ | ||
1016 | |||
965 | static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { | 1017 | static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { |
966 | .fb_create = amdgpu_display_user_framebuffer_create, | 1018 | .fb_create = amdgpu_display_user_framebuffer_create, |
967 | .output_poll_changed = drm_fb_helper_output_poll_changed, | 1019 | .output_poll_changed = drm_fb_helper_output_poll_changed, |
@@ -1524,15 +1576,23 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) | |||
1524 | { | 1576 | { |
1525 | struct amdgpu_display_manager *dm = bl_get_data(bd); | 1577 | struct amdgpu_display_manager *dm = bl_get_data(bd); |
1526 | 1578 | ||
1579 | /* The backlight_pwm_u16_16 parameter is an unsigned 32-bit value, 16-bit | ||
1580 | * integer and 16-bit fractional, where 1.0 is the max backlight value. | ||
1581 | * bd->props.brightness is an 8-bit value and is converted by scaling: | ||
1582 | * the lower byte is copied into the upper byte of a 16-bit value. | ||
1583 | */ | ||
1584 | uint32_t brightness = bd->props.brightness * 0x101; | ||
1585 | |||
1527 | /* | 1586 | /* |
1528 | * PWM interperts 0 as 100% rather than 0% because of HW | 1587 | * PWM interperts 0 as 100% rather than 0% because of HW |
1529 | * limitation for level 0.So limiting minimum brightness level | 1588 | * limitation for level 0. So limiting minimum brightness level |
1530 | * to 1. | 1589 | * to 1. |
1531 | */ | 1590 | */ |
1532 | if (bd->props.brightness < 1) | 1591 | if (bd->props.brightness < 1) |
1533 | return 1; | 1592 | brightness = 0x101; |
1593 | |||
1534 | if (dc_link_set_backlight_level(dm->backlight_link, | 1594 | if (dc_link_set_backlight_level(dm->backlight_link, |
1535 | bd->props.brightness, 0, 0)) | 1595 | brightness, 0, 0)) |
1536 | return 0; | 1596 | return 0; |
1537 | else | 1597 | else |
1538 | return 1; | 1598 | return 1; |
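A quick worked example of the brightness scaling introduced above (illustrative only; the helper name is made up and is not part of the patch):

	/* Multiplying an 8-bit brightness by 0x101 replicates the low byte into
	 * the high byte, spreading 0x00..0xFF evenly across 0x0000..0xFFFF so it
	 * can be passed as the 16.16 fixed-point backlight level:
	 *   0x01 * 0x101 = 0x0101
	 *   0x80 * 0x101 = 0x8080
	 *   0xFF * 0x101 = 0xFFFF
	 */
	static inline uint32_t dm_scale_brightness(uint8_t user_brightness)
	{
		return (uint32_t)user_brightness * 0x101;
	}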
@@ -1584,18 +1644,18 @@ static int initialize_plane(struct amdgpu_display_manager *dm, | |||
1584 | struct amdgpu_mode_info *mode_info, | 1644 | struct amdgpu_mode_info *mode_info, |
1585 | int plane_id) | 1645 | int plane_id) |
1586 | { | 1646 | { |
1587 | struct amdgpu_plane *plane; | 1647 | struct drm_plane *plane; |
1588 | unsigned long possible_crtcs; | 1648 | unsigned long possible_crtcs; |
1589 | int ret = 0; | 1649 | int ret = 0; |
1590 | 1650 | ||
1591 | plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL); | 1651 | plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); |
1592 | mode_info->planes[plane_id] = plane; | 1652 | mode_info->planes[plane_id] = plane; |
1593 | 1653 | ||
1594 | if (!plane) { | 1654 | if (!plane) { |
1595 | DRM_ERROR("KMS: Failed to allocate plane\n"); | 1655 | DRM_ERROR("KMS: Failed to allocate plane\n"); |
1596 | return -ENOMEM; | 1656 | return -ENOMEM; |
1597 | } | 1657 | } |
1598 | plane->base.type = mode_info->plane_type[plane_id]; | 1658 | plane->type = mode_info->plane_type[plane_id]; |
1599 | 1659 | ||
1600 | /* | 1660 | /* |
1601 | * HACK: IGT tests expect that each plane can only have | 1661 | * HACK: IGT tests expect that each plane can only have |
@@ -1686,7 +1746,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) | |||
1686 | } | 1746 | } |
1687 | 1747 | ||
1688 | for (i = 0; i < dm->dc->caps.max_streams; i++) | 1748 | for (i = 0; i < dm->dc->caps.max_streams; i++) |
1689 | if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) { | 1749 | if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { |
1690 | DRM_ERROR("KMS: Failed to initialize crtc\n"); | 1750 | DRM_ERROR("KMS: Failed to initialize crtc\n"); |
1691 | goto fail; | 1751 | goto fail; |
1692 | } | 1752 | } |
@@ -2707,18 +2767,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
2707 | drm_connector = &aconnector->base; | 2767 | drm_connector = &aconnector->base; |
2708 | 2768 | ||
2709 | if (!aconnector->dc_sink) { | 2769 | if (!aconnector->dc_sink) { |
2710 | /* | 2770 | if (!aconnector->mst_port) { |
2711 | * Create dc_sink when necessary to MST | 2771 | sink = create_fake_sink(aconnector); |
2712 | * Don't apply fake_sink to MST | 2772 | if (!sink) |
2713 | */ | 2773 | return stream; |
2714 | if (aconnector->mst_port) { | ||
2715 | dm_dp_mst_dc_sink_create(drm_connector); | ||
2716 | return stream; | ||
2717 | } | 2774 | } |
2718 | |||
2719 | sink = create_fake_sink(aconnector); | ||
2720 | if (!sink) | ||
2721 | return stream; | ||
2722 | } else { | 2775 | } else { |
2723 | sink = aconnector->dc_sink; | 2776 | sink = aconnector->dc_sink; |
2724 | } | 2777 | } |
@@ -3307,7 +3360,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane, | |||
3307 | static const struct drm_plane_funcs dm_plane_funcs = { | 3360 | static const struct drm_plane_funcs dm_plane_funcs = { |
3308 | .update_plane = drm_atomic_helper_update_plane, | 3361 | .update_plane = drm_atomic_helper_update_plane, |
3309 | .disable_plane = drm_atomic_helper_disable_plane, | 3362 | .disable_plane = drm_atomic_helper_disable_plane, |
3310 | .destroy = drm_plane_cleanup, | 3363 | .destroy = drm_primary_helper_destroy, |
3311 | .reset = dm_drm_plane_reset, | 3364 | .reset = dm_drm_plane_reset, |
3312 | .atomic_duplicate_state = dm_drm_plane_duplicate_state, | 3365 | .atomic_duplicate_state = dm_drm_plane_duplicate_state, |
3313 | .atomic_destroy_state = dm_drm_plane_destroy_state, | 3366 | .atomic_destroy_state = dm_drm_plane_destroy_state, |
@@ -3468,49 +3521,49 @@ static const u32 cursor_formats[] = { | |||
3468 | }; | 3521 | }; |
3469 | 3522 | ||
3470 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, | 3523 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, |
3471 | struct amdgpu_plane *aplane, | 3524 | struct drm_plane *plane, |
3472 | unsigned long possible_crtcs) | 3525 | unsigned long possible_crtcs) |
3473 | { | 3526 | { |
3474 | int res = -EPERM; | 3527 | int res = -EPERM; |
3475 | 3528 | ||
3476 | switch (aplane->base.type) { | 3529 | switch (plane->type) { |
3477 | case DRM_PLANE_TYPE_PRIMARY: | 3530 | case DRM_PLANE_TYPE_PRIMARY: |
3478 | res = drm_universal_plane_init( | 3531 | res = drm_universal_plane_init( |
3479 | dm->adev->ddev, | 3532 | dm->adev->ddev, |
3480 | &aplane->base, | 3533 | plane, |
3481 | possible_crtcs, | 3534 | possible_crtcs, |
3482 | &dm_plane_funcs, | 3535 | &dm_plane_funcs, |
3483 | rgb_formats, | 3536 | rgb_formats, |
3484 | ARRAY_SIZE(rgb_formats), | 3537 | ARRAY_SIZE(rgb_formats), |
3485 | NULL, aplane->base.type, NULL); | 3538 | NULL, plane->type, NULL); |
3486 | break; | 3539 | break; |
3487 | case DRM_PLANE_TYPE_OVERLAY: | 3540 | case DRM_PLANE_TYPE_OVERLAY: |
3488 | res = drm_universal_plane_init( | 3541 | res = drm_universal_plane_init( |
3489 | dm->adev->ddev, | 3542 | dm->adev->ddev, |
3490 | &aplane->base, | 3543 | plane, |
3491 | possible_crtcs, | 3544 | possible_crtcs, |
3492 | &dm_plane_funcs, | 3545 | &dm_plane_funcs, |
3493 | yuv_formats, | 3546 | yuv_formats, |
3494 | ARRAY_SIZE(yuv_formats), | 3547 | ARRAY_SIZE(yuv_formats), |
3495 | NULL, aplane->base.type, NULL); | 3548 | NULL, plane->type, NULL); |
3496 | break; | 3549 | break; |
3497 | case DRM_PLANE_TYPE_CURSOR: | 3550 | case DRM_PLANE_TYPE_CURSOR: |
3498 | res = drm_universal_plane_init( | 3551 | res = drm_universal_plane_init( |
3499 | dm->adev->ddev, | 3552 | dm->adev->ddev, |
3500 | &aplane->base, | 3553 | plane, |
3501 | possible_crtcs, | 3554 | possible_crtcs, |
3502 | &dm_plane_funcs, | 3555 | &dm_plane_funcs, |
3503 | cursor_formats, | 3556 | cursor_formats, |
3504 | ARRAY_SIZE(cursor_formats), | 3557 | ARRAY_SIZE(cursor_formats), |
3505 | NULL, aplane->base.type, NULL); | 3558 | NULL, plane->type, NULL); |
3506 | break; | 3559 | break; |
3507 | } | 3560 | } |
3508 | 3561 | ||
3509 | drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs); | 3562 | drm_plane_helper_add(plane, &dm_plane_helper_funcs); |
3510 | 3563 | ||
3511 | /* Create (reset) the plane state */ | 3564 | /* Create (reset) the plane state */ |
3512 | if (aplane->base.funcs->reset) | 3565 | if (plane->funcs->reset) |
3513 | aplane->base.funcs->reset(&aplane->base); | 3566 | plane->funcs->reset(plane); |
3514 | 3567 | ||
3515 | 3568 | ||
3516 | return res; | 3569 | return res; |
@@ -3521,7 +3574,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, | |||
3521 | uint32_t crtc_index) | 3574 | uint32_t crtc_index) |
3522 | { | 3575 | { |
3523 | struct amdgpu_crtc *acrtc = NULL; | 3576 | struct amdgpu_crtc *acrtc = NULL; |
3524 | struct amdgpu_plane *cursor_plane; | 3577 | struct drm_plane *cursor_plane; |
3525 | 3578 | ||
3526 | int res = -ENOMEM; | 3579 | int res = -ENOMEM; |
3527 | 3580 | ||
@@ -3529,7 +3582,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, | |||
3529 | if (!cursor_plane) | 3582 | if (!cursor_plane) |
3530 | goto fail; | 3583 | goto fail; |
3531 | 3584 | ||
3532 | cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR; | 3585 | cursor_plane->type = DRM_PLANE_TYPE_CURSOR; |
3533 | res = amdgpu_dm_plane_init(dm, cursor_plane, 0); | 3586 | res = amdgpu_dm_plane_init(dm, cursor_plane, 0); |
3534 | 3587 | ||
3535 | acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); | 3588 | acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); |
@@ -3540,7 +3593,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, | |||
3540 | dm->ddev, | 3593 | dm->ddev, |
3541 | &acrtc->base, | 3594 | &acrtc->base, |
3542 | plane, | 3595 | plane, |
3543 | &cursor_plane->base, | 3596 | cursor_plane, |
3544 | &amdgpu_dm_crtc_funcs, NULL); | 3597 | &amdgpu_dm_crtc_funcs, NULL); |
3545 | 3598 | ||
3546 | if (res) | 3599 | if (res) |
@@ -3779,12 +3832,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, | |||
3779 | case DRM_MODE_CONNECTOR_HDMIA: | 3832 | case DRM_MODE_CONNECTOR_HDMIA: |
3780 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; | 3833 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; |
3781 | aconnector->base.ycbcr_420_allowed = | 3834 | aconnector->base.ycbcr_420_allowed = |
3782 | link->link_enc->features.ycbcr420_supported ? true : false; | 3835 | link->link_enc->features.hdmi_ycbcr420_supported ? true : false; |
3783 | break; | 3836 | break; |
3784 | case DRM_MODE_CONNECTOR_DisplayPort: | 3837 | case DRM_MODE_CONNECTOR_DisplayPort: |
3785 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; | 3838 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; |
3786 | aconnector->base.ycbcr_420_allowed = | 3839 | aconnector->base.ycbcr_420_allowed = |
3787 | link->link_enc->features.ycbcr420_supported ? true : false; | 3840 | link->link_enc->features.dp_ycbcr420_supported ? true : false; |
3788 | break; | 3841 | break; |
3789 | case DRM_MODE_CONNECTOR_DVID: | 3842 | case DRM_MODE_CONNECTOR_DVID: |
3790 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; | 3843 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; |
@@ -4542,6 +4595,14 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, | |||
4542 | /*TODO Handle EINTR, reenable IRQ*/ | 4595 | /*TODO Handle EINTR, reenable IRQ*/ |
4543 | } | 4596 | } |
4544 | 4597 | ||
4598 | /** | ||
4599 | * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. | ||
4600 | * @state: The atomic state to commit | ||
4601 | * | ||
4602 | * This will tell DC to commit the constructed DC state from atomic_check, | ||
4603 | * programming the hardware. Any failures here imply a hardware failure, since | ||
4604 | * atomic check should have filtered anything non-kosher. | ||
4605 | */ | ||
4545 | static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | 4606 | static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) |
4546 | { | 4607 | { |
4547 | struct drm_device *dev = state->dev; | 4608 | struct drm_device *dev = state->dev; |
@@ -5313,6 +5374,12 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru | |||
5313 | struct dc_stream_update stream_update; | 5374 | struct dc_stream_update stream_update; |
5314 | enum surface_update_type update_type = UPDATE_TYPE_FAST; | 5375 | enum surface_update_type update_type = UPDATE_TYPE_FAST; |
5315 | 5376 | ||
5377 | if (!updates || !surface) { | ||
5378 | DRM_ERROR("Plane or surface update failed to allocate"); | ||
5379 | /* Set type to FULL to avoid crashing in DC*/ | ||
5380 | update_type = UPDATE_TYPE_FULL; | ||
5381 | goto ret; | ||
5382 | } | ||
5316 | 5383 | ||
5317 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 5384 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
5318 | new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); | 5385 | new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); |
@@ -5388,6 +5455,31 @@ ret: | |||
5388 | return update_type; | 5455 | return update_type; |
5389 | } | 5456 | } |
5390 | 5457 | ||
5458 | /** | ||
5459 | * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. | ||
5460 | * @dev: The DRM device | ||
5461 | * @state: The atomic state to commit | ||
5462 | * | ||
5463 | * Validate that the given atomic state is programmable by DC into hardware. | ||
5464 | * This involves constructing a &struct dc_state reflecting the new hardware | ||
5465 | * state we wish to commit, then querying DC to see if it is programmable. It's | ||
5466 | * important not to modify the existing DC state. Otherwise, atomic_check | ||
5467 | * may unexpectedly commit hardware changes. | ||
5468 | * | ||
5469 | * When validating the DC state, it's important that the right locks are | ||
5470 | * acquired. For the full update case, which removes/adds/updates streams on one | ||
5471 | * CRTC while flipping on another CRTC, acquiring the global lock guarantees | ||
5472 | * that any such full update commit will wait for completion of any outstanding | ||
5473 | * flip using DRM's synchronization events. See | ||
5474 | * dm_determine_update_type_for_commit() | ||
5475 | * | ||
5476 | * Note that DM adds the affected connectors for all CRTCs in state, when that | ||
5477 | * might not seem necessary. This is because DC stream creation requires the | ||
5478 | * DC sink, which is tied to the DRM connector state. Cleaning this up should | ||
5479 | * be possible but non-trivial - a possible TODO item. | ||
5480 | * | ||
5481 | * Return: 0 on success, or a negative error code if validation failed. | ||
5482 | */ | ||
5391 | static int amdgpu_dm_atomic_check(struct drm_device *dev, | 5483 | static int amdgpu_dm_atomic_check(struct drm_device *dev, |
5392 | struct drm_atomic_state *state) | 5484 | struct drm_atomic_state *state) |
5393 | { | 5485 | { |
@@ -5490,15 +5582,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
5490 | lock_and_validation_needed = true; | 5582 | lock_and_validation_needed = true; |
5491 | } | 5583 | } |
5492 | 5584 | ||
5493 | /* | ||
5494 | * For full updates case when | ||
5495 | * removing/adding/updating streams on one CRTC while flipping | ||
5496 | * on another CRTC, | ||
5497 | * acquiring global lock will guarantee that any such full | ||
5498 | * update commit | ||
5499 | * will wait for completion of any outstanding flip using DRMs | ||
5500 | * synchronization events. | ||
5501 | */ | ||
5502 | update_type = dm_determine_update_type_for_commit(dc, state); | 5585 | update_type = dm_determine_update_type_for_commit(dc, state); |
5503 | 5586 | ||
5504 | if (overall_update_type < update_type) | 5587 | if (overall_update_type < update_type) |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 978b34a5011c..d6960644d714 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | |||
@@ -59,49 +59,100 @@ struct common_irq_params { | |||
59 | enum dc_irq_source irq_src; | 59 | enum dc_irq_source irq_src; |
60 | }; | 60 | }; |
61 | 61 | ||
62 | /** | ||
63 | * struct irq_list_head - Linked-list for low context IRQ handlers. | ||
64 | * | ||
65 | * @head: The list_head within &struct amdgpu_dm_irq_handler_data | ||
66 | * @work: A work_struct containing the deferred handler work | ||
67 | */ | ||
62 | struct irq_list_head { | 68 | struct irq_list_head { |
63 | struct list_head head; | 69 | struct list_head head; |
64 | /* In case this interrupt needs post-processing, 'work' will be queued*/ | 70 | /* In case this interrupt needs post-processing, 'work' will be queued*/ |
65 | struct work_struct work; | 71 | struct work_struct work; |
66 | }; | 72 | }; |
67 | 73 | ||
74 | /** | ||
75 | * struct dm_comressor_info - Buffer info used by frame buffer compression | ||
76 | * @cpu_addr: MMIO cpu addr | ||
77 | * @bo_ptr: Pointer to the buffer object | ||
78 | * @gpu_addr: MMIO gpu addr | ||
79 | */ | ||
68 | struct dm_comressor_info { | 80 | struct dm_comressor_info { |
69 | void *cpu_addr; | 81 | void *cpu_addr; |
70 | struct amdgpu_bo *bo_ptr; | 82 | struct amdgpu_bo *bo_ptr; |
71 | uint64_t gpu_addr; | 83 | uint64_t gpu_addr; |
72 | }; | 84 | }; |
73 | 85 | ||
86 | /** | ||
87 | * struct amdgpu_display_manager - Central amdgpu display manager device | ||
88 | * | ||
89 | * @dc: Display Core control structure | ||
90 | * @adev: AMDGPU base driver structure | ||
91 | * @ddev: DRM base driver structure | ||
92 | * @display_indexes_num: Max number of display streams supported | ||
93 | * @irq_handler_list_table_lock: Synchronizes access to IRQ tables | ||
94 | * @backlight_dev: Backlight control device | ||
95 | * @cached_state: Caches device atomic state for suspend/resume | ||
96 | * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info | ||
97 | */ | ||
74 | struct amdgpu_display_manager { | 98 | struct amdgpu_display_manager { |
99 | |||
75 | struct dc *dc; | 100 | struct dc *dc; |
101 | |||
102 | /** | ||
103 | * @cgs_device: | ||
104 | * | ||
105 | * The Common Graphics Services device. It provides an interface for | ||
106 | * accessing registers. | ||
107 | */ | ||
76 | struct cgs_device *cgs_device; | 108 | struct cgs_device *cgs_device; |
77 | 109 | ||
78 | struct amdgpu_device *adev; /*AMD base driver*/ | 110 | struct amdgpu_device *adev; |
79 | struct drm_device *ddev; /*DRM base driver*/ | 111 | struct drm_device *ddev; |
80 | u16 display_indexes_num; | 112 | u16 display_indexes_num; |
81 | 113 | ||
82 | /* | 114 | /** |
83 | * 'irq_source_handler_table' holds a list of handlers | 115 | * @irq_handler_list_low_tab: |
84 | * per (DAL) IRQ source. | 116 | * |
117 | * Low priority IRQ handler table. | ||
85 | * | 118 | * |
86 | * Each IRQ source may need to be handled at different contexts. | 119 | * It is an n*m table consisting of n IRQ sources, and m handlers per IRQ |
87 | * By 'context' we mean, for example: | 120 | * source. Low priority IRQ handlers are deferred to a workqueue to be |
88 | * - The ISR context, which is the direct interrupt handler. | 121 | * processed. Hence, they can sleep. |
89 | * - The 'deferred' context - this is the post-processing of the | ||
90 | * interrupt, but at a lower priority. | ||
91 | * | 122 | * |
92 | * Note that handlers are called in the same order as they were | 123 | * Note that handlers are called in the same order as they were |
93 | * registered (FIFO). | 124 | * registered (FIFO). |
94 | */ | 125 | */ |
95 | struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; | 126 | struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; |
127 | |||
128 | /** | ||
129 | * @irq_handler_list_high_tab: | ||
130 | * | ||
131 | * High priority IRQ handler table. | ||
132 | * | ||
133 | * It is an n*m table, the same as &irq_handler_list_low_tab. However, | ||
134 | * handlers in this table are not deferred and are called immediately. | ||
135 | */ | ||
96 | struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER]; | 136 | struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER]; |
97 | 137 | ||
138 | /** | ||
139 | * @pflip_params: | ||
140 | * | ||
141 | * Page flip IRQ parameters, passed to registered handlers when | ||
142 | * triggered. | ||
143 | */ | ||
98 | struct common_irq_params | 144 | struct common_irq_params |
99 | pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1]; | 145 | pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1]; |
100 | 146 | ||
147 | /** | ||
148 | * @vblank_params: | ||
149 | * | ||
150 | * Vertical blanking IRQ parameters, passed to registered handlers when | ||
151 | * triggered. | ||
152 | */ | ||
101 | struct common_irq_params | 153 | struct common_irq_params |
102 | vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1]; | 154 | vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1]; |
103 | 155 | ||
104 | /* this spin lock synchronizes access to 'irq_handler_list_table' */ | ||
105 | spinlock_t irq_handler_list_table_lock; | 156 | spinlock_t irq_handler_list_table_lock; |
106 | 157 | ||
107 | struct backlight_device *backlight_dev; | 158 | struct backlight_device *backlight_dev; |
@@ -110,9 +161,6 @@ struct amdgpu_display_manager { | |||
110 | 161 | ||
111 | struct mod_freesync *freesync_module; | 162 | struct mod_freesync *freesync_module; |
112 | 163 | ||
113 | /** | ||
114 | * Caches device atomic state for suspend/resume | ||
115 | */ | ||
116 | struct drm_atomic_state *cached_state; | 164 | struct drm_atomic_state *cached_state; |
117 | 165 | ||
118 | struct dm_comressor_info compressor; | 166 | struct dm_comressor_info compressor; |
@@ -160,8 +208,6 @@ struct amdgpu_dm_connector { | |||
160 | struct mutex hpd_lock; | 208 | struct mutex hpd_lock; |
161 | 209 | ||
162 | bool fake_enable; | 210 | bool fake_enable; |
163 | |||
164 | bool mst_connected; | ||
165 | }; | 211 | }; |
166 | 212 | ||
167 | #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) | 213 | #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index be19e6861189..216e48cec716 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | |||
@@ -164,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) | |||
164 | */ | 164 | */ |
165 | stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; | 165 | stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; |
166 | ret = mod_color_calculate_regamma_params(stream->out_transfer_func, | 166 | ret = mod_color_calculate_regamma_params(stream->out_transfer_func, |
167 | gamma, true, adev->asic_type <= CHIP_RAVEN); | 167 | gamma, true, adev->asic_type <= CHIP_RAVEN, NULL); |
168 | dc_gamma_release(&gamma); | 168 | dc_gamma_release(&gamma); |
169 | if (!ret) { | 169 | if (!ret) { |
170 | stream->out_transfer_func->type = old_type; | 170 | stream->out_transfer_func->type = old_type; |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 01fc5717b657..f088ac585978 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | |||
@@ -75,6 +75,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) | |||
75 | return -EINVAL; | 75 | return -EINVAL; |
76 | } | 76 | } |
77 | 77 | ||
78 | if (!stream_state) { | ||
79 | DRM_ERROR("No stream state for CRTC%d\n", crtc->index); | ||
80 | return -EINVAL; | ||
81 | } | ||
82 | |||
78 | /* When enabling CRC, we should also disable dithering. */ | 83 | /* When enabling CRC, we should also disable dithering. */ |
79 | if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) { | 84 | if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) { |
80 | if (dc_stream_configure_crc(stream_state->ctx->dc, | 85 | if (dc_stream_configure_crc(stream_state->ctx->dc, |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index a212178f2edc..cd10f77cdeb0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | |||
@@ -32,16 +32,55 @@ | |||
32 | #include "amdgpu_dm.h" | 32 | #include "amdgpu_dm.h" |
33 | #include "amdgpu_dm_irq.h" | 33 | #include "amdgpu_dm_irq.h" |
34 | 34 | ||
35 | /** | ||
36 | * DOC: overview | ||
37 | * | ||
38 | * DM provides another layer of IRQ management on top of what the base driver | ||
39 | * already provides. This is something that could be cleaned up, and is a | ||
40 | * future TODO item. | ||
41 | * | ||
42 | * The base driver provides IRQ source registration with DRM, handler | ||
43 | * registration into the base driver's IRQ table, and a handler callback | ||
44 | * amdgpu_irq_handler(), which DRM calls when an interrupt fires. This generic | ||
45 | * handler looks up the IRQ table, and calls the respective | ||
46 | * &amdgpu_irq_src_funcs.process hookups. | ||
47 | * | ||
48 | * What DM provides on top are two IRQ tables specifically for top-half and | ||
49 | * bottom-half IRQ handling, with the bottom-half implementing workqueues: | ||
50 | * | ||
51 | * - &amdgpu_display_manager.irq_handler_list_high_tab | ||
52 | * - &amdgpu_display_manager.irq_handler_list_low_tab | ||
53 | * | ||
54 | * They override the base driver's IRQ table, and the effect can be seen | ||
55 | * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They | ||
56 | * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up | ||
57 | * DM's IRQ tables. However, in order for the base driver to recognize this hook, DM | ||
58 | * still needs to register the IRQ with the base driver. See | ||
59 | * dce110_register_irq_handlers() and dcn10_register_irq_handlers(). | ||
60 | * | ||
61 | * To expose DC's hardware interrupt toggle to the base driver, DM implements | ||
62 | * &amdgpu_irq_src_funcs.set hooks. Base driver calls it through | ||
63 | * amdgpu_irq_update() to enable or disable the interrupt. | ||
64 | */ | ||
65 | |||
35 | /****************************************************************************** | 66 | /****************************************************************************** |
36 | * Private declarations. | 67 | * Private declarations. |
37 | *****************************************************************************/ | 68 | *****************************************************************************/ |
38 | 69 | ||
70 | /** | ||
71 | * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers. | ||
72 | * | ||
73 | * @list: Linked list entry referencing the next/previous handler | ||
74 | * @handler: Handler function | ||
75 | * @handler_arg: Argument passed to the handler when triggered | ||
76 | * @dm: DM which this handler belongs to | ||
77 | * @irq_source: DC interrupt source that this handler is registered for | ||
78 | */ | ||
39 | struct amdgpu_dm_irq_handler_data { | 79 | struct amdgpu_dm_irq_handler_data { |
40 | struct list_head list; | 80 | struct list_head list; |
41 | interrupt_handler handler; | 81 | interrupt_handler handler; |
42 | void *handler_arg; | 82 | void *handler_arg; |
43 | 83 | ||
44 | /* DM which this handler belongs to */ | ||
45 | struct amdgpu_display_manager *dm; | 84 | struct amdgpu_display_manager *dm; |
46 | /* DAL irq source which registered for this interrupt. */ | 85 | /* DAL irq source which registered for this interrupt. */ |
47 | enum dc_irq_source irq_source; | 86 | enum dc_irq_source irq_source; |
@@ -68,7 +107,7 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, | |||
68 | } | 107 | } |
69 | 108 | ||
70 | /** | 109 | /** |
71 | * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper. | 110 | * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper. |
72 | * | 111 | * |
73 | * @work: work struct | 112 | * @work: work struct |
74 | */ | 113 | */ |
@@ -99,8 +138,8 @@ static void dm_irq_work_func(struct work_struct *work) | |||
99 | * (The most common use is HPD interrupt) */ | 138 | * (The most common use is HPD interrupt) */ |
100 | } | 139 | } |
101 | 140 | ||
102 | /** | 141 | /* |
103 | * Remove a handler and return a pointer to hander list from which the | 142 | * Remove a handler and return a pointer to handler list from which the |
104 | * handler was removed. | 143 | * handler was removed. |
105 | */ | 144 | */ |
106 | static struct list_head *remove_irq_handler(struct amdgpu_device *adev, | 145 | static struct list_head *remove_irq_handler(struct amdgpu_device *adev, |
@@ -203,6 +242,24 @@ static bool validate_irq_unregistration_params(enum dc_irq_source irq_source, | |||
203 | * Note: caller is responsible for input validation. | 242 | * Note: caller is responsible for input validation. |
204 | *****************************************************************************/ | 243 | *****************************************************************************/ |
205 | 244 | ||
245 | /** | ||
246 | * amdgpu_dm_irq_register_interrupt() - Register a handler within DM. | ||
247 | * @adev: The base driver device containing the DM device. | ||
248 | * @int_params: Interrupt parameters containing the source and handler context | ||
249 | * @ih: Function pointer to the interrupt handler to register | ||
250 | * @handler_args: Arguments passed to the handler when the interrupt occurs | ||
251 | * | ||
252 | * Register an interrupt handler for the given IRQ source, under the given | ||
253 | * context. The context can either be high or low. High context handlers are | ||
254 | * executed directly within ISR context, while low context is executed within a | ||
255 | * workqueue, thereby allowing operations that sleep. | ||
256 | * | ||
257 | * Registered handlers are called in a FIFO manner, i.e. the earliest | ||
258 | * registered handler is called first. | ||
259 | * | ||
260 | * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ | ||
261 | * source, handler function, and args | ||
262 | */ | ||
206 | void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, | 263 | void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, |
207 | struct dc_interrupt_params *int_params, | 264 | struct dc_interrupt_params *int_params, |
208 | void (*ih)(void *), | 265 | void (*ih)(void *), |
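As a rough usage sketch of the registration API documented above, modeled on how the driver sets up its HPD handling; the handler, argument and IRQ source below are assumptions for illustration and are not introduced by this patch:

	struct dc_interrupt_params int_params = {0};

	/* Low context: the handler is deferred to a workqueue and may sleep. */
	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
	int_params.irq_source = DC_IRQ_SOURCE_HPD1;

	amdgpu_dm_irq_register_interrupt(adev, &int_params,
					 handle_hpd_irq, (void *)aconnector);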
@@ -261,6 +318,15 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, | |||
261 | return handler_data; | 318 | return handler_data; |
262 | } | 319 | } |
263 | 320 | ||
321 | /** | ||
322 | * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table | ||
323 | * @adev: The base driver device containing the DM device | ||
324 | * @irq_source: IRQ source to remove the given handler from | ||
325 | * @ih: Function pointer to the interrupt handler to unregister | ||
326 | * | ||
327 | * Go through both low and high context IRQ tables, and find the given handler | ||
328 | * for the given irq source. If found, remove it. Otherwise, do nothing. | ||
329 | */ | ||
264 | void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, | 330 | void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, |
265 | enum dc_irq_source irq_source, | 331 | enum dc_irq_source irq_source, |
266 | void *ih) | 332 | void *ih) |
@@ -295,6 +361,20 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, | |||
295 | } | 361 | } |
296 | } | 362 | } |
297 | 363 | ||
364 | /** | ||
365 | * amdgpu_dm_irq_init() - Initialize DM IRQ management | ||
366 | * @adev: The base driver device containing the DM device | ||
367 | * | ||
368 | * Initialize DM's high and low context IRQ tables. | ||
369 | * | ||
370 | * The N by M table contains N IRQ sources, with M | ||
371 | * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The | ||
372 | * list_heads are initialized here. When an interrupt n is triggered, all m | ||
373 | * handlers are called in sequence, FIFO according to registration order. | ||
374 | * | ||
375 | * The low context table requires special steps to initialize, since handlers | ||
376 | * will be deferred to a workqueue. See &struct irq_list_head. | ||
377 | */ | ||
298 | int amdgpu_dm_irq_init(struct amdgpu_device *adev) | 378 | int amdgpu_dm_irq_init(struct amdgpu_device *adev) |
299 | { | 379 | { |
300 | int src; | 380 | int src; |
@@ -317,7 +397,12 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev) | |||
317 | return 0; | 397 | return 0; |
318 | } | 398 | } |
319 | 399 | ||
320 | /* DM IRQ and timer resource release */ | 400 | /** |
401 | * amdgpu_dm_irq_fini() - Tear down DM IRQ management | ||
402 | * @adev: The base driver device containing the DM device | ||
403 | * | ||
404 | * Flush all work within the low context IRQ table. | ||
405 | */ | ||
321 | void amdgpu_dm_irq_fini(struct amdgpu_device *adev) | 406 | void amdgpu_dm_irq_fini(struct amdgpu_device *adev) |
322 | { | 407 | { |
323 | int src; | 408 | int src; |
@@ -414,7 +499,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) | |||
414 | return 0; | 499 | return 0; |
415 | } | 500 | } |
416 | 501 | ||
417 | /** | 502 | /* |
418 | * amdgpu_dm_irq_schedule_work - schedule all work items registered for the | 503 | * amdgpu_dm_irq_schedule_work - schedule all work items registered for the |
419 | * "irq_source". | 504 | * "irq_source". |
420 | */ | 505 | */ |
@@ -439,8 +524,9 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, | |||
439 | 524 | ||
440 | } | 525 | } |
441 | 526 | ||
442 | /** amdgpu_dm_irq_immediate_work | 527 | /* |
443 | * Callback high irq work immediately, don't send to work queue | 528 | * amdgpu_dm_irq_immediate_work |
529 | * Callback high irq work immediately, don't send to work queue | ||
444 | */ | 530 | */ |
445 | static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, | 531 | static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, |
446 | enum dc_irq_source irq_source) | 532 | enum dc_irq_source irq_source) |
@@ -467,11 +553,14 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, | |||
467 | DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); | 553 | DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); |
468 | } | 554 | } |
469 | 555 | ||
470 | /* | 556 | /** |
471 | * amdgpu_dm_irq_handler | 557 | * amdgpu_dm_irq_handler - Generic DM IRQ handler |
558 | * @adev: amdgpu base driver device containing the DM device | ||
559 | * @source: Unused | ||
560 | * @entry: Data about the triggered interrupt | ||
472 | * | 561 | * |
473 | * Generic IRQ handler, calls all registered high irq work immediately, and | 562 | * Calls all registered high irq work immediately, and schedules work for low |
474 | * schedules work for low irq | 563 | * irq. The DM IRQ table is used to find the corresponding handlers. |
475 | */ | 564 | */ |
476 | static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, | 565 | static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, |
477 | struct amdgpu_irq_src *source, | 566 | struct amdgpu_irq_src *source, |
@@ -613,7 +702,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) | |||
613 | adev->hpd_irq.funcs = &dm_hpd_irq_funcs; | 702 | adev->hpd_irq.funcs = &dm_hpd_irq_funcs; |
614 | } | 703 | } |
615 | 704 | ||
616 | /* | 705 | /** |
617 | * amdgpu_dm_hpd_init - hpd setup callback. | 706 | * amdgpu_dm_hpd_init - hpd setup callback. |
618 | * | 707 | * |
619 | * @adev: amdgpu_device pointer | 708 | * @adev: amdgpu_device pointer |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 03601d717fed..d02c32a1039c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | |||
@@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { | |||
205 | .atomic_get_property = amdgpu_dm_connector_atomic_get_property | 205 | .atomic_get_property = amdgpu_dm_connector_atomic_get_property |
206 | }; | 206 | }; |
207 | 207 | ||
208 | void dm_dp_mst_dc_sink_create(struct drm_connector *connector) | ||
209 | { | ||
210 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | ||
211 | struct dc_sink *dc_sink; | ||
212 | struct dc_sink_init_data init_params = { | ||
213 | .link = aconnector->dc_link, | ||
214 | .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; | ||
215 | |||
216 | /* FIXME none of this is safe. we shouldn't touch aconnector here in | ||
217 | * atomic_check | ||
218 | */ | ||
219 | |||
220 | /* | ||
221 | * TODO: Need to further figure out why ddc.algo is NULL while MST port exists | ||
222 | */ | ||
223 | if (!aconnector->port || !aconnector->port->aux.ddc.algo) | ||
224 | return; | ||
225 | |||
226 | ASSERT(aconnector->edid); | ||
227 | |||
228 | dc_sink = dc_link_add_remote_sink( | ||
229 | aconnector->dc_link, | ||
230 | (uint8_t *)aconnector->edid, | ||
231 | (aconnector->edid->extensions + 1) * EDID_LENGTH, | ||
232 | &init_params); | ||
233 | |||
234 | dc_sink->priv = aconnector; | ||
235 | aconnector->dc_sink = dc_sink; | ||
236 | |||
237 | if (aconnector->dc_sink) | ||
238 | amdgpu_dm_update_freesync_caps( | ||
239 | connector, aconnector->edid); | ||
240 | } | ||
241 | |||
242 | static int dm_dp_mst_get_modes(struct drm_connector *connector) | 208 | static int dm_dp_mst_get_modes(struct drm_connector *connector) |
243 | { | 209 | { |
244 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | 210 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
@@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector) | |||
319 | struct amdgpu_device *adev = dev->dev_private; | 285 | struct amdgpu_device *adev = dev->dev_private; |
320 | struct amdgpu_encoder *amdgpu_encoder; | 286 | struct amdgpu_encoder *amdgpu_encoder; |
321 | struct drm_encoder *encoder; | 287 | struct drm_encoder *encoder; |
322 | const struct drm_connector_helper_funcs *connector_funcs = | ||
323 | connector->base.helper_private; | ||
324 | struct drm_encoder *enc_master = | ||
325 | connector_funcs->best_encoder(&connector->base); | ||
326 | 288 | ||
327 | DRM_DEBUG_KMS("enc master is %p\n", enc_master); | ||
328 | amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); | 289 | amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); |
329 | if (!amdgpu_encoder) | 290 | if (!amdgpu_encoder) |
330 | return NULL; | 291 | return NULL; |
@@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
354 | struct amdgpu_device *adev = dev->dev_private; | 315 | struct amdgpu_device *adev = dev->dev_private; |
355 | struct amdgpu_dm_connector *aconnector; | 316 | struct amdgpu_dm_connector *aconnector; |
356 | struct drm_connector *connector; | 317 | struct drm_connector *connector; |
357 | struct drm_connector_list_iter conn_iter; | ||
358 | |||
359 | drm_connector_list_iter_begin(dev, &conn_iter); | ||
360 | drm_for_each_connector_iter(connector, &conn_iter) { | ||
361 | aconnector = to_amdgpu_dm_connector(connector); | ||
362 | if (aconnector->mst_port == master | ||
363 | && !aconnector->port) { | ||
364 | DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n", | ||
365 | aconnector, connector->base.id, aconnector->mst_port); | ||
366 | |||
367 | aconnector->port = port; | ||
368 | drm_connector_set_path_property(connector, pathprop); | ||
369 | |||
370 | drm_connector_list_iter_end(&conn_iter); | ||
371 | aconnector->mst_connected = true; | ||
372 | return &aconnector->base; | ||
373 | } | ||
374 | } | ||
375 | drm_connector_list_iter_end(&conn_iter); | ||
376 | 318 | ||
377 | aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); | 319 | aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); |
378 | if (!aconnector) | 320 | if (!aconnector) |
@@ -421,8 +363,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
421 | */ | 363 | */ |
422 | amdgpu_dm_connector_funcs_reset(connector); | 364 | amdgpu_dm_connector_funcs_reset(connector); |
423 | 365 | ||
424 | aconnector->mst_connected = true; | ||
425 | |||
426 | DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", | 366 | DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", |
427 | aconnector, connector->base.id, aconnector->mst_port); | 367 | aconnector, connector->base.id, aconnector->mst_port); |
428 | 368 | ||
@@ -434,6 +374,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
434 | static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | 374 | static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, |
435 | struct drm_connector *connector) | 375 | struct drm_connector *connector) |
436 | { | 376 | { |
377 | struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); | ||
378 | struct drm_device *dev = master->base.dev; | ||
379 | struct amdgpu_device *adev = dev->dev_private; | ||
437 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | 380 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
438 | 381 | ||
439 | DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", | 382 | DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", |
@@ -447,7 +390,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
447 | aconnector->dc_sink = NULL; | 390 | aconnector->dc_sink = NULL; |
448 | } | 391 | } |
449 | 392 | ||
450 | aconnector->mst_connected = false; | 393 | drm_connector_unregister(connector); |
394 | if (adev->mode_info.rfbdev) | ||
395 | drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector); | ||
396 | drm_connector_put(connector); | ||
451 | } | 397 | } |
452 | 398 | ||
453 | static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) | 399 | static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) |
@@ -458,18 +404,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) | |||
458 | drm_kms_helper_hotplug_event(dev); | 404 | drm_kms_helper_hotplug_event(dev); |
459 | } | 405 | } |
460 | 406 | ||
461 | static void dm_dp_mst_link_status_reset(struct drm_connector *connector) | ||
462 | { | ||
463 | mutex_lock(&connector->dev->mode_config.mutex); | ||
464 | drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); | ||
465 | mutex_unlock(&connector->dev->mode_config.mutex); | ||
466 | } | ||
467 | |||
468 | static void dm_dp_mst_register_connector(struct drm_connector *connector) | 407 | static void dm_dp_mst_register_connector(struct drm_connector *connector) |
469 | { | 408 | { |
470 | struct drm_device *dev = connector->dev; | 409 | struct drm_device *dev = connector->dev; |
471 | struct amdgpu_device *adev = dev->dev_private; | 410 | struct amdgpu_device *adev = dev->dev_private; |
472 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | ||
473 | 411 | ||
474 | if (adev->mode_info.rfbdev) | 412 | if (adev->mode_info.rfbdev) |
475 | drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); | 413 | drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); |
@@ -477,9 +415,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector) | |||
477 | DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); | 415 | DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); |
478 | 416 | ||
479 | drm_connector_register(connector); | 417 | drm_connector_register(connector); |
480 | |||
481 | if (aconnector->mst_connected) | ||
482 | dm_dp_mst_link_status_reset(connector); | ||
483 | } | 418 | } |
484 | 419 | ||
485 | static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { | 420 | static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 8cf51da26657..2da851b40042 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | |||
@@ -31,6 +31,5 @@ struct amdgpu_dm_connector; | |||
31 | 31 | ||
32 | void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, | 32 | void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, |
33 | struct amdgpu_dm_connector *aconnector); | 33 | struct amdgpu_dm_connector *aconnector); |
34 | void dm_dp_mst_dc_sink_create(struct drm_connector *connector); | ||
35 | 34 | ||
36 | #endif | 35 | #endif |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 12001a006b2d..9d2d6986b983 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | |||
@@ -485,11 +485,11 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, | |||
485 | return; | 485 | return; |
486 | 486 | ||
487 | clock.clock_type = amd_pp_dcf_clock; | 487 | clock.clock_type = amd_pp_dcf_clock; |
488 | clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; | 488 | clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000; |
489 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | 489 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); |
490 | 490 | ||
491 | clock.clock_type = amd_pp_f_clock; | 491 | clock.clock_type = amd_pp_f_clock; |
492 | clock.clock_freq_in_khz = req->hard_min_fclk_khz; | 492 | clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000; |
493 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | 493 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); |
494 | } | 494 | } |
495 | 495 | ||
@@ -518,13 +518,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, | |||
518 | wm_dce_clocks[i].wm_set_id = | 518 | wm_dce_clocks[i].wm_set_id = |
519 | ranges->reader_wm_sets[i].wm_inst; | 519 | ranges->reader_wm_sets[i].wm_inst; |
520 | wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = | 520 | wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = |
521 | ranges->reader_wm_sets[i].max_drain_clk_khz; | 521 | ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000; |
522 | wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = | 522 | wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = |
523 | ranges->reader_wm_sets[i].min_drain_clk_khz; | 523 | ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000; |
524 | wm_dce_clocks[i].wm_max_mem_clk_in_khz = | 524 | wm_dce_clocks[i].wm_max_mem_clk_in_khz = |
525 | ranges->reader_wm_sets[i].max_fill_clk_khz; | 525 | ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000; |
526 | wm_dce_clocks[i].wm_min_mem_clk_in_khz = | 526 | wm_dce_clocks[i].wm_min_mem_clk_in_khz = |
527 | ranges->reader_wm_sets[i].min_fill_clk_khz; | 527 | ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000; |
528 | } | 528 | } |
529 | 529 | ||
530 | for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { | 530 | for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { |
@@ -534,13 +534,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, | |||
534 | wm_soc_clocks[i].wm_set_id = | 534 | wm_soc_clocks[i].wm_set_id = |
535 | ranges->writer_wm_sets[i].wm_inst; | 535 | ranges->writer_wm_sets[i].wm_inst; |
536 | wm_soc_clocks[i].wm_max_socclk_clk_in_khz = | 536 | wm_soc_clocks[i].wm_max_socclk_clk_in_khz = |
537 | ranges->writer_wm_sets[i].max_fill_clk_khz; | 537 | ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000; |
538 | wm_soc_clocks[i].wm_min_socclk_clk_in_khz = | 538 | wm_soc_clocks[i].wm_min_socclk_clk_in_khz = |
539 | ranges->writer_wm_sets[i].min_fill_clk_khz; | 539 | ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000; |
540 | wm_soc_clocks[i].wm_max_mem_clk_in_khz = | 540 | wm_soc_clocks[i].wm_max_mem_clk_in_khz = |
541 | ranges->writer_wm_sets[i].max_drain_clk_khz; | 541 | ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000; |
542 | wm_soc_clocks[i].wm_min_mem_clk_in_khz = | 542 | wm_soc_clocks[i].wm_min_mem_clk_in_khz = |
543 | ranges->writer_wm_sets[i].min_drain_clk_khz; | 543 | ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; |
544 | } | 544 | } |
545 | 545 | ||
546 | pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges); | 546 | pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges); |
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 0e1dc1b1a48d..c2ab026aee91 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | |||
@@ -2030,7 +2030,7 @@ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object, | |||
2030 | static struct device_id device_type_from_device_id(uint16_t device_id) | 2030 | static struct device_id device_type_from_device_id(uint16_t device_id) |
2031 | { | 2031 | { |
2032 | 2032 | ||
2033 | struct device_id result_device_id; | 2033 | struct device_id result_device_id = {0}; |
2034 | 2034 | ||
2035 | switch (device_id) { | 2035 | switch (device_id) { |
2036 | case ATOM_DEVICE_LCD1_SUPPORT: | 2036 | case ATOM_DEVICE_LCD1_SUPPORT: |
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index ff764da21b6f..751bb614fc0e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | |||
@@ -1884,6 +1884,8 @@ static const struct dc_vbios_funcs vbios_funcs = { | |||
1884 | 1884 | ||
1885 | .is_accelerated_mode = bios_parser_is_accelerated_mode, | 1885 | .is_accelerated_mode = bios_parser_is_accelerated_mode, |
1886 | 1886 | ||
1887 | .is_active_display = bios_is_active_display, | ||
1888 | |||
1887 | .set_scratch_critical_state = bios_parser_set_scratch_critical_state, | 1889 | .set_scratch_critical_state = bios_parser_set_scratch_critical_state, |
1888 | 1890 | ||
1889 | 1891 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c index d4589470985c..fdda8aa8e303 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c | |||
@@ -88,3 +88,96 @@ uint32_t bios_get_vga_enabled_displays( | |||
88 | return active_disp; | 88 | return active_disp; |
89 | } | 89 | } |
90 | 90 | ||
91 | bool bios_is_active_display( | ||
92 | struct dc_bios *bios, | ||
93 | enum signal_type signal, | ||
94 | const struct connector_device_tag_info *device_tag) | ||
95 | { | ||
96 | uint32_t active = 0; | ||
97 | uint32_t connected = 0; | ||
98 | uint32_t bios_scratch_0 = 0; | ||
99 | uint32_t bios_scratch_3 = 0; | ||
100 | |||
101 | switch (signal) { | ||
102 | case SIGNAL_TYPE_DVI_SINGLE_LINK: | ||
103 | case SIGNAL_TYPE_DVI_DUAL_LINK: | ||
104 | case SIGNAL_TYPE_HDMI_TYPE_A: | ||
105 | case SIGNAL_TYPE_DISPLAY_PORT: | ||
106 | case SIGNAL_TYPE_DISPLAY_PORT_MST: | ||
107 | { | ||
108 | if (device_tag->dev_id.device_type == DEVICE_TYPE_DFP) { | ||
109 | switch (device_tag->dev_id.enum_id) { | ||
110 | case 1: | ||
111 | { | ||
112 | active = ATOM_S3_DFP1_ACTIVE; | ||
113 | connected = 0x0008; //ATOM_DISPLAY_DFP1_CONNECT | ||
114 | } | ||
115 | break; | ||
116 | |||
117 | case 2: | ||
118 | { | ||
119 | active = ATOM_S3_DFP2_ACTIVE; | ||
120 | connected = 0x0080; //ATOM_DISPLAY_DFP2_CONNECT | ||
121 | } | ||
122 | break; | ||
123 | |||
124 | case 3: | ||
125 | { | ||
126 | active = ATOM_S3_DFP3_ACTIVE; | ||
127 | connected = 0x0200; //ATOM_DISPLAY_DFP3_CONNECT | ||
128 | } | ||
129 | break; | ||
130 | |||
131 | case 4: | ||
132 | { | ||
133 | active = ATOM_S3_DFP4_ACTIVE; | ||
134 | connected = 0x0400; //ATOM_DISPLAY_DFP4_CONNECT | ||
135 | } | ||
136 | break; | ||
137 | |||
138 | case 5: | ||
139 | { | ||
140 | active = ATOM_S3_DFP5_ACTIVE; | ||
141 | connected = 0x0800; //ATOM_DISPLAY_DFP5_CONNECT | ||
142 | } | ||
143 | break; | ||
144 | |||
145 | case 6: | ||
146 | { | ||
147 | active = ATOM_S3_DFP6_ACTIVE; | ||
148 | connected = 0x0040; //ATOM_DISPLAY_DFP6_CONNECT | ||
149 | } | ||
150 | break; | ||
151 | |||
152 | default: | ||
153 | break; | ||
154 | } | ||
155 | } | ||
156 | } | ||
157 | break; | ||
158 | |||
159 | case SIGNAL_TYPE_LVDS: | ||
160 | case SIGNAL_TYPE_EDP: | ||
161 | { | ||
162 | active = ATOM_S3_LCD1_ACTIVE; | ||
163 | connected = 0x0002; //ATOM_DISPLAY_LCD1_CONNECT | ||
164 | } | ||
165 | break; | ||
166 | |||
167 | default: | ||
168 | break; | ||
169 | } | ||
170 | |||
171 | |||
172 | if (bios->regs->BIOS_SCRATCH_0) /* TODO: follow up with other ASICs */ | ||
173 | bios_scratch_0 = REG_READ(BIOS_SCRATCH_0); | ||
174 | if (bios->regs->BIOS_SCRATCH_3) /* TODO: follow up with other ASICs */ | ||
175 | bios_scratch_3 = REG_READ(BIOS_SCRATCH_3); | ||
176 | |||
177 | bios_scratch_3 &= ATOM_S3_DEVICE_ACTIVE_MASK; | ||
178 | if ((active & bios_scratch_3) && (connected & bios_scratch_0)) | ||
179 | return true; | ||
180 | |||
181 | return false; | ||
182 | } | ||
183 | |||
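
bios_is_active_display() answers whether the VBIOS reports a given connector as both connected (BIOS_SCRATCH_0) and actively driven (BIOS_SCRATCH_3). A hypothetical caller, going through the vbios function table that gains the hook in the next two files, could look like the sketch below; link_already_active() is not a real driver function, and it assumes dc_link's ctx, connector_signal and device_tag fields:

        static bool link_already_active(struct dc_link *link)
        {
                struct dc_bios *bios = link->ctx->dc_bios;

                if (!bios->funcs->is_active_display)
                        return false;

                return bios->funcs->is_active_display(bios,
                                link->connector_signal,
                                &link->device_tag);
        }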
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h index 75a29e68fb27..f33cac2147e3 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h | |||
@@ -35,6 +35,10 @@ bool bios_is_accelerated_mode(struct dc_bios *bios); | |||
35 | void bios_set_scratch_acc_mode_change(struct dc_bios *bios); | 35 | void bios_set_scratch_acc_mode_change(struct dc_bios *bios); |
36 | void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); | 36 | void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); |
37 | uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); | 37 | uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); |
38 | bool bios_is_active_display( | ||
39 | struct dc_bios *bios, | ||
40 | enum signal_type signal, | ||
41 | const struct connector_device_tag_info *device_tag); | ||
38 | 42 | ||
39 | #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) | 43 | #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) |
40 | 44 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 3208188b7ed4..43e4a2be0fa6 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | |||
@@ -1423,27 +1423,27 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) | |||
1423 | ranges.num_reader_wm_sets = WM_SET_COUNT; | 1423 | ranges.num_reader_wm_sets = WM_SET_COUNT; |
1424 | ranges.num_writer_wm_sets = WM_SET_COUNT; | 1424 | ranges.num_writer_wm_sets = WM_SET_COUNT; |
1425 | ranges.reader_wm_sets[0].wm_inst = WM_A; | 1425 | ranges.reader_wm_sets[0].wm_inst = WM_A; |
1426 | ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz; | 1426 | ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000; |
1427 | ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive; | 1427 | ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; |
1428 | ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz; | 1428 | ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000; |
1429 | ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive; | 1429 | ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; |
1430 | ranges.writer_wm_sets[0].wm_inst = WM_A; | 1430 | ranges.writer_wm_sets[0].wm_inst = WM_A; |
1431 | ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz; | 1431 | ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000; |
1432 | ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive; | 1432 | ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; |
1433 | ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz; | 1433 | ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000; |
1434 | ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive; | 1434 | ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; |
1435 | 1435 | ||
1436 | if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) { | 1436 | if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) { |
1437 | ranges.reader_wm_sets[0].wm_inst = WM_A; | 1437 | ranges.reader_wm_sets[0].wm_inst = WM_A; |
1438 | ranges.reader_wm_sets[0].min_drain_clk_khz = 300000; | 1438 | ranges.reader_wm_sets[0].min_drain_clk_mhz = 300; |
1439 | ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000; | 1439 | ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000; |
1440 | ranges.reader_wm_sets[0].min_fill_clk_khz = 800000; | 1440 | ranges.reader_wm_sets[0].min_fill_clk_mhz = 800; |
1441 | ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000; | 1441 | ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000; |
1442 | ranges.writer_wm_sets[0].wm_inst = WM_A; | 1442 | ranges.writer_wm_sets[0].wm_inst = WM_A; |
1443 | ranges.writer_wm_sets[0].min_fill_clk_khz = 200000; | 1443 | ranges.writer_wm_sets[0].min_fill_clk_mhz = 200; |
1444 | ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000; | 1444 | ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000; |
1445 | ranges.writer_wm_sets[0].min_drain_clk_khz = 800000; | 1445 | ranges.writer_wm_sets[0].min_drain_clk_mhz = 800; |
1446 | ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000; | 1446 | ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000; |
1447 | } | 1447 | } |
1448 | 1448 | ||
1449 | ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0]; | 1449 | ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0]; |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 7c491c91465f..3279e26c3440 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c | |||
@@ -391,9 +391,11 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) | |||
391 | == stream) { | 391 | == stream) { |
392 | 392 | ||
393 | pipes = &dc->current_state->res_ctx.pipe_ctx[i]; | 393 | pipes = &dc->current_state->res_ctx.pipe_ctx[i]; |
394 | dc->hwss.program_csc_matrix(pipes, | 394 | dc->hwss.program_output_csc(dc, |
395 | stream->output_color_space, | 395 | pipes, |
396 | stream->csc_color_matrix.matrix); | 396 | stream->output_color_space, |
397 | stream->csc_color_matrix.matrix, | ||
398 | pipes->plane_res.hubp->opp_id); | ||
397 | ret = true; | 399 | ret = true; |
398 | } | 400 | } |
399 | } | 401 | } |
@@ -941,7 +943,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c | |||
941 | if (!dcb->funcs->is_accelerated_mode(dcb)) | 943 | if (!dcb->funcs->is_accelerated_mode(dcb)) |
942 | dc->hwss.enable_accelerated_mode(dc, context); | 944 | dc->hwss.enable_accelerated_mode(dc, context); |
943 | 945 | ||
944 | dc->hwss.set_bandwidth(dc, context, false); | 946 | dc->hwss.prepare_bandwidth(dc, context); |
945 | 947 | ||
946 | /* re-program planes for existing stream, in case we need to | 948 | /* re-program planes for existing stream, in case we need to |
947 | * free up plane resource for later use | 949 | * free up plane resource for later use |
@@ -957,8 +959,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c | |||
957 | } | 959 | } |
958 | 960 | ||
959 | /* Program hardware */ | 961 | /* Program hardware */ |
960 | dc->hwss.ready_shared_resources(dc, context); | ||
961 | |||
962 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | 962 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
963 | pipe = &context->res_ctx.pipe_ctx[i]; | 963 | pipe = &context->res_ctx.pipe_ctx[i]; |
964 | dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); | 964 | dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); |
@@ -1012,7 +1012,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c | |||
1012 | dc_enable_stereo(dc, context, dc_streams, context->stream_count); | 1012 | dc_enable_stereo(dc, context, dc_streams, context->stream_count); |
1013 | 1013 | ||
1014 | /* pplib is notified if disp_num changed */ | 1014 | /* pplib is notified if disp_num changed */ |
1015 | dc->hwss.set_bandwidth(dc, context, true); | 1015 | dc->hwss.optimize_bandwidth(dc, context); |
1016 | 1016 | ||
1017 | dc_release_state(dc->current_state); | 1017 | dc_release_state(dc->current_state); |
1018 | 1018 | ||
@@ -1020,8 +1020,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c | |||
1020 | 1020 | ||
1021 | dc_retain_state(dc->current_state); | 1021 | dc_retain_state(dc->current_state); |
1022 | 1022 | ||
1023 | dc->hwss.optimize_shared_resources(dc); | ||
1024 | |||
1025 | return result; | 1023 | return result; |
1026 | } | 1024 | } |
1027 | 1025 | ||
@@ -1063,7 +1061,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) | |||
1063 | 1061 | ||
1064 | dc->optimized_required = false; | 1062 | dc->optimized_required = false; |
1065 | 1063 | ||
1066 | dc->hwss.set_bandwidth(dc, context, true); | 1064 | dc->hwss.optimize_bandwidth(dc, context); |
1067 | return true; | 1065 | return true; |
1068 | } | 1066 | } |
1069 | 1067 | ||
@@ -1369,35 +1367,6 @@ static struct dc_stream_status *stream_get_status( | |||
1369 | 1367 | ||
1370 | static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; | 1368 | static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; |
1371 | 1369 | ||
1372 | static void notify_display_count_to_smu( | ||
1373 | struct dc *dc, | ||
1374 | struct dc_state *context) | ||
1375 | { | ||
1376 | int i, display_count; | ||
1377 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | ||
1378 | |||
1379 | /* | ||
1380 | * if function pointer not set up, this message is | ||
1381 | * sent as part of pplib_apply_display_requirements. | ||
1382 | * So just return. | ||
1383 | */ | ||
1384 | if (!pp_smu || !pp_smu->set_display_count) | ||
1385 | return; | ||
1386 | |||
1387 | display_count = 0; | ||
1388 | for (i = 0; i < context->stream_count; i++) { | ||
1389 | const struct dc_stream_state *stream = context->streams[i]; | ||
1390 | |||
1391 | /* only notify active stream */ | ||
1392 | if (stream->dpms_off) | ||
1393 | continue; | ||
1394 | |||
1395 | display_count++; | ||
1396 | } | ||
1397 | |||
1398 | pp_smu->set_display_count(&pp_smu->pp_smu, display_count); | ||
1399 | } | ||
1400 | |||
1401 | static void commit_planes_do_stream_update(struct dc *dc, | 1370 | static void commit_planes_do_stream_update(struct dc *dc, |
1402 | struct dc_stream_state *stream, | 1371 | struct dc_stream_state *stream, |
1403 | struct dc_stream_update *stream_update, | 1372 | struct dc_stream_update *stream_update, |
@@ -1422,7 +1391,6 @@ static void commit_planes_do_stream_update(struct dc *dc, | |||
1422 | stream_update->adjust->v_total_max); | 1391 | stream_update->adjust->v_total_max); |
1423 | 1392 | ||
1424 | if (stream_update->periodic_fn_vsync_delta && | 1393 | if (stream_update->periodic_fn_vsync_delta && |
1425 | pipe_ctx->stream_res.tg && | ||
1426 | pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) | 1394 | pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) |
1427 | pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( | 1395 | pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( |
1428 | pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, | 1396 | pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, |
@@ -1448,19 +1416,13 @@ static void commit_planes_do_stream_update(struct dc *dc, | |||
1448 | if (stream_update->dpms_off) { | 1416 | if (stream_update->dpms_off) { |
1449 | if (*stream_update->dpms_off) { | 1417 | if (*stream_update->dpms_off) { |
1450 | core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE); | 1418 | core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE); |
1451 | dc->hwss.pplib_apply_display_requirements( | 1419 | dc->hwss.optimize_bandwidth(dc, dc->current_state); |
1452 | dc, dc->current_state); | ||
1453 | notify_display_count_to_smu(dc, dc->current_state); | ||
1454 | } else { | 1420 | } else { |
1455 | dc->hwss.pplib_apply_display_requirements( | 1421 | dc->hwss.prepare_bandwidth(dc, dc->current_state); |
1456 | dc, dc->current_state); | ||
1457 | notify_display_count_to_smu(dc, dc->current_state); | ||
1458 | core_link_enable_stream(dc->current_state, pipe_ctx); | 1422 | core_link_enable_stream(dc->current_state, pipe_ctx); |
1459 | } | 1423 | } |
1460 | } | 1424 | } |
1461 | 1425 | ||
1462 | |||
1463 | |||
1464 | if (stream_update->abm_level && pipe_ctx->stream_res.abm) { | 1426 | if (stream_update->abm_level && pipe_ctx->stream_res.abm) { |
1465 | if (pipe_ctx->stream_res.tg->funcs->is_blanked) { | 1427 | if (pipe_ctx->stream_res.tg->funcs->is_blanked) { |
1466 | // if otg funcs defined check if blanked before programming | 1428 | // if otg funcs defined check if blanked before programming |
@@ -1487,7 +1449,7 @@ static void commit_planes_for_stream(struct dc *dc, | |||
1487 | struct pipe_ctx *top_pipe_to_program = NULL; | 1449 | struct pipe_ctx *top_pipe_to_program = NULL; |
1488 | 1450 | ||
1489 | if (update_type == UPDATE_TYPE_FULL) { | 1451 | if (update_type == UPDATE_TYPE_FULL) { |
1490 | dc->hwss.set_bandwidth(dc, context, false); | 1452 | dc->hwss.prepare_bandwidth(dc, context); |
1491 | context_clock_trace(dc, context); | 1453 | context_clock_trace(dc, context); |
1492 | } | 1454 | } |
1493 | 1455 | ||
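
prepare_bandwidth()/optimize_bandwidth() replace the two flavours the old set_bandwidth(dc, context, decrease_allowed) call used to select with its boolean argument: clocks are raised before pipes are programmed and trimmed back once the new state is committed. A rough sketch of the intended ordering, not a verbatim copy of any sequencer function:

        /* Assumes a full (UPDATE_TYPE_FULL) update against a new context. */
        static void program_with_new_clocks(struct dc *dc, struct dc_state *context)
        {
                dc->hwss.prepare_bandwidth(dc, context);   /* raise clocks first */

                /* ... program pipes/streams against the new context ... */

                dc->hwss.optimize_bandwidth(dc, context);  /* then drop to the minimum that fits */
        }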
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index e1ebdf7b5eaf..73d049506618 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c | |||
@@ -311,7 +311,7 @@ void context_timing_trace( | |||
311 | { | 311 | { |
312 | int i; | 312 | int i; |
313 | struct dc *core_dc = dc; | 313 | struct dc *core_dc = dc; |
314 | int h_pos[MAX_PIPES], v_pos[MAX_PIPES]; | 314 | int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0}; |
315 | struct crtc_position position; | 315 | struct crtc_position position; |
316 | unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; | 316 | unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; |
317 | DC_LOGGER_INIT(dc->ctx->logger); | 317 | DC_LOGGER_INIT(dc->ctx->logger); |
@@ -322,8 +322,7 @@ void context_timing_trace( | |||
322 | /* get_position() returns CRTC vertical/horizontal counter | 322 | /* get_position() returns CRTC vertical/horizontal counter |
323 | * hence not applicable for underlay pipe | 323 | * hence not applicable for underlay pipe |
324 | */ | 324 | */ |
325 | if (pipe_ctx->stream == NULL | 325 | if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) |
326 | || pipe_ctx->pipe_idx == underlay_idx) | ||
327 | continue; | 326 | continue; |
328 | 327 | ||
329 | pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position); | 328 | pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position); |
@@ -333,7 +332,7 @@ void context_timing_trace( | |||
333 | for (i = 0; i < core_dc->res_pool->pipe_count; i++) { | 332 | for (i = 0; i < core_dc->res_pool->pipe_count; i++) { |
334 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; | 333 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; |
335 | 334 | ||
336 | if (pipe_ctx->stream == NULL) | 335 | if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) |
337 | continue; | 336 | continue; |
338 | 337 | ||
339 | TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n", | 338 | TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n", |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index fb04a4ad141f..7ee9c033acbd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
@@ -1357,28 +1357,13 @@ static enum dc_status enable_link_dp( | |||
1357 | struct dc_link *link = stream->sink->link; | 1357 | struct dc_link *link = stream->sink->link; |
1358 | struct dc_link_settings link_settings = {0}; | 1358 | struct dc_link_settings link_settings = {0}; |
1359 | enum dp_panel_mode panel_mode; | 1359 | enum dp_panel_mode panel_mode; |
1360 | enum dc_link_rate max_link_rate = LINK_RATE_HIGH2; | ||
1361 | 1360 | ||
1362 | /* get link settings for video mode timing */ | 1361 | /* get link settings for video mode timing */ |
1363 | decide_link_settings(stream, &link_settings); | 1362 | decide_link_settings(stream, &link_settings); |
1364 | 1363 | ||
1365 | /* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS | 1364 | pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = |
1366 | * logic for HBR3 still needs Nominal (0.8V) on VDDC rail | 1365 | link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; |
1367 | */ | 1366 | state->dccg->funcs->update_clocks(state->dccg, state, false); |
1368 | if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE) | ||
1369 | max_link_rate = LINK_RATE_HIGH3; | ||
1370 | |||
1371 | if (link_settings.link_rate == max_link_rate) { | ||
1372 | struct dc_clocks clocks = state->bw.dcn.clk; | ||
1373 | |||
1374 | /* dce/dcn compat, do not update dispclk */ | ||
1375 | clocks.dispclk_khz = 0; | ||
1376 | /* 27mhz = 27000000hz= 27000khz */ | ||
1377 | clocks.phyclk_khz = link_settings.link_rate * 27000; | ||
1378 | |||
1379 | state->dis_clk->funcs->update_clocks( | ||
1380 | state->dis_clk, &clocks, false); | ||
1381 | } | ||
1382 | 1367 | ||
1383 | dp_enable_link_phy( | 1368 | dp_enable_link_phy( |
1384 | link, | 1369 | link, |
@@ -1722,7 +1707,7 @@ static void write_i2c_retimer_setting( | |||
1722 | i2c_success = i2c_write(pipe_ctx, slave_address, | 1707 | i2c_success = i2c_write(pipe_ctx, slave_address, |
1723 | buffer, sizeof(buffer)); | 1708 | buffer, sizeof(buffer)); |
1724 | RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ | 1709 | RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ |
1725 | offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", | 1710 | offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", |
1726 | slave_address, buffer[0], buffer[1], i2c_success?1:0); | 1711 | slave_address, buffer[0], buffer[1], i2c_success?1:0); |
1727 | if (!i2c_success) | 1712 | if (!i2c_success) |
1728 | /* Write failure */ | 1713 | /* Write failure */ |
@@ -1734,7 +1719,7 @@ static void write_i2c_retimer_setting( | |||
1734 | i2c_success = i2c_write(pipe_ctx, slave_address, | 1719 | i2c_success = i2c_write(pipe_ctx, slave_address, |
1735 | buffer, sizeof(buffer)); | 1720 | buffer, sizeof(buffer)); |
1736 | RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ | 1721 | RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ |
1737 | offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", | 1722 | offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", |
1738 | slave_address, buffer[0], buffer[1], i2c_success?1:0); | 1723 | slave_address, buffer[0], buffer[1], i2c_success?1:0); |
1739 | if (!i2c_success) | 1724 | if (!i2c_success) |
1740 | /* Write failure */ | 1725 | /* Write failure */ |
@@ -2156,14 +2141,16 @@ int dc_link_get_backlight_level(const struct dc_link *link) | |||
2156 | { | 2141 | { |
2157 | struct abm *abm = link->ctx->dc->res_pool->abm; | 2142 | struct abm *abm = link->ctx->dc->res_pool->abm; |
2158 | 2143 | ||
2159 | if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL) | 2144 | if (abm == NULL || abm->funcs->get_current_backlight == NULL) |
2160 | return DC_ERROR_UNEXPECTED; | 2145 | return DC_ERROR_UNEXPECTED; |
2161 | 2146 | ||
2162 | return (int) abm->funcs->get_current_backlight_8_bit(abm); | 2147 | return (int) abm->funcs->get_current_backlight(abm); |
2163 | } | 2148 | } |
2164 | 2149 | ||
2165 | bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, | 2150 | bool dc_link_set_backlight_level(const struct dc_link *link, |
2166 | uint32_t frame_ramp, const struct dc_stream_state *stream) | 2151 | uint32_t backlight_pwm_u16_16, |
2152 | uint32_t frame_ramp, | ||
2153 | const struct dc_stream_state *stream) | ||
2167 | { | 2154 | { |
2168 | struct dc *core_dc = link->ctx->dc; | 2155 | struct dc *core_dc = link->ctx->dc; |
2169 | struct abm *abm = core_dc->res_pool->abm; | 2156 | struct abm *abm = core_dc->res_pool->abm; |
@@ -2175,19 +2162,17 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, | |||
2175 | 2162 | ||
2176 | if ((dmcu == NULL) || | 2163 | if ((dmcu == NULL) || |
2177 | (abm == NULL) || | 2164 | (abm == NULL) || |
2178 | (abm->funcs->set_backlight_level == NULL)) | 2165 | (abm->funcs->set_backlight_level_pwm == NULL)) |
2179 | return false; | 2166 | return false; |
2180 | 2167 | ||
2181 | if (stream) { | 2168 | if (stream) |
2182 | if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL) | 2169 | ((struct dc_stream_state *)stream)->bl_pwm_level = |
2183 | frame_ramp = 0; | 2170 | backlight_pwm_u16_16; |
2184 | |||
2185 | ((struct dc_stream_state *)stream)->bl_pwm_level = level; | ||
2186 | } | ||
2187 | 2171 | ||
2188 | use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); | 2172 | use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); |
2189 | 2173 | ||
2190 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level); | 2174 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", |
2175 | backlight_pwm_u16_16, backlight_pwm_u16_16); | ||
2191 | 2176 | ||
2192 | if (dc_is_embedded_signal(link->connector_signal)) { | 2177 | if (dc_is_embedded_signal(link->connector_signal)) { |
2193 | if (stream != NULL) { | 2178 | if (stream != NULL) { |
@@ -2204,9 +2189,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, | |||
2204 | 1; | 2189 | 1; |
2205 | } | 2190 | } |
2206 | } | 2191 | } |
2207 | abm->funcs->set_backlight_level( | 2192 | abm->funcs->set_backlight_level_pwm( |
2208 | abm, | 2193 | abm, |
2209 | level, | 2194 | backlight_pwm_u16_16, |
2210 | frame_ramp, | 2195 | frame_ramp, |
2211 | controller_id, | 2196 | controller_id, |
2212 | use_smooth_brightness); | 2197 | use_smooth_brightness); |
@@ -2220,7 +2205,7 @@ bool dc_link_set_abm_disable(const struct dc_link *link) | |||
2220 | struct dc *core_dc = link->ctx->dc; | 2205 | struct dc *core_dc = link->ctx->dc; |
2221 | struct abm *abm = core_dc->res_pool->abm; | 2206 | struct abm *abm = core_dc->res_pool->abm; |
2222 | 2207 | ||
2223 | if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL)) | 2208 | if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL)) |
2224 | return false; | 2209 | return false; |
2225 | 2210 | ||
2226 | abm->funcs->set_abm_immediate_disable(abm); | 2211 | abm->funcs->set_abm_immediate_disable(abm); |
@@ -2609,6 +2594,10 @@ void core_link_enable_stream( | |||
2609 | core_dc->hwss.unblank_stream(pipe_ctx, | 2594 | core_dc->hwss.unblank_stream(pipe_ctx, |
2610 | &pipe_ctx->stream->sink->link->cur_link_settings); | 2595 | &pipe_ctx->stream->sink->link->cur_link_settings); |
2611 | 2596 | ||
2597 | dc_link_set_backlight_level(pipe_ctx->stream->sink->link, | ||
2598 | pipe_ctx->stream->bl_pwm_level, | ||
2599 | 0, | ||
2600 | pipe_ctx->stream); | ||
2612 | } | 2601 | } |
2613 | 2602 | ||
2614 | } | 2603 | } |
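
dc_link_set_backlight_level() now takes the backlight as unsigned 16.16 fixed point (0x10000 == 1.0) instead of an 8-bit level. One way a caller could map a 0..255 brightness onto that format, mirroring the byte-replication and rounding math removed from dce_abm.c later in this change; the helper name is hypothetical:

        static uint32_t brightness_to_pwm_u16_16(uint8_t level)
        {
                uint32_t v = level * 0x10101;           /* replicate the byte: 0xEF -> 0xEFEFEF */

                return (v >> 8) + ((v >> 7) & 1);       /* round; 0xFF maps to 0x10000 (1.0) */
        }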
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index b6fe29b9fb65..fc65b0055167 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c | |||
@@ -499,8 +499,13 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) | |||
499 | pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; | 499 | pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; |
500 | bool flip_vert_scan_dir = false, flip_horz_scan_dir = false; | 500 | bool flip_vert_scan_dir = false, flip_horz_scan_dir = false; |
501 | 501 | ||
502 | |||
502 | /* | 503 | /* |
503 | * Need to calculate the scan direction for viewport to properly determine offset | 504 | * We need to take horizontal mirror into account. On an unrotated surface this means
505 | * that the viewport offset is actually the offset from the other side of the source | ||
506 | * image, so we have to subtract the right edge of the viewport from the right edge of | ||
507 | * the source window. As with mirror, we need to take into account how the offset is | ||
508 | * affected by 270/180 rotations. | ||
504 | */ | 509 | */ |
505 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) { | 510 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) { |
506 | flip_vert_scan_dir = true; | 511 | flip_vert_scan_dir = true; |
@@ -510,6 +515,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) | |||
510 | else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) | 515 | else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) |
511 | flip_horz_scan_dir = true; | 516 | flip_horz_scan_dir = true; |
512 | 517 | ||
518 | if (pipe_ctx->plane_state->horizontal_mirror) | ||
519 | flip_horz_scan_dir = !flip_horz_scan_dir; | ||
520 | |||
513 | if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || | 521 | if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || |
514 | stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { | 522 | stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { |
515 | pri_split = false; | 523 | pri_split = false; |
@@ -540,45 +548,27 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) | |||
540 | plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ; | 548 | plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ; |
541 | 549 | ||
542 | /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio | 550 | /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio |
551 | * note: surf_src.ofs should be added after rotation/mirror offset direction | ||
552 | * adjustment since it is already in viewport space | ||
543 | * num_pixels = clip.num_pix * scl_ratio | 553 | * num_pixels = clip.num_pix * scl_ratio |
544 | */ | 554 | */ |
545 | data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) * | 555 | data->viewport.x = (clip.x - plane_state->dst_rect.x) * |
546 | surf_src.width / plane_state->dst_rect.width; | 556 | surf_src.width / plane_state->dst_rect.width; |
547 | data->viewport.width = clip.width * | 557 | data->viewport.width = clip.width * |
548 | surf_src.width / plane_state->dst_rect.width; | 558 | surf_src.width / plane_state->dst_rect.width; |
549 | 559 | ||
550 | data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) * | 560 | data->viewport.y = (clip.y - plane_state->dst_rect.y) * |
551 | surf_src.height / plane_state->dst_rect.height; | 561 | surf_src.height / plane_state->dst_rect.height; |
552 | data->viewport.height = clip.height * | 562 | data->viewport.height = clip.height * |
553 | surf_src.height / plane_state->dst_rect.height; | 563 | surf_src.height / plane_state->dst_rect.height; |
554 | 564 | ||
555 | /* To transfer the x, y to correct coordinate on mirror image (camera). | 565 | if (flip_vert_scan_dir) |
556 | * deg 0 : transfer x, | 566 | data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; |
557 | * deg 90 : don't need to transfer, | 567 | if (flip_horz_scan_dir) |
558 | * deg180 : transfer y, | 568 | data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; |
559 | * deg270 : transfer x and y. | 569 | |
560 | * To transfer the x, y to correct coordinate on non-mirror image (video). | 570 | data->viewport.x += surf_src.x; |
561 | * deg 0 : don't need to transfer, | 571 | data->viewport.y += surf_src.y; |
562 | * deg 90 : transfer y, | ||
563 | * deg180 : transfer x and y, | ||
564 | * deg270 : transfer x. | ||
565 | */ | ||
566 | if (pipe_ctx->plane_state->horizontal_mirror) { | ||
567 | if (flip_horz_scan_dir && !flip_vert_scan_dir) { | ||
568 | data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; | ||
569 | data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; | ||
570 | } else if (flip_horz_scan_dir && flip_vert_scan_dir) | ||
571 | data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; | ||
572 | else { | ||
573 | if (!flip_horz_scan_dir && !flip_vert_scan_dir) | ||
574 | data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; | ||
575 | } | ||
576 | } else { | ||
577 | if (flip_horz_scan_dir) | ||
578 | data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; | ||
579 | if (flip_vert_scan_dir) | ||
580 | data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; | ||
581 | } | ||
582 | 572 | ||
583 | /* Round down, compensate in init */ | 573 | /* Round down, compensate in init */ |
584 | data->viewport_c.x = data->viewport.x / vpc_div; | 574 | data->viewport_c.x = data->viewport.x / vpc_div; |
@@ -773,22 +763,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *r | |||
773 | else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) | 763 | else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) |
774 | flip_horz_scan_dir = true; | 764 | flip_horz_scan_dir = true; |
775 | 765 | ||
766 | if (pipe_ctx->plane_state->horizontal_mirror) | ||
767 | flip_horz_scan_dir = !flip_horz_scan_dir; | ||
768 | |||
776 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || | 769 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || |
777 | pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { | 770 | pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { |
778 | rect_swap_helper(&src); | 771 | rect_swap_helper(&src); |
779 | rect_swap_helper(&data->viewport_c); | 772 | rect_swap_helper(&data->viewport_c); |
780 | rect_swap_helper(&data->viewport); | 773 | rect_swap_helper(&data->viewport); |
781 | 774 | } | |
782 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270 && | ||
783 | pipe_ctx->plane_state->horizontal_mirror) { | ||
784 | flip_vert_scan_dir = true; | ||
785 | } | ||
786 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 && | ||
787 | pipe_ctx->plane_state->horizontal_mirror) { | ||
788 | flip_vert_scan_dir = false; | ||
789 | } | ||
790 | } else if (pipe_ctx->plane_state->horizontal_mirror) | ||
791 | flip_horz_scan_dir = !flip_horz_scan_dir; | ||
792 | 775 | ||
793 | /* | 776 | /* |
794 | * Init calculated according to formula: | 777 | * Init calculated according to formula: |
@@ -1115,9 +1098,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | |||
1115 | pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( | 1098 | pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( |
1116 | pipe_ctx->plane_state->format); | 1099 | pipe_ctx->plane_state->format); |
1117 | 1100 | ||
1118 | if (pipe_ctx->stream->timing.flags.INTERLACE) | ||
1119 | pipe_ctx->stream->dst.height *= 2; | ||
1120 | |||
1121 | calculate_scaling_ratios(pipe_ctx); | 1101 | calculate_scaling_ratios(pipe_ctx); |
1122 | 1102 | ||
1123 | calculate_viewport(pipe_ctx); | 1103 | calculate_viewport(pipe_ctx); |
@@ -1138,9 +1118,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | |||
1138 | 1118 | ||
1139 | pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; | 1119 | pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; |
1140 | pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; | 1120 | pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; |
1141 | if (pipe_ctx->stream->timing.flags.INTERLACE) | ||
1142 | pipe_ctx->plane_res.scl_data.v_active *= 2; | ||
1143 | |||
1144 | 1121 | ||
1145 | /* Taps calculations */ | 1122 | /* Taps calculations */ |
1146 | if (pipe_ctx->plane_res.xfm != NULL) | 1123 | if (pipe_ctx->plane_res.xfm != NULL) |
@@ -1185,9 +1162,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | |||
1185 | plane_state->dst_rect.x, | 1162 | plane_state->dst_rect.x, |
1186 | plane_state->dst_rect.y); | 1163 | plane_state->dst_rect.y); |
1187 | 1164 | ||
1188 | if (pipe_ctx->stream->timing.flags.INTERLACE) | ||
1189 | pipe_ctx->stream->dst.height /= 2; | ||
1190 | |||
1191 | return res; | 1165 | return res; |
1192 | } | 1166 | } |
1193 | 1167 | ||
@@ -2071,7 +2045,7 @@ void dc_resource_state_construct( | |||
2071 | const struct dc *dc, | 2045 | const struct dc *dc, |
2072 | struct dc_state *dst_ctx) | 2046 | struct dc_state *dst_ctx) |
2073 | { | 2047 | { |
2074 | dst_ctx->dis_clk = dc->res_pool->dccg; | 2048 | dst_ctx->dccg = dc->res_pool->clk_mgr; |
2075 | } | 2049 | } |
2076 | 2050 | ||
2077 | enum dc_status dc_validate_global_state( | 2051 | enum dc_status dc_validate_global_state( |
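
The rewritten viewport math reduces to three steps per axis: scale the clip offset into source space, mirror it when the scan direction is flipped, and add the surface source offset last, since that offset is already in viewport space. A condensed restatement of the x axis, using the same quantities as calculate_viewport(); the standalone function is for illustration only:

        static int viewport_x(int surf_src_x, int surf_src_w,
                              int clip_x, int clip_w,
                              int dst_x, int dst_w,
                              bool flip_horz_scan_dir)
        {
                int vp_w = clip_w * surf_src_w / dst_w;
                int vp_x = (clip_x - dst_x) * surf_src_w / dst_w;

                if (flip_horz_scan_dir)
                        vp_x = surf_src_w - vp_x - vp_w;   /* offset from the mirrored edge */

                return vp_x + surf_src_x;                  /* surf_src offset added last */
        }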
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 2ac848a106ba..e113439aaa86 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c | |||
@@ -106,6 +106,7 @@ static void construct(struct dc_stream_state *stream, | |||
106 | 106 | ||
107 | stream->out_transfer_func = dc_create_transfer_func(); | 107 | stream->out_transfer_func = dc_create_transfer_func(); |
108 | stream->out_transfer_func->type = TF_TYPE_BYPASS; | 108 | stream->out_transfer_func->type = TF_TYPE_BYPASS; |
109 | stream->out_transfer_func->ctx = stream->ctx; | ||
109 | } | 110 | } |
110 | 111 | ||
111 | static void destruct(struct dc_stream_state *stream) | 112 | static void destruct(struct dc_stream_state *stream) |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 8fb3aefd195c..c60c9b4c3075 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c | |||
@@ -44,6 +44,7 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state | |||
44 | 44 | ||
45 | plane_state->in_transfer_func = dc_create_transfer_func(); | 45 | plane_state->in_transfer_func = dc_create_transfer_func(); |
46 | plane_state->in_transfer_func->type = TF_TYPE_BYPASS; | 46 | plane_state->in_transfer_func->type = TF_TYPE_BYPASS; |
47 | plane_state->in_transfer_func->ctx = ctx; | ||
47 | } | 48 | } |
48 | 49 | ||
49 | static void destruct(struct dc_plane_state *plane_state) | 50 | static void destruct(struct dc_plane_state *plane_state) |
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 199527171100..d16a20c84792 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h | |||
@@ -38,7 +38,7 @@ | |||
38 | #include "inc/compressor.h" | 38 | #include "inc/compressor.h" |
39 | #include "dml/display_mode_lib.h" | 39 | #include "dml/display_mode_lib.h" |
40 | 40 | ||
41 | #define DC_VER "3.1.68" | 41 | #define DC_VER "3.2.04" |
42 | 42 | ||
43 | #define MAX_SURFACES 3 | 43 | #define MAX_SURFACES 3 |
44 | #define MAX_STREAMS 6 | 44 | #define MAX_STREAMS 6 |
@@ -169,6 +169,7 @@ struct link_training_settings; | |||
169 | struct dc_config { | 169 | struct dc_config { |
170 | bool gpu_vm_support; | 170 | bool gpu_vm_support; |
171 | bool disable_disp_pll_sharing; | 171 | bool disable_disp_pll_sharing; |
172 | bool fbc_support; | ||
172 | }; | 173 | }; |
173 | 174 | ||
174 | enum visual_confirm { | 175 | enum visual_confirm { |
@@ -249,8 +250,6 @@ struct dc_debug_options { | |||
249 | bool disable_dmcu; | 250 | bool disable_dmcu; |
250 | bool disable_psr; | 251 | bool disable_psr; |
251 | bool force_abm_enable; | 252 | bool force_abm_enable; |
252 | bool disable_hbup_pg; | ||
253 | bool disable_dpp_pg; | ||
254 | bool disable_stereo_support; | 253 | bool disable_stereo_support; |
255 | bool vsr_support; | 254 | bool vsr_support; |
256 | bool performance_trace; | 255 | bool performance_trace; |
@@ -304,11 +303,6 @@ struct dc { | |||
304 | struct hw_sequencer_funcs hwss; | 303 | struct hw_sequencer_funcs hwss; |
305 | struct dce_hwseq *hwseq; | 304 | struct dce_hwseq *hwseq; |
306 | 305 | ||
307 | /* temp store of dm_pp_display_configuration | ||
308 | * to compare to see if display config changed | ||
309 | */ | ||
310 | struct dm_pp_display_configuration prev_display_config; | ||
311 | |||
312 | bool optimized_required; | 306 | bool optimized_required; |
313 | 307 | ||
314 | /* FBC compressor */ | 308 | /* FBC compressor */ |
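
The new fbc_support config bit records, at dc_create() time, whether the caller asked for frame buffer compression. A hypothetical helper for gating FBC paths on it (not an existing function):

        static inline bool dc_fbc_enabled(const struct dc *dc)
        {
                return dc->config.fbc_support;
        }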
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 8130b95ccc53..a8b3cedf9431 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h | |||
@@ -86,6 +86,10 @@ struct dc_vbios_funcs { | |||
86 | 86 | ||
87 | bool (*is_accelerated_mode)( | 87 | bool (*is_accelerated_mode)( |
88 | struct dc_bios *bios); | 88 | struct dc_bios *bios); |
89 | bool (*is_active_display)( | ||
90 | struct dc_bios *bios, | ||
91 | enum signal_type signal, | ||
92 | const struct connector_device_tag_info *device_tag); | ||
89 | void (*set_scratch_critical_state)( | 93 | void (*set_scratch_critical_state)( |
90 | struct dc_bios *bios, | 94 | struct dc_bios *bios, |
91 | bool state); | 95 | bool state); |
@@ -141,6 +145,7 @@ struct dc_vbios_funcs { | |||
141 | }; | 145 | }; |
142 | 146 | ||
143 | struct bios_registers { | 147 | struct bios_registers { |
148 | uint32_t BIOS_SCRATCH_0; | ||
144 | uint32_t BIOS_SCRATCH_3; | 149 | uint32_t BIOS_SCRATCH_3; |
145 | uint32_t BIOS_SCRATCH_6; | 150 | uint32_t BIOS_SCRATCH_6; |
146 | }; | 151 | }; |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 3bfdccceb524..8738f27a8708 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h | |||
@@ -138,9 +138,14 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ | |||
138 | return dc->links[link_index]; | 138 | return dc->links[link_index]; |
139 | } | 139 | } |
140 | 140 | ||
141 | /* Set backlight level of an embedded panel (eDP, LVDS). */ | 141 | /* Set backlight level of an embedded panel (eDP, LVDS). |
142 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level, | 142 | * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer |
143 | uint32_t frame_ramp, const struct dc_stream_state *stream); | 143 | * and 16 bit fractional, where 1.0 is max backlight value. |
144 | */ | ||
145 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, | ||
146 | uint32_t backlight_pwm_u16_16, | ||
147 | uint32_t frame_ramp, | ||
148 | const struct dc_stream_state *stream); | ||
144 | 149 | ||
145 | int dc_link_get_backlight_level(const struct dc_link *dc_link); | 150 | int dc_link_get_backlight_level(const struct dc_link *dc_link); |
146 | 151 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile index 8f7f0e8b341f..6d7b64a743ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ | 29 | DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ |
30 | dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ | 30 | dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ |
31 | dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ | 31 | dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ |
32 | dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o | 32 | dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o |
33 | 33 | ||
34 | AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) | 34 | AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 29294db1a96b..2a342eae80fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | |||
@@ -54,7 +54,7 @@ | |||
54 | #define MCP_DISABLE_ABM_IMMEDIATELY 255 | 54 | #define MCP_DISABLE_ABM_IMMEDIATELY 255 |
55 | 55 | ||
56 | 56 | ||
57 | static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce) | 57 | static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce) |
58 | { | 58 | { |
59 | uint64_t current_backlight; | 59 | uint64_t current_backlight; |
60 | uint32_t round_result; | 60 | uint32_t round_result; |
@@ -103,45 +103,21 @@ static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce) | |||
103 | return (uint32_t)(current_backlight); | 103 | return (uint32_t)(current_backlight); |
104 | } | 104 | } |
105 | 105 | ||
106 | static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) | 106 | static void driver_set_backlight_level(struct dce_abm *abm_dce, |
107 | uint32_t backlight_pwm_u16_16) | ||
107 | { | 108 | { |
108 | uint32_t backlight_24bit; | ||
109 | uint32_t backlight_17bit; | ||
110 | uint32_t backlight_16bit; | 109 | uint32_t backlight_16bit; |
111 | uint32_t masked_pwm_period; | 110 | uint32_t masked_pwm_period; |
112 | uint8_t rounding_bit; | ||
113 | uint8_t bit_count; | 111 | uint8_t bit_count; |
114 | uint64_t active_duty_cycle; | 112 | uint64_t active_duty_cycle; |
115 | uint32_t pwm_period_bitcnt; | 113 | uint32_t pwm_period_bitcnt; |
116 | 114 | ||
117 | /* | 115 | /* |
118 | * 1. Convert 8-bit value to 17 bit U1.16 format | 116 | * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight |
119 | * (1 integer, 16 fractional bits) | ||
120 | */ | ||
121 | |||
122 | /* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value, | ||
123 | * effectively multiplying value by 256/255 | ||
124 | * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF | ||
125 | */ | ||
126 | backlight_24bit = level * 0x10101; | ||
127 | |||
128 | /* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8 | ||
129 | * used for rounding, take most significant bit of fraction for | ||
130 | * rounding, e.g. for 0xEFEFEF, rounding bit is 1 | ||
131 | */ | ||
132 | rounding_bit = (backlight_24bit >> 7) & 1; | ||
133 | |||
134 | /* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit | ||
135 | * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1 | ||
136 | */ | ||
137 | backlight_17bit = (backlight_24bit >> 8) + rounding_bit; | ||
138 | |||
139 | /* | ||
140 | * 2. Find 16 bit backlight active duty cycle, where 0 <= backlight | ||
141 | * active duty cycle <= backlight period | 117 | * active duty cycle <= backlight period |
142 | */ | 118 | */ |
143 | 119 | ||
144 | /* 2.1 Apply bitmask for backlight period value based on value of BITCNT | 120 | /* 1.1 Apply bitmask for backlight period value based on value of BITCNT |
145 | */ | 121 | */ |
146 | REG_GET_2(BL_PWM_PERIOD_CNTL, | 122 | REG_GET_2(BL_PWM_PERIOD_CNTL, |
147 | BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt, | 123 | BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt, |
@@ -155,13 +131,13 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) | |||
155 | /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */ | 131 | /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */ |
156 | masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1); | 132 | masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1); |
157 | 133 | ||
158 | /* 2.2 Calculate integer active duty cycle required upper 16 bits | 134 | /* 1.2 Calculate integer active duty cycle required upper 16 bits |
159 | * contain integer component, lower 16 bits contain fractional component | 135 | * contain integer component, lower 16 bits contain fractional component |
160 | * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24 | 136 | * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24 |
161 | */ | 137 | */ |
162 | active_duty_cycle = backlight_17bit * masked_pwm_period; | 138 | active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period; |
163 | 139 | ||
164 | /* 2.3 Calculate 16 bit active duty cycle from integer and fractional | 140 | /* 1.3 Calculate 16 bit active duty cycle from integer and fractional |
165 | * components shift by bitCount then mask 16 bits and add rounding bit | 141 | * components shift by bitCount then mask 16 bits and add rounding bit |
166 | * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0 | 142 | * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0 |
167 | */ | 143 | */ |
@@ -170,23 +146,23 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) | |||
170 | backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1; | 146 | backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1; |
171 | 147 | ||
172 | /* | 148 | /* |
173 | * 3. Program register with updated value | 149 | * 2. Program register with updated value |
174 | */ | 150 | */ |
175 | 151 | ||
176 | /* 3.1 Lock group 2 backlight registers */ | 152 | /* 2.1 Lock group 2 backlight registers */ |
177 | 153 | ||
178 | REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK, | 154 | REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK, |
179 | BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1, | 155 | BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1, |
180 | BL_PWM_GRP1_REG_LOCK, 1); | 156 | BL_PWM_GRP1_REG_LOCK, 1); |
181 | 157 | ||
182 | // 3.2 Write new active duty cycle | 158 | // 2.2 Write new active duty cycle |
183 | REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit); | 159 | REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit); |
184 | 160 | ||
185 | /* 3.3 Unlock group 2 backlight registers */ | 161 | /* 2.3 Unlock group 2 backlight registers */ |
186 | REG_UPDATE(BL_PWM_GRP1_REG_LOCK, | 162 | REG_UPDATE(BL_PWM_GRP1_REG_LOCK, |
187 | BL_PWM_GRP1_REG_LOCK, 0); | 163 | BL_PWM_GRP1_REG_LOCK, 0); |
188 | 164 | ||
189 | /* 5.4.4 Wait for pending bit to be cleared */ | 165 | /* 3 Wait for pending bit to be cleared */ |
190 | REG_WAIT(BL_PWM_GRP1_REG_LOCK, | 166 | REG_WAIT(BL_PWM_GRP1_REG_LOCK, |
191 | BL_PWM_GRP1_REG_UPDATE_PENDING, 0, | 167 | BL_PWM_GRP1_REG_UPDATE_PENDING, 0, |
192 | 1, 10000); | 168 | 1, 10000); |
@@ -194,16 +170,21 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) | |||
194 | 170 | ||
195 | static void dmcu_set_backlight_level( | 171 | static void dmcu_set_backlight_level( |
196 | struct dce_abm *abm_dce, | 172 | struct dce_abm *abm_dce, |
197 | uint32_t level, | 173 | uint32_t backlight_pwm_u16_16, |
198 | uint32_t frame_ramp, | 174 | uint32_t frame_ramp, |
199 | uint32_t controller_id) | 175 | uint32_t controller_id) |
200 | { | 176 | { |
201 | unsigned int backlight_16_bit = (level * 0x10101) >> 8; | 177 | unsigned int backlight_8_bit = 0; |
202 | unsigned int backlight_17_bit = backlight_16_bit + | ||
203 | (((backlight_16_bit & 0x80) >> 7) & 1); | ||
204 | uint32_t rampingBoundary = 0xFFFF; | 178 | uint32_t rampingBoundary = 0xFFFF; |
205 | uint32_t s2; | 179 | uint32_t s2; |
206 | 180 | ||
181 | if (backlight_pwm_u16_16 & 0x10000) | ||
182 | // Check for max backlight condition | ||
183 | backlight_8_bit = 0xFF; | ||
184 | else | ||
185 | // Take MSB of fractional part since backlight is not max | ||
186 | backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF; | ||
187 | |||
207 | /* set ramping boundary */ | 188 | /* set ramping boundary */ |
208 | REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary); | 189 | REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary); |
209 | 190 | ||
@@ -220,7 +201,7 @@ static void dmcu_set_backlight_level( | |||
220 | 0, 1, 80000); | 201 | 0, 1, 80000); |
221 | 202 | ||
222 | /* setDMCUParam_BL */ | 203 | /* setDMCUParam_BL */ |
223 | REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit); | 204 | REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16); |
224 | 205 | ||
225 | /* write ramp */ | 206 | /* write ramp */ |
226 | if (controller_id == 0) | 207 | if (controller_id == 0) |
@@ -237,9 +218,9 @@ static void dmcu_set_backlight_level( | |||
237 | s2 = REG_READ(BIOS_SCRATCH_2); | 218 | s2 = REG_READ(BIOS_SCRATCH_2); |
238 | 219 | ||
239 | s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; | 220 | s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; |
240 | level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >> | 221 | backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >> |
241 | ATOM_S2_CURRENT_BL_LEVEL_SHIFT); | 222 | ATOM_S2_CURRENT_BL_LEVEL_SHIFT); |
242 | s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); | 223 | s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); |
243 | 224 | ||
244 | REG_WRITE(BIOS_SCRATCH_2, s2); | 225 | REG_WRITE(BIOS_SCRATCH_2, s2); |
245 | } | 226 | } |
@@ -247,7 +228,7 @@ static void dmcu_set_backlight_level( | |||
247 | static void dce_abm_init(struct abm *abm) | 228 | static void dce_abm_init(struct abm *abm) |
248 | { | 229 | { |
249 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); | 230 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); |
250 | unsigned int backlight = get_current_backlight_16_bit(abm_dce); | 231 | unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce); |
251 | 232 | ||
252 | REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103); | 233 | REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103); |
253 | REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101); | 234 | REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101); |
@@ -284,12 +265,26 @@ static void dce_abm_init(struct abm *abm) | |||
284 | ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); | 265 | ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); |
285 | } | 266 | } |
286 | 267 | ||
287 | static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm) | 268 | static unsigned int dce_abm_get_current_backlight(struct abm *abm) |
288 | { | 269 | { |
289 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); | 270 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); |
290 | unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); | 271 | unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); |
291 | 272 | ||
292 | return (backlight >> 8); | 273 | /* return backlight in hardware format which is unsigned 17 bits, with |
274 | * 1 bit integer and 16 bit fractional | ||
275 | */ | ||
276 | return backlight; | ||
277 | } | ||
278 | |||
279 | static unsigned int dce_abm_get_target_backlight(struct abm *abm) | ||
280 | { | ||
281 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); | ||
282 | unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL); | ||
283 | |||
284 | /* return backlight in hardware format which is unsigned 17 bits, with | ||
285 | * 1 bit integer and 16 bit fractional | ||
286 | */ | ||
287 | return backlight; | ||
293 | } | 288 | } |
294 | 289 | ||
295 | static bool dce_abm_set_level(struct abm *abm, uint32_t level) | 290 | static bool dce_abm_set_level(struct abm *abm, uint32_t level) |
@@ -396,9 +391,9 @@ static bool dce_abm_init_backlight(struct abm *abm) | |||
396 | return true; | 391 | return true; |
397 | } | 392 | } |
398 | 393 | ||
399 | static bool dce_abm_set_backlight_level( | 394 | static bool dce_abm_set_backlight_level_pwm( |
400 | struct abm *abm, | 395 | struct abm *abm, |
401 | unsigned int backlight_level, | 396 | unsigned int backlight_pwm_u16_16, |
402 | unsigned int frame_ramp, | 397 | unsigned int frame_ramp, |
403 | unsigned int controller_id, | 398 | unsigned int controller_id, |
404 | bool use_smooth_brightness) | 399 | bool use_smooth_brightness) |
@@ -406,16 +401,16 @@ static bool dce_abm_set_backlight_level( | |||
406 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); | 401 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); |
407 | 402 | ||
408 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", | 403 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", |
409 | backlight_level, backlight_level); | 404 | backlight_pwm_u16_16, backlight_pwm_u16_16); |
410 | 405 | ||
411 | /* If DMCU is in reset state, DMCU is uninitialized */ | 406 | /* If DMCU is in reset state, DMCU is uninitialized */ |
412 | if (use_smooth_brightness) | 407 | if (use_smooth_brightness) |
413 | dmcu_set_backlight_level(abm_dce, | 408 | dmcu_set_backlight_level(abm_dce, |
414 | backlight_level, | 409 | backlight_pwm_u16_16, |
415 | frame_ramp, | 410 | frame_ramp, |
416 | controller_id); | 411 | controller_id); |
417 | else | 412 | else |
418 | driver_set_backlight_level(abm_dce, backlight_level); | 413 | driver_set_backlight_level(abm_dce, backlight_pwm_u16_16); |
419 | 414 | ||
420 | return true; | 415 | return true; |
421 | } | 416 | } |
@@ -424,8 +419,9 @@ static const struct abm_funcs dce_funcs = { | |||
424 | .abm_init = dce_abm_init, | 419 | .abm_init = dce_abm_init, |
425 | .set_abm_level = dce_abm_set_level, | 420 | .set_abm_level = dce_abm_set_level, |
426 | .init_backlight = dce_abm_init_backlight, | 421 | .init_backlight = dce_abm_init_backlight, |
427 | .set_backlight_level = dce_abm_set_backlight_level, | 422 | .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm, |
428 | .get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit, | 423 | .get_current_backlight = dce_abm_get_current_backlight, |
424 | .get_target_backlight = dce_abm_get_target_backlight, | ||
429 | .set_abm_immediate_disable = dce_abm_immediate_disable | 425 | .set_abm_immediate_disable = dce_abm_immediate_disable |
430 | }; | 426 | }; |
431 | 427 | ||
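
For the BIOS scratch register, dmcu_set_backlight_level() now derives an 8-bit value from the 16.16 input: bit 16 set means exactly 1.0 (full backlight), otherwise the top byte of the fractional part is used. The same clamp as a standalone helper, with an illustrative name:

        static uint8_t pwm_u16_16_to_8bit(uint32_t backlight_pwm_u16_16)
        {
                if (backlight_pwm_u16_16 & 0x10000)
                        return 0xFF;                            /* max backlight */

                return (backlight_pwm_u16_16 >> 8) & 0xFF;     /* MSB of the fraction */
        }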
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c new file mode 100644 index 000000000000..9a28a04417d1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | |||
@@ -0,0 +1,879 @@ | |||
1 | /* | ||
2 | * Copyright 2012-16 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include "dce_clk_mgr.h" | ||
27 | |||
28 | #include "reg_helper.h" | ||
29 | #include "dmcu.h" | ||
30 | #include "core_types.h" | ||
31 | #include "dal_asic_id.h" | ||
32 | |||
33 | #define TO_DCE_CLK_MGR(clocks)\ | ||
34 | container_of(clocks, struct dce_clk_mgr, base) | ||
35 | |||
36 | #define REG(reg) \ | ||
37 | (clk_mgr_dce->regs->reg) | ||
38 | |||
39 | #undef FN | ||
40 | #define FN(reg_name, field_name) \ | ||
41 | clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name | ||
42 | |||
43 | #define CTX \ | ||
44 | clk_mgr_dce->base.ctx | ||
45 | #define DC_LOGGER \ | ||
46 | clk_mgr->ctx->logger | ||
47 | |||
48 | /* Max clock values for each state indexed by "enum clocks_state": */ | ||
49 | static const struct state_dependent_clocks dce80_max_clks_by_state[] = { | ||
50 | /* ClocksStateInvalid - should not be used */ | ||
51 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
52 | /* ClocksStateUltraLow - not expected to be used for DCE 8.0 */ | ||
53 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
54 | /* ClocksStateLow */ | ||
55 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000}, | ||
56 | /* ClocksStateNominal */ | ||
57 | { .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, | ||
58 | /* ClocksStatePerformance */ | ||
59 | { .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; | ||
60 | |||
61 | static const struct state_dependent_clocks dce110_max_clks_by_state[] = { | ||
62 | /*ClocksStateInvalid - should not be used*/ | ||
63 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
64 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
65 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, | ||
66 | /*ClocksStateLow*/ | ||
67 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, | ||
68 | /*ClocksStateNominal*/ | ||
69 | { .display_clk_khz = 467000, .pixel_clk_khz = 400000 }, | ||
70 | /*ClocksStatePerformance*/ | ||
71 | { .display_clk_khz = 643000, .pixel_clk_khz = 400000 } }; | ||
72 | |||
73 | static const struct state_dependent_clocks dce112_max_clks_by_state[] = { | ||
74 | /*ClocksStateInvalid - should not be used*/ | ||
75 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
76 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
77 | { .display_clk_khz = 389189, .pixel_clk_khz = 346672 }, | ||
78 | /*ClocksStateLow*/ | ||
79 | { .display_clk_khz = 459000, .pixel_clk_khz = 400000 }, | ||
80 | /*ClocksStateNominal*/ | ||
81 | { .display_clk_khz = 667000, .pixel_clk_khz = 600000 }, | ||
82 | /*ClocksStatePerformance*/ | ||
83 | { .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } }; | ||
84 | |||
85 | static const struct state_dependent_clocks dce120_max_clks_by_state[] = { | ||
86 | /*ClocksStateInvalid - should not be used*/ | ||
87 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
88 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
89 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
90 | /*ClocksStateLow*/ | ||
91 | { .display_clk_khz = 460000, .pixel_clk_khz = 400000 }, | ||
92 | /*ClocksStateNominal*/ | ||
93 | { .display_clk_khz = 670000, .pixel_clk_khz = 600000 }, | ||
94 | /*ClocksStatePerformance*/ | ||
95 | { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; | ||
96 | |||
97 | static int dentist_get_divider_from_did(int did) | ||
98 | { | ||
99 | if (did < DENTIST_BASE_DID_1) | ||
100 | did = DENTIST_BASE_DID_1; | ||
101 | if (did > DENTIST_MAX_DID) | ||
102 | did = DENTIST_MAX_DID; | ||
103 | |||
104 | if (did < DENTIST_BASE_DID_2) { | ||
105 | return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP | ||
106 | * (did - DENTIST_BASE_DID_1); | ||
107 | } else if (did < DENTIST_BASE_DID_3) { | ||
108 | return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP | ||
109 | * (did - DENTIST_BASE_DID_2); | ||
110 | } else if (did < DENTIST_BASE_DID_4) { | ||
111 | return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP | ||
112 | * (did - DENTIST_BASE_DID_3); | ||
113 | } else { | ||
114 | return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP | ||
115 | * (did - DENTIST_BASE_DID_4); | ||
116 | } | ||
117 | } | ||
118 | |||
119 | /* SW adjusts the DP REF clock average value for all purposes | ||
120 | * (DP DTO / DP Audio DTO and DP GTC) whenever the clock is spread. | ||
121 | * This covers all cases: | ||
122 | * - SS enabled on the DP ref clock and HW de-spreading enabled with SW | ||
123 | * calculations for DS_INCR/DS_MODULO (planned to be the default case) | ||
124 | * - SS enabled on the DP ref clock and HW de-spreading enabled with HW | ||
125 | * calculations (not planned to be used, but the average clock should | ||
126 | * still be valid) | ||
127 | * - SS enabled on the DP ref clock and HW de-spreading disabled | ||
128 | * (should not be the case with CIK); SW should then program all rates | ||
129 | * according to the average value (as with previous ASICs) | ||
130 | */ | ||
131 | static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz) | ||
132 | { | ||
133 | if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) { | ||
134 | struct fixed31_32 ss_percentage = dc_fixpt_div_int( | ||
135 | dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage, | ||
136 | clk_mgr_dce->dprefclk_ss_divider), 200); | ||
137 | struct fixed31_32 adj_dp_ref_clk_khz; | ||
138 | |||
139 | ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage); | ||
140 | adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz); | ||
141 | dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz); | ||
142 | } | ||
143 | return dp_ref_clk_khz; | ||
144 | } | ||
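/* Worked example of the adjustment above, with illustrative numbers: a 0.3%
 * down-spread reported as dprefclk_ss_percentage = 30 with
 * dprefclk_ss_divider = 100 gives ss = (30 / 100) / 200 = 0.0015; the /200
 * presumably folds together the percent-to-fraction conversion (/100) and
 * the halving of a down-spread to reach the average (/2). A nominal
 * 600000 kHz DP ref clock then averages 600000 * (1 - 0.0015) = 599100 kHz.
 */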
145 | |||
146 | static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr) | ||
147 | { | ||
148 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
149 | int dprefclk_wdivider; | ||
150 | int dprefclk_src_sel; | ||
151 | int dp_ref_clk_khz = 600000; | ||
152 | int target_div; | ||
153 | |||
154 | /* ASSERT DP Reference Clock source is from DFS*/ | ||
155 | REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); | ||
156 | ASSERT(dprefclk_src_sel == 0); | ||
157 | |||
158 | /* Read mmDENTIST_DISPCLK_CNTL to get the currently | ||
159 | * programmed DID DENTIST_DPREFCLK_WDIVIDER. */ | ||
160 | REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); | ||
161 | |||
162 | /* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider. */ | ||
163 | target_div = dentist_get_divider_from_did(dprefclk_wdivider); | ||
164 | |||
165 | /* Calculate the current DFS clock, in kHz.*/ | ||
166 | dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR | ||
167 | * clk_mgr_dce->dentist_vco_freq_khz) / target_div; | ||
168 | |||
169 | return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz); | ||
170 | } | ||
171 | |||
172 | int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr) | ||
173 | { | ||
174 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
175 | |||
176 | return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz); | ||
177 | } | ||
178 | |||
179 | /* Unit: kHz. Before mode set, get the pixel clock from the context; | ||
180 | * the ASIC registers may not be programmed yet. | ||
181 | */ | ||
182 | static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context) | ||
183 | { | ||
184 | uint32_t max_pix_clk = 0; | ||
185 | int i; | ||
186 | |||
187 | for (i = 0; i < MAX_PIPES; i++) { | ||
188 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
189 | |||
190 | if (pipe_ctx->stream == NULL) | ||
191 | continue; | ||
192 | |||
193 | /* do not check the underlay */ | ||
194 | if (pipe_ctx->top_pipe) | ||
195 | continue; | ||
196 | |||
197 | if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) | ||
198 | max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; | ||
199 | |||
200 | /* Raise the clock state for HBR3/2 if required. Confirmed with HW: | ||
201 | * the DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the VDDC rail. | ||
202 | */ | ||
203 | if (dc_is_dp_signal(pipe_ctx->stream->signal) && | ||
204 | pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk) | ||
205 | max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk; | ||
206 | } | ||
207 | |||
208 | return max_pix_clk; | ||
209 | } | ||
210 | |||
211 | static enum dm_pp_clocks_state dce_get_required_clocks_state( | ||
212 | struct clk_mgr *clk_mgr, | ||
213 | struct dc_state *context) | ||
214 | { | ||
215 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
216 | int i; | ||
217 | enum dm_pp_clocks_state low_req_clk; | ||
218 | int max_pix_clk = get_max_pixel_clock_for_all_paths(context); | ||
219 | |||
220 | /* Iterate from highest supported to lowest valid state, and update | ||
221 | * lowest RequiredState with the lowest state that satisfies | ||
222 | * all required clocks | ||
223 | */ | ||
224 | for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--) | ||
225 | if (context->bw.dce.dispclk_khz > | ||
226 | clk_mgr_dce->max_clks_by_state[i].display_clk_khz | ||
227 | || max_pix_clk > | ||
228 | clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz) | ||
229 | break; | ||
230 | |||
231 | low_req_clk = i + 1; | ||
232 | if (low_req_clk > clk_mgr_dce->max_clks_state) { | ||
233 | /* Use the max clock state for a high phyclk; report invalid if the display clock is exceeded. */ | ||
234 | if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz | ||
235 | < context->bw.dce.dispclk_khz) | ||
236 | low_req_clk = DM_PP_CLOCKS_STATE_INVALID; | ||
237 | else | ||
238 | low_req_clk = clk_mgr_dce->max_clks_state; | ||
239 | } | ||
240 | |||
241 | return low_req_clk; | ||
242 | } | ||
243 | |||
244 | static int dce_set_clock( | ||
245 | struct clk_mgr *clk_mgr, | ||
246 | int requested_clk_khz) | ||
247 | { | ||
248 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
249 | struct bp_pixel_clock_parameters pxl_clk_params = { 0 }; | ||
250 | struct dc_bios *bp = clk_mgr->ctx->dc_bios; | ||
251 | int actual_clock = requested_clk_khz; | ||
252 | struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu; | ||
253 | |||
254 | /* Make sure requested clock isn't lower than minimum threshold*/ | ||
255 | if (requested_clk_khz > 0) | ||
256 | requested_clk_khz = max(requested_clk_khz, | ||
257 | clk_mgr_dce->dentist_vco_freq_khz / 64); | ||
258 | |||
259 | /* Prepare to program display clock*/ | ||
260 | pxl_clk_params.target_pixel_clock = requested_clk_khz; | ||
261 | pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; | ||
262 | |||
263 | if (clk_mgr_dce->dfs_bypass_active) | ||
264 | pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; | ||
265 | |||
266 | bp->funcs->program_display_engine_pll(bp, &pxl_clk_params); | ||
267 | |||
268 | if (clk_mgr_dce->dfs_bypass_active) { | ||
269 | /* Cache the fixed display clock*/ | ||
270 | clk_mgr_dce->dfs_bypass_disp_clk = | ||
271 | pxl_clk_params.dfs_bypass_display_clock; | ||
272 | actual_clock = pxl_clk_params.dfs_bypass_display_clock; | ||
273 | } | ||
274 | |||
275 | /* Coming back from power down / HW reset, mark the clock state as | ||
276 | * ClocksStateNominal so that on resume we call the pplib voltage regulator. */ | ||
277 | if (requested_clk_khz == 0) | ||
278 | clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
279 | |||
280 | dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7); | ||
281 | |||
282 | return actual_clock; | ||
283 | } | ||
284 | |||
285 | int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz) | ||
286 | { | ||
287 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
288 | struct bp_set_dce_clock_parameters dce_clk_params; | ||
289 | struct dc_bios *bp = clk_mgr->ctx->dc_bios; | ||
290 | struct dc *core_dc = clk_mgr->ctx->dc; | ||
291 | struct dmcu *dmcu = core_dc->res_pool->dmcu; | ||
292 | int actual_clock = requested_clk_khz; | ||
293 | /* Prepare to program display clock*/ | ||
294 | memset(&dce_clk_params, 0, sizeof(dce_clk_params)); | ||
295 | |||
296 | /* Make sure requested clock isn't lower than minimum threshold*/ | ||
297 | if (requested_clk_khz > 0) | ||
298 | requested_clk_khz = max(requested_clk_khz, | ||
299 | clk_mgr_dce->dentist_vco_freq_khz / 62); | ||
300 | |||
301 | dce_clk_params.target_clock_frequency = requested_clk_khz; | ||
302 | dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; | ||
303 | dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK; | ||
304 | |||
305 | bp->funcs->set_dce_clock(bp, &dce_clk_params); | ||
306 | actual_clock = dce_clk_params.target_clock_frequency; | ||
307 | |||
308 | /* Coming back from power down / HW reset, mark the clock state as | ||
309 | * ClocksStateNominal so that on resume we call the pplib voltage regulator. */ | ||
310 | if (requested_clk_khz == 0) | ||
311 | clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
312 | |||
313 | /* Program the DP ref clock. */ | ||
314 | /* VBIOS will determine the DPREFCLK frequency, so we don't set it. */ | ||
315 | dce_clk_params.target_clock_frequency = 0; | ||
316 | dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK; | ||
317 | if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev)) | ||
318 | dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = | ||
319 | (dce_clk_params.pll_id == | ||
320 | CLOCK_SOURCE_COMBO_DISPLAY_PLL0); | ||
321 | else | ||
322 | dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false; | ||
323 | |||
324 | bp->funcs->set_dce_clock(bp, &dce_clk_params); | ||
325 | |||
326 | if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { | ||
327 | if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) | ||
328 | dmcu->funcs->set_psr_wait_loop(dmcu, | ||
329 | actual_clock / 1000 / 7); | ||
330 | } | ||
331 | |||
332 | clk_mgr_dce->dfs_bypass_disp_clk = actual_clock; | ||
333 | return actual_clock; | ||
334 | } | ||
335 | |||
336 | static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce) | ||
337 | { | ||
338 | struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug; | ||
339 | struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios; | ||
340 | struct integrated_info info = { { { 0 } } }; | ||
341 | struct dc_firmware_info fw_info = { { 0 } }; | ||
342 | int i; | ||
343 | |||
344 | if (bp->integrated_info) | ||
345 | info = *bp->integrated_info; | ||
346 | |||
347 | clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq; | ||
348 | if (clk_mgr_dce->dentist_vco_freq_khz == 0) { | ||
349 | bp->funcs->get_firmware_info(bp, &fw_info); | ||
350 | clk_mgr_dce->dentist_vco_freq_khz = | ||
351 | fw_info.smu_gpu_pll_output_freq; | ||
352 | if (clk_mgr_dce->dentist_vco_freq_khz == 0) | ||
353 | clk_mgr_dce->dentist_vco_freq_khz = 3600000; | ||
354 | } | ||
355 | |||
356 | /*update the maximum display clock for each power state*/ | ||
357 | for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { | ||
358 | enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID; | ||
359 | |||
360 | switch (i) { | ||
361 | case 0: | ||
362 | clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW; | ||
363 | break; | ||
364 | |||
365 | case 1: | ||
366 | clk_state = DM_PP_CLOCKS_STATE_LOW; | ||
367 | break; | ||
368 | |||
369 | case 2: | ||
370 | clk_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
371 | break; | ||
372 | |||
373 | case 3: | ||
374 | clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE; | ||
375 | break; | ||
376 | |||
377 | default: | ||
378 | clk_state = DM_PP_CLOCKS_STATE_INVALID; | ||
379 | break; | ||
380 | } | ||
381 | |||
382 | /* Do not allow a bad VBIOS/SBIOS to override with invalid values; | ||
383 | * only accept clocks > 100 MHz. */ | ||
384 | if (info.disp_clk_voltage[i].max_supported_clk >= 100000) | ||
385 | clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz = | ||
386 | info.disp_clk_voltage[i].max_supported_clk; | ||
387 | } | ||
388 | |||
389 | if (!debug->disable_dfs_bypass && bp->integrated_info) | ||
390 | if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) | ||
391 | clk_mgr_dce->dfs_bypass_enabled = true; | ||
392 | } | ||
393 | |||
394 | void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce) | ||
395 | { | ||
396 | struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios; | ||
397 | int ss_info_num = bp->funcs->get_ss_entry_number( | ||
398 | bp, AS_SIGNAL_TYPE_GPU_PLL); | ||
399 | |||
400 | if (ss_info_num) { | ||
401 | struct spread_spectrum_info info = { { 0 } }; | ||
402 | enum bp_result result = bp->funcs->get_spread_spectrum_info( | ||
403 | bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info); | ||
404 | |||
405 | /* VBIOS keeps an entry for GPU PLL SS even if SS is not | ||
406 | * enabled; in that case a non-zero | ||
407 | * SSInfo.spreadSpectrumPercentage is the sign that SS is | ||
408 | * actually enabled | ||
409 | */ | ||
410 | if (result == BP_RESULT_OK && | ||
411 | info.spread_spectrum_percentage != 0) { | ||
412 | clk_mgr_dce->ss_on_dprefclk = true; | ||
413 | clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider; | ||
414 | |||
415 | if (info.type.CENTER_MODE == 0) { | ||
416 | /* TODO: Currently for DP Reference clock we | ||
417 | * need only SS percentage for | ||
418 | * downspread */ | ||
419 | clk_mgr_dce->dprefclk_ss_percentage = | ||
420 | info.spread_spectrum_percentage; | ||
421 | } | ||
422 | |||
423 | return; | ||
424 | } | ||
425 | |||
426 | result = bp->funcs->get_spread_spectrum_info( | ||
427 | bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info); | ||
428 | |||
429 | /* VBIOS keeps an entry for DPREFCLK SS even if SS is not | ||
430 | * enabled; in that case a non-zero | ||
431 | * SSInfo.spreadSpectrumPercentage is the sign that SS is | ||
432 | * actually enabled | ||
433 | */ | ||
434 | if (result == BP_RESULT_OK && | ||
435 | info.spread_spectrum_percentage != 0) { | ||
436 | clk_mgr_dce->ss_on_dprefclk = true; | ||
437 | clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider; | ||
438 | |||
439 | if (info.type.CENTER_MODE == 0) { | ||
440 | /* Currently for DP Reference clock we | ||
441 | * need only SS percentage for | ||
442 | * downspread */ | ||
443 | clk_mgr_dce->dprefclk_ss_percentage = | ||
444 | info.spread_spectrum_percentage; | ||
445 | } | ||
446 | } | ||
447 | } | ||
448 | } | ||
449 | |||
450 | void dce110_fill_display_configs( | ||
451 | const struct dc_state *context, | ||
452 | struct dm_pp_display_configuration *pp_display_cfg) | ||
453 | { | ||
454 | int j; | ||
455 | int num_cfgs = 0; | ||
456 | |||
457 | for (j = 0; j < context->stream_count; j++) { | ||
458 | int k; | ||
459 | |||
460 | const struct dc_stream_state *stream = context->streams[j]; | ||
461 | struct dm_pp_single_disp_config *cfg = | ||
462 | &pp_display_cfg->disp_configs[num_cfgs]; | ||
463 | const struct pipe_ctx *pipe_ctx = NULL; | ||
464 | |||
465 | for (k = 0; k < MAX_PIPES; k++) | ||
466 | if (stream == context->res_ctx.pipe_ctx[k].stream) { | ||
467 | pipe_ctx = &context->res_ctx.pipe_ctx[k]; | ||
468 | break; | ||
469 | } | ||
470 | |||
471 | ASSERT(pipe_ctx != NULL); | ||
472 | |||
473 | /* only notify active stream */ | ||
474 | if (stream->dpms_off) | ||
475 | continue; | ||
476 | |||
477 | num_cfgs++; | ||
478 | cfg->signal = pipe_ctx->stream->signal; | ||
479 | cfg->pipe_idx = pipe_ctx->stream_res.tg->inst; | ||
480 | cfg->src_height = stream->src.height; | ||
481 | cfg->src_width = stream->src.width; | ||
482 | cfg->ddi_channel_mapping = | ||
483 | stream->sink->link->ddi_channel_mapping.raw; | ||
484 | cfg->transmitter = | ||
485 | stream->sink->link->link_enc->transmitter; | ||
486 | cfg->link_settings.lane_count = | ||
487 | stream->sink->link->cur_link_settings.lane_count; | ||
488 | cfg->link_settings.link_rate = | ||
489 | stream->sink->link->cur_link_settings.link_rate; | ||
490 | cfg->link_settings.link_spread = | ||
491 | stream->sink->link->cur_link_settings.link_spread; | ||
492 | cfg->sym_clock = stream->phy_pix_clk; | ||
493 | /* Round v_refresh*/ | ||
494 | cfg->v_refresh = stream->timing.pix_clk_khz * 1000; | ||
495 | cfg->v_refresh /= stream->timing.h_total; | ||
496 | cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) | ||
497 | / stream->timing.v_total; | ||
498 | } | ||
499 | |||
500 | pp_display_cfg->display_count = num_cfgs; | ||
501 | } | ||
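/* Illustrative check of the v_refresh rounding above: a 1080p60 stream with
 * pix_clk_khz = 148500, h_total = 2200 and v_total = 1125 gives
 * 148500 * 1000 / 2200 = 67500, then (67500 + 1125 / 2) / 1125 = 60, i.e.
 * the refresh rate rounded to the nearest integer Hz.
 */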
502 | |||
503 | static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) | ||
504 | { | ||
505 | uint8_t j; | ||
506 | uint32_t min_vertical_blank_time = -1; | ||
507 | |||
508 | for (j = 0; j < context->stream_count; j++) { | ||
509 | struct dc_stream_state *stream = context->streams[j]; | ||
510 | uint32_t vertical_blank_in_pixels = 0; | ||
511 | uint32_t vertical_blank_time = 0; | ||
512 | |||
513 | vertical_blank_in_pixels = stream->timing.h_total * | ||
514 | (stream->timing.v_total | ||
515 | - stream->timing.v_addressable); | ||
516 | |||
517 | vertical_blank_time = vertical_blank_in_pixels | ||
518 | * 1000 / stream->timing.pix_clk_khz; | ||
519 | |||
520 | if (min_vertical_blank_time > vertical_blank_time) | ||
521 | min_vertical_blank_time = vertical_blank_time; | ||
522 | } | ||
523 | |||
524 | return min_vertical_blank_time; | ||
525 | } | ||
526 | |||
527 | static int determine_sclk_from_bounding_box( | ||
528 | const struct dc *dc, | ||
529 | int required_sclk) | ||
530 | { | ||
531 | int i; | ||
532 | |||
533 | /* | ||
534 | * Some ASICs do not give us sclk levels, so we just report the actual | ||
535 | * required sclk | ||
536 | */ | ||
537 | if (dc->sclk_lvls.num_levels == 0) | ||
538 | return required_sclk; | ||
539 | |||
540 | for (i = 0; i < dc->sclk_lvls.num_levels; i++) { | ||
541 | if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk) | ||
542 | return dc->sclk_lvls.clocks_in_khz[i]; | ||
543 | } | ||
544 | /* | ||
545 | * Even the maximum level could not satisfy the requirement; this | ||
546 | * is unexpected at this stage and should have been caught at | ||
547 | * validation time. | ||
548 | */ | ||
549 | ASSERT(0); | ||
550 | return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1]; | ||
551 | } | ||
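/* Illustrative selection: with bounding-box levels {300000, 600000, 800000}
 * kHz and required_sclk = 450000, the loop above returns 600000, the lowest
 * level that still satisfies the requirement.
 */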
552 | |||
553 | static void dce_pplib_apply_display_requirements( | ||
554 | struct dc *dc, | ||
555 | struct dc_state *context) | ||
556 | { | ||
557 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | ||
558 | |||
559 | pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); | ||
560 | |||
561 | dce110_fill_display_configs(context, pp_display_cfg); | ||
562 | |||
563 | if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) | ||
564 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
565 | } | ||
566 | |||
567 | static void dce11_pplib_apply_display_requirements( | ||
568 | struct dc *dc, | ||
569 | struct dc_state *context) | ||
570 | { | ||
571 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | ||
572 | |||
573 | pp_display_cfg->all_displays_in_sync = | ||
574 | context->bw.dce.all_displays_in_sync; | ||
575 | pp_display_cfg->nb_pstate_switch_disable = | ||
576 | context->bw.dce.nbp_state_change_enable == false; | ||
577 | pp_display_cfg->cpu_cc6_disable = | ||
578 | context->bw.dce.cpuc_state_change_enable == false; | ||
579 | pp_display_cfg->cpu_pstate_disable = | ||
580 | context->bw.dce.cpup_state_change_enable == false; | ||
581 | pp_display_cfg->cpu_pstate_separation_time = | ||
582 | context->bw.dce.blackout_recovery_time_us; | ||
583 | |||
584 | pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz | ||
585 | / MEMORY_TYPE_MULTIPLIER_CZ; | ||
586 | |||
587 | pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box( | ||
588 | dc, | ||
589 | context->bw.dce.sclk_khz); | ||
590 | |||
591 | pp_display_cfg->min_engine_clock_deep_sleep_khz | ||
592 | = context->bw.dce.sclk_deep_sleep_khz; | ||
593 | |||
594 | pp_display_cfg->avail_mclk_switch_time_us = | ||
595 | dce110_get_min_vblank_time_us(context); | ||
596 | /* TODO: dce11.2*/ | ||
597 | pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; | ||
598 | |||
599 | pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz; | ||
600 | |||
601 | dce110_fill_display_configs(context, pp_display_cfg); | ||
602 | |||
603 | /* TODO: is this still applicable?*/ | ||
604 | if (pp_display_cfg->display_count == 1) { | ||
605 | const struct dc_crtc_timing *timing = | ||
606 | &context->streams[0]->timing; | ||
607 | |||
608 | pp_display_cfg->crtc_index = | ||
609 | pp_display_cfg->disp_configs[0].pipe_idx; | ||
610 | pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz; | ||
611 | } | ||
612 | |||
613 | if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) | ||
614 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
615 | } | ||
616 | |||
617 | static void dce_update_clocks(struct clk_mgr *clk_mgr, | ||
618 | struct dc_state *context, | ||
619 | bool safe_to_lower) | ||
620 | { | ||
621 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
622 | struct dm_pp_power_level_change_request level_change_req; | ||
623 | int unpatched_disp_clk = context->bw.dce.dispclk_khz; | ||
624 | |||
625 | /*TODO: W/A for dal3 linux, investigate why this works */ | ||
626 | if (!clk_mgr_dce->dfs_bypass_active) | ||
627 | context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; | ||
628 | |||
629 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); | ||
630 | /* get max clock state from PPLIB */ | ||
631 | if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) | ||
632 | || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { | ||
633 | if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) | ||
634 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; | ||
635 | } | ||
636 | |||
637 | if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { | ||
638 | context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); | ||
639 | clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; | ||
640 | } | ||
641 | dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); | ||
642 | |||
643 | context->bw.dce.dispclk_khz = unpatched_disp_clk; | ||
644 | } | ||
645 | |||
646 | static void dce11_update_clocks(struct clk_mgr *clk_mgr, | ||
647 | struct dc_state *context, | ||
648 | bool safe_to_lower) | ||
649 | { | ||
650 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
651 | struct dm_pp_power_level_change_request level_change_req; | ||
652 | |||
653 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); | ||
654 | /* get max clock state from PPLIB */ | ||
655 | if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) | ||
656 | || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { | ||
657 | if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) | ||
658 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; | ||
659 | } | ||
660 | |||
661 | if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { | ||
662 | context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); | ||
663 | clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; | ||
664 | } | ||
665 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); | ||
666 | } | ||
667 | |||
668 | static void dce112_update_clocks(struct clk_mgr *clk_mgr, | ||
669 | struct dc_state *context, | ||
670 | bool safe_to_lower) | ||
671 | { | ||
672 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
673 | struct dm_pp_power_level_change_request level_change_req; | ||
674 | |||
675 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); | ||
676 | /* get max clock state from PPLIB */ | ||
677 | if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) | ||
678 | || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { | ||
679 | if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) | ||
680 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; | ||
681 | } | ||
682 | |||
683 | if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { | ||
684 | context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz); | ||
685 | clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; | ||
686 | } | ||
687 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); | ||
688 | } | ||
689 | |||
690 | static void dce12_update_clocks(struct clk_mgr *clk_mgr, | ||
691 | struct dc_state *context, | ||
692 | bool safe_to_lower) | ||
693 | { | ||
694 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
695 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; | ||
696 | int max_pix_clk = get_max_pixel_clock_for_all_paths(context); | ||
697 | int unpatched_disp_clk = context->bw.dce.dispclk_khz; | ||
698 | |||
699 | /*TODO: W/A for dal3 linux, investigate why this works */ | ||
700 | if (!clk_mgr_dce->dfs_bypass_active) | ||
701 | context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; | ||
702 | |||
703 | if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { | ||
704 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; | ||
705 | clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz; | ||
706 | context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz); | ||
707 | clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; | ||
708 | |||
709 | dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); | ||
710 | } | ||
711 | |||
712 | if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) { | ||
713 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK; | ||
714 | clock_voltage_req.clocks_in_khz = max_pix_clk; | ||
715 | clk_mgr->clks.phyclk_khz = max_pix_clk; | ||
716 | |||
717 | dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); | ||
718 | } | ||
719 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); | ||
720 | |||
721 | context->bw.dce.dispclk_khz = unpatched_disp_clk; | ||
722 | } | ||
723 | |||
724 | static const struct clk_mgr_funcs dce120_funcs = { | ||
725 | .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, | ||
726 | .update_clocks = dce12_update_clocks | ||
727 | }; | ||
728 | |||
729 | static const struct clk_mgr_funcs dce112_funcs = { | ||
730 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
731 | .update_clocks = dce112_update_clocks | ||
732 | }; | ||
733 | |||
734 | static const struct clk_mgr_funcs dce110_funcs = { | ||
735 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
736 | .update_clocks = dce11_update_clocks, | ||
737 | }; | ||
738 | |||
739 | static const struct clk_mgr_funcs dce_funcs = { | ||
740 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
741 | .update_clocks = dce_update_clocks | ||
742 | }; | ||
743 | |||
744 | static void dce_clk_mgr_construct( | ||
745 | struct dce_clk_mgr *clk_mgr_dce, | ||
746 | struct dc_context *ctx, | ||
747 | const struct clk_mgr_registers *regs, | ||
748 | const struct clk_mgr_shift *clk_shift, | ||
749 | const struct clk_mgr_mask *clk_mask) | ||
750 | { | ||
751 | struct clk_mgr *base = &clk_mgr_dce->base; | ||
752 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
753 | |||
754 | base->ctx = ctx; | ||
755 | base->funcs = &dce_funcs; | ||
756 | |||
757 | clk_mgr_dce->regs = regs; | ||
758 | clk_mgr_dce->clk_mgr_shift = clk_shift; | ||
759 | clk_mgr_dce->clk_mgr_mask = clk_mask; | ||
760 | |||
761 | clk_mgr_dce->dfs_bypass_disp_clk = 0; | ||
762 | |||
763 | clk_mgr_dce->dprefclk_ss_percentage = 0; | ||
764 | clk_mgr_dce->dprefclk_ss_divider = 1000; | ||
765 | clk_mgr_dce->ss_on_dprefclk = false; | ||
766 | |||
767 | |||
768 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
769 | clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state; | ||
770 | else | ||
771 | clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
772 | clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID; | ||
773 | |||
774 | dce_clock_read_integrated_info(clk_mgr_dce); | ||
775 | dce_clock_read_ss_info(clk_mgr_dce); | ||
776 | } | ||
777 | |||
778 | struct clk_mgr *dce_clk_mgr_create( | ||
779 | struct dc_context *ctx, | ||
780 | const struct clk_mgr_registers *regs, | ||
781 | const struct clk_mgr_shift *clk_shift, | ||
782 | const struct clk_mgr_mask *clk_mask) | ||
783 | { | ||
784 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); | ||
785 | |||
786 | if (clk_mgr_dce == NULL) { | ||
787 | BREAK_TO_DEBUGGER(); | ||
788 | return NULL; | ||
789 | } | ||
790 | |||
791 | memcpy(clk_mgr_dce->max_clks_by_state, | ||
792 | dce80_max_clks_by_state, | ||
793 | sizeof(dce80_max_clks_by_state)); | ||
794 | |||
795 | dce_clk_mgr_construct( | ||
796 | clk_mgr_dce, ctx, regs, clk_shift, clk_mask); | ||
797 | |||
798 | return &clk_mgr_dce->base; | ||
799 | } | ||
800 | |||
801 | struct clk_mgr *dce110_clk_mgr_create( | ||
802 | struct dc_context *ctx, | ||
803 | const struct clk_mgr_registers *regs, | ||
804 | const struct clk_mgr_shift *clk_shift, | ||
805 | const struct clk_mgr_mask *clk_mask) | ||
806 | { | ||
807 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); | ||
808 | |||
809 | if (clk_mgr_dce == NULL) { | ||
810 | BREAK_TO_DEBUGGER(); | ||
811 | return NULL; | ||
812 | } | ||
813 | |||
814 | memcpy(clk_mgr_dce->max_clks_by_state, | ||
815 | dce110_max_clks_by_state, | ||
816 | sizeof(dce110_max_clks_by_state)); | ||
817 | |||
818 | dce_clk_mgr_construct( | ||
819 | clk_mgr_dce, ctx, regs, clk_shift, clk_mask); | ||
820 | |||
821 | clk_mgr_dce->base.funcs = &dce110_funcs; | ||
822 | |||
823 | return &clk_mgr_dce->base; | ||
824 | } | ||
825 | |||
826 | struct clk_mgr *dce112_clk_mgr_create( | ||
827 | struct dc_context *ctx, | ||
828 | const struct clk_mgr_registers *regs, | ||
829 | const struct clk_mgr_shift *clk_shift, | ||
830 | const struct clk_mgr_mask *clk_mask) | ||
831 | { | ||
832 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); | ||
833 | |||
834 | if (clk_mgr_dce == NULL) { | ||
835 | BREAK_TO_DEBUGGER(); | ||
836 | return NULL; | ||
837 | } | ||
838 | |||
839 | memcpy(clk_mgr_dce->max_clks_by_state, | ||
840 | dce112_max_clks_by_state, | ||
841 | sizeof(dce112_max_clks_by_state)); | ||
842 | |||
843 | dce_clk_mgr_construct( | ||
844 | clk_mgr_dce, ctx, regs, clk_shift, clk_mask); | ||
845 | |||
846 | clk_mgr_dce->base.funcs = &dce112_funcs; | ||
847 | |||
848 | return &clk_mgr_dce->base; | ||
849 | } | ||
850 | |||
851 | struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx) | ||
852 | { | ||
853 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); | ||
854 | |||
855 | if (clk_mgr_dce == NULL) { | ||
856 | BREAK_TO_DEBUGGER(); | ||
857 | return NULL; | ||
858 | } | ||
859 | |||
860 | memcpy(clk_mgr_dce->max_clks_by_state, | ||
861 | dce120_max_clks_by_state, | ||
862 | sizeof(dce120_max_clks_by_state)); | ||
863 | |||
864 | dce_clk_mgr_construct( | ||
865 | clk_mgr_dce, ctx, NULL, NULL, NULL); | ||
866 | |||
867 | clk_mgr_dce->dprefclk_khz = 600000; | ||
868 | clk_mgr_dce->base.funcs = &dce120_funcs; | ||
869 | |||
870 | return &clk_mgr_dce->base; | ||
871 | } | ||
872 | |||
873 | void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr) | ||
874 | { | ||
875 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr); | ||
876 | |||
877 | kfree(clk_mgr_dce); | ||
878 | *clk_mgr = NULL; | ||
879 | } | ||
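With this file the per-ASIC display-clock objects are consolidated behind the clk_mgr interface created above. A minimal usage sketch, assuming a DCE110-style resource pool that owns a clk_mgr member (dc->res_pool->clk_mgr is referenced elsewhere in this patch); the register/shift/mask table names are placeholders, not identifiers introduced here:

	/* Construction, typically in the ASIC's resource pool constructor. */
	pool->clk_mgr = dce110_clk_mgr_create(ctx, &disp_clk_regs,
					      &disp_clk_shift, &disp_clk_mask);
	if (pool->clk_mgr == NULL)
		goto create_fail;

	/* Per-commit clock programming, driven from the HW sequencer. */
	dc->res_pool->clk_mgr->funcs->update_clocks(dc->res_pool->clk_mgr,
						    context, safe_to_lower);

	/* Teardown when the pool is destroyed. */
	if (pool->clk_mgr != NULL)
		dce_clk_mgr_destroy(&pool->clk_mgr);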
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h index 34fdb386c884..046077797416 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h | |||
@@ -24,10 +24,13 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | 26 | ||
27 | #ifndef _DCE_CLOCKS_H_ | 27 | #ifndef _DCE_CLK_MGR_H_ |
28 | #define _DCE_CLOCKS_H_ | 28 | #define _DCE_CLK_MGR_H_ |
29 | 29 | ||
30 | #include "display_clock.h" | 30 | #include "clk_mgr.h" |
31 | #include "dccg.h" | ||
32 | |||
33 | #define MEMORY_TYPE_MULTIPLIER_CZ 4 | ||
31 | 34 | ||
32 | #define CLK_COMMON_REG_LIST_DCE_BASE() \ | 35 | #define CLK_COMMON_REG_LIST_DCE_BASE() \ |
33 | .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \ | 36 | .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \ |
@@ -53,24 +56,31 @@ | |||
53 | type DENTIST_DISPCLK_WDIVIDER; \ | 56 | type DENTIST_DISPCLK_WDIVIDER; \ |
54 | type DENTIST_DISPCLK_CHG_DONE; | 57 | type DENTIST_DISPCLK_CHG_DONE; |
55 | 58 | ||
56 | struct dccg_shift { | 59 | struct clk_mgr_shift { |
57 | CLK_REG_FIELD_LIST(uint8_t) | 60 | CLK_REG_FIELD_LIST(uint8_t) |
58 | }; | 61 | }; |
59 | 62 | ||
60 | struct dccg_mask { | 63 | struct clk_mgr_mask { |
61 | CLK_REG_FIELD_LIST(uint32_t) | 64 | CLK_REG_FIELD_LIST(uint32_t) |
62 | }; | 65 | }; |
63 | 66 | ||
64 | struct dccg_registers { | 67 | struct clk_mgr_registers { |
65 | uint32_t DPREFCLK_CNTL; | 68 | uint32_t DPREFCLK_CNTL; |
66 | uint32_t DENTIST_DISPCLK_CNTL; | 69 | uint32_t DENTIST_DISPCLK_CNTL; |
67 | }; | 70 | }; |
68 | 71 | ||
69 | struct dce_dccg { | 72 | struct state_dependent_clocks { |
70 | struct dccg base; | 73 | int display_clk_khz; |
71 | const struct dccg_registers *regs; | 74 | int pixel_clk_khz; |
72 | const struct dccg_shift *clk_shift; | 75 | }; |
73 | const struct dccg_mask *clk_mask; | 76 | |
77 | struct dce_clk_mgr { | ||
78 | struct clk_mgr base; | ||
79 | const struct clk_mgr_registers *regs; | ||
80 | const struct clk_mgr_shift *clk_mgr_shift; | ||
81 | const struct clk_mgr_mask *clk_mgr_mask; | ||
82 | |||
83 | struct dccg *dccg; | ||
74 | 84 | ||
75 | struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES]; | 85 | struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES]; |
76 | 86 | ||
@@ -91,33 +101,68 @@ struct dce_dccg { | |||
91 | /* DPREFCLK SS percentage Divider (100 or 1000) */ | 101 | /* DPREFCLK SS percentage Divider (100 or 1000) */ |
92 | int dprefclk_ss_divider; | 102 | int dprefclk_ss_divider; |
93 | int dprefclk_khz; | 103 | int dprefclk_khz; |
104 | |||
105 | enum dm_pp_clocks_state max_clks_state; | ||
106 | enum dm_pp_clocks_state cur_min_clks_state; | ||
94 | }; | 107 | }; |
95 | 108 | ||
109 | /* Starting DID for each range */ | ||
110 | enum dentist_base_divider_id { | ||
111 | DENTIST_BASE_DID_1 = 0x08, | ||
112 | DENTIST_BASE_DID_2 = 0x40, | ||
113 | DENTIST_BASE_DID_3 = 0x60, | ||
114 | DENTIST_BASE_DID_4 = 0x7e, | ||
115 | DENTIST_MAX_DID = 0x7f | ||
116 | }; | ||
96 | 117 | ||
97 | struct dccg *dce_dccg_create( | 118 | /* Starting point and step size for each divider range.*/ |
98 | struct dc_context *ctx, | 119 | enum dentist_divider_range { |
99 | const struct dccg_registers *regs, | 120 | DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */ |
100 | const struct dccg_shift *clk_shift, | 121 | DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */ |
101 | const struct dccg_mask *clk_mask); | 122 | DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */ |
123 | DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */ | ||
124 | DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */ | ||
125 | DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */ | ||
126 | DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */ | ||
127 | DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */ | ||
128 | DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4 | ||
129 | }; | ||
130 | |||
131 | static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk) | ||
132 | { | ||
133 | return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk); | ||
134 | } | ||
135 | |||
136 | void dce_clock_read_ss_info(struct dce_clk_mgr *dccg_dce); | ||
137 | |||
138 | int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg); | ||
102 | 139 | ||
103 | struct dccg *dce110_dccg_create( | 140 | void dce110_fill_display_configs( |
141 | const struct dc_state *context, | ||
142 | struct dm_pp_display_configuration *pp_display_cfg); | ||
143 | |||
144 | int dce112_set_clock(struct clk_mgr *dccg, int requested_clk_khz); | ||
145 | |||
146 | struct clk_mgr *dce_clk_mgr_create( | ||
104 | struct dc_context *ctx, | 147 | struct dc_context *ctx, |
105 | const struct dccg_registers *regs, | 148 | const struct clk_mgr_registers *regs, |
106 | const struct dccg_shift *clk_shift, | 149 | const struct clk_mgr_shift *clk_shift, |
107 | const struct dccg_mask *clk_mask); | 150 | const struct clk_mgr_mask *clk_mask); |
108 | 151 | ||
109 | struct dccg *dce112_dccg_create( | 152 | struct clk_mgr *dce110_clk_mgr_create( |
110 | struct dc_context *ctx, | 153 | struct dc_context *ctx, |
111 | const struct dccg_registers *regs, | 154 | const struct clk_mgr_registers *regs, |
112 | const struct dccg_shift *clk_shift, | 155 | const struct clk_mgr_shift *clk_shift, |
113 | const struct dccg_mask *clk_mask); | 156 | const struct clk_mgr_mask *clk_mask); |
114 | 157 | ||
115 | struct dccg *dce120_dccg_create(struct dc_context *ctx); | 158 | struct clk_mgr *dce112_clk_mgr_create( |
159 | struct dc_context *ctx, | ||
160 | const struct clk_mgr_registers *regs, | ||
161 | const struct clk_mgr_shift *clk_shift, | ||
162 | const struct clk_mgr_mask *clk_mask); | ||
116 | 163 | ||
117 | #ifdef CONFIG_DRM_AMD_DC_DCN1_0 | 164 | struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx); |
118 | struct dccg *dcn1_dccg_create(struct dc_context *ctx); | ||
119 | #endif | ||
120 | 165 | ||
121 | void dce_dccg_destroy(struct dccg **dccg); | 166 | void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr); |
122 | 167 | ||
123 | #endif /* _DCE_CLOCKS_H_ */ | 168 | #endif /* _DCE_CLK_MGR_H_ */ |
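The DENTIST DID tables and the should_set_clock() helper now live in this header alongside the clk_mgr constructors. A worked example of the divider math used by dce_get_dp_ref_freq_khz(), with illustrative values and the 3.6 GHz VCO fallback from the new .c file: a programmed DENTIST_DPREFCLK_WDIVIDER DID of 0x40 falls at the start of range 2 and maps to divider code 64, i.e. an effective divide of 64 / DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 16.

	int vco_khz = 3600000;		/* default dentist VCO fallback */
	int divider_code = 64;		/* from DID 0x40 (DENTIST_BASE_DID_2) */
	int dp_ref_khz = DENTIST_DIVIDER_RANGE_SCALE_FACTOR * vco_khz / divider_code;
	/* dp_ref_khz = 4 * 3600000 / 64 = 225000 kHz (225 MHz) */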
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c deleted file mode 100644 index d89a097ba936..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ /dev/null | |||
@@ -1,947 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2012-16 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include "dce_clocks.h" | ||
27 | #include "dm_services.h" | ||
28 | #include "reg_helper.h" | ||
29 | #include "fixed31_32.h" | ||
30 | #include "bios_parser_interface.h" | ||
31 | #include "dc.h" | ||
32 | #include "dmcu.h" | ||
33 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) | ||
34 | #include "dcn_calcs.h" | ||
35 | #endif | ||
36 | #include "core_types.h" | ||
37 | #include "dc_types.h" | ||
38 | #include "dal_asic_id.h" | ||
39 | |||
40 | #define TO_DCE_CLOCKS(clocks)\ | ||
41 | container_of(clocks, struct dce_dccg, base) | ||
42 | |||
43 | #define REG(reg) \ | ||
44 | (clk_dce->regs->reg) | ||
45 | |||
46 | #undef FN | ||
47 | #define FN(reg_name, field_name) \ | ||
48 | clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name | ||
49 | |||
50 | #define CTX \ | ||
51 | clk_dce->base.ctx | ||
52 | #define DC_LOGGER \ | ||
53 | clk->ctx->logger | ||
54 | |||
55 | /* Max clock values for each state indexed by "enum clocks_state": */ | ||
56 | static const struct state_dependent_clocks dce80_max_clks_by_state[] = { | ||
57 | /* ClocksStateInvalid - should not be used */ | ||
58 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
59 | /* ClocksStateUltraLow - not expected to be used for DCE 8.0 */ | ||
60 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
61 | /* ClocksStateLow */ | ||
62 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000}, | ||
63 | /* ClocksStateNominal */ | ||
64 | { .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, | ||
65 | /* ClocksStatePerformance */ | ||
66 | { .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; | ||
67 | |||
68 | static const struct state_dependent_clocks dce110_max_clks_by_state[] = { | ||
69 | /*ClocksStateInvalid - should not be used*/ | ||
70 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
71 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
72 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, | ||
73 | /*ClocksStateLow*/ | ||
74 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, | ||
75 | /*ClocksStateNominal*/ | ||
76 | { .display_clk_khz = 467000, .pixel_clk_khz = 400000 }, | ||
77 | /*ClocksStatePerformance*/ | ||
78 | { .display_clk_khz = 643000, .pixel_clk_khz = 400000 } }; | ||
79 | |||
80 | static const struct state_dependent_clocks dce112_max_clks_by_state[] = { | ||
81 | /*ClocksStateInvalid - should not be used*/ | ||
82 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
83 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
84 | { .display_clk_khz = 389189, .pixel_clk_khz = 346672 }, | ||
85 | /*ClocksStateLow*/ | ||
86 | { .display_clk_khz = 459000, .pixel_clk_khz = 400000 }, | ||
87 | /*ClocksStateNominal*/ | ||
88 | { .display_clk_khz = 667000, .pixel_clk_khz = 600000 }, | ||
89 | /*ClocksStatePerformance*/ | ||
90 | { .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } }; | ||
91 | |||
92 | static const struct state_dependent_clocks dce120_max_clks_by_state[] = { | ||
93 | /*ClocksStateInvalid - should not be used*/ | ||
94 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
95 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
96 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
97 | /*ClocksStateLow*/ | ||
98 | { .display_clk_khz = 460000, .pixel_clk_khz = 400000 }, | ||
99 | /*ClocksStateNominal*/ | ||
100 | { .display_clk_khz = 670000, .pixel_clk_khz = 600000 }, | ||
101 | /*ClocksStatePerformance*/ | ||
102 | { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; | ||
103 | |||
104 | /* Starting DID for each range */ | ||
105 | enum dentist_base_divider_id { | ||
106 | DENTIST_BASE_DID_1 = 0x08, | ||
107 | DENTIST_BASE_DID_2 = 0x40, | ||
108 | DENTIST_BASE_DID_3 = 0x60, | ||
109 | DENTIST_BASE_DID_4 = 0x7e, | ||
110 | DENTIST_MAX_DID = 0x7f | ||
111 | }; | ||
112 | |||
113 | /* Starting point and step size for each divider range.*/ | ||
114 | enum dentist_divider_range { | ||
115 | DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */ | ||
116 | DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */ | ||
117 | DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */ | ||
118 | DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */ | ||
119 | DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */ | ||
120 | DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */ | ||
121 | DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */ | ||
122 | DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */ | ||
123 | DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4 | ||
124 | }; | ||
125 | |||
126 | static int dentist_get_divider_from_did(int did) | ||
127 | { | ||
128 | if (did < DENTIST_BASE_DID_1) | ||
129 | did = DENTIST_BASE_DID_1; | ||
130 | if (did > DENTIST_MAX_DID) | ||
131 | did = DENTIST_MAX_DID; | ||
132 | |||
133 | if (did < DENTIST_BASE_DID_2) { | ||
134 | return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP | ||
135 | * (did - DENTIST_BASE_DID_1); | ||
136 | } else if (did < DENTIST_BASE_DID_3) { | ||
137 | return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP | ||
138 | * (did - DENTIST_BASE_DID_2); | ||
139 | } else if (did < DENTIST_BASE_DID_4) { | ||
140 | return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP | ||
141 | * (did - DENTIST_BASE_DID_3); | ||
142 | } else { | ||
143 | return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP | ||
144 | * (did - DENTIST_BASE_DID_4); | ||
145 | } | ||
146 | } | ||
147 | |||
148 | /* SW will adjust DP REF Clock average value for all purposes | ||
149 | * (DP DTO / DP Audio DTO and DP GTC) | ||
150 | if clock is spread for all cases: | ||
151 | -if SS enabled on DP Ref clock and HW de-spreading enabled with SW | ||
152 | calculations for DS_INCR/DS_MODULO (this is planned to be default case) | ||
153 | -if SS enabled on DP Ref clock and HW de-spreading enabled with HW | ||
154 | calculations (not planned to be used, but average clock should still | ||
155 | be valid) | ||
156 | -if SS enabled on DP Ref clock and HW de-spreading disabled | ||
157 | (should not be case with CIK) then SW should program all rates | ||
158 | generated according to average value (case as with previous ASICs) | ||
159 | */ | ||
160 | static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz) | ||
161 | { | ||
162 | if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) { | ||
163 | struct fixed31_32 ss_percentage = dc_fixpt_div_int( | ||
164 | dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage, | ||
165 | clk_dce->dprefclk_ss_divider), 200); | ||
166 | struct fixed31_32 adj_dp_ref_clk_khz; | ||
167 | |||
168 | ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage); | ||
169 | adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz); | ||
170 | dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz); | ||
171 | } | ||
172 | return dp_ref_clk_khz; | ||
173 | } | ||
174 | |||
175 | static int dce_get_dp_ref_freq_khz(struct dccg *clk) | ||
176 | { | ||
177 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
178 | int dprefclk_wdivider; | ||
179 | int dprefclk_src_sel; | ||
180 | int dp_ref_clk_khz = 600000; | ||
181 | int target_div; | ||
182 | |||
183 | /* ASSERT DP Reference Clock source is from DFS*/ | ||
184 | REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); | ||
185 | ASSERT(dprefclk_src_sel == 0); | ||
186 | |||
187 | /* Read the mmDENTIST_DISPCLK_CNTL to get the currently | ||
188 | * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ | ||
189 | REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); | ||
190 | |||
191 | /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/ | ||
192 | target_div = dentist_get_divider_from_did(dprefclk_wdivider); | ||
193 | |||
194 | /* Calculate the current DFS clock, in kHz.*/ | ||
195 | dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR | ||
196 | * clk_dce->dentist_vco_freq_khz) / target_div; | ||
197 | |||
198 | return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz); | ||
199 | } | ||
200 | |||
201 | static int dce12_get_dp_ref_freq_khz(struct dccg *clk) | ||
202 | { | ||
203 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
204 | |||
205 | return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz); | ||
206 | } | ||
207 | |||
208 | static enum dm_pp_clocks_state dce_get_required_clocks_state( | ||
209 | struct dccg *clk, | ||
210 | struct dc_clocks *req_clocks) | ||
211 | { | ||
212 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
213 | int i; | ||
214 | enum dm_pp_clocks_state low_req_clk; | ||
215 | |||
216 | /* Iterate from highest supported to lowest valid state, and update | ||
217 | * lowest RequiredState with the lowest state that satisfies | ||
218 | * all required clocks | ||
219 | */ | ||
220 | for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--) | ||
221 | if (req_clocks->dispclk_khz > | ||
222 | clk_dce->max_clks_by_state[i].display_clk_khz | ||
223 | || req_clocks->phyclk_khz > | ||
224 | clk_dce->max_clks_by_state[i].pixel_clk_khz) | ||
225 | break; | ||
226 | |||
227 | low_req_clk = i + 1; | ||
228 | if (low_req_clk > clk->max_clks_state) { | ||
229 | /* set max clock state for high phyclock, invalid on exceeding display clock */ | ||
230 | if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz | ||
231 | < req_clocks->dispclk_khz) | ||
232 | low_req_clk = DM_PP_CLOCKS_STATE_INVALID; | ||
233 | else | ||
234 | low_req_clk = clk->max_clks_state; | ||
235 | } | ||
236 | |||
237 | return low_req_clk; | ||
238 | } | ||
239 | |||
240 | static int dce_set_clock( | ||
241 | struct dccg *clk, | ||
242 | int requested_clk_khz) | ||
243 | { | ||
244 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
245 | struct bp_pixel_clock_parameters pxl_clk_params = { 0 }; | ||
246 | struct dc_bios *bp = clk->ctx->dc_bios; | ||
247 | int actual_clock = requested_clk_khz; | ||
248 | |||
249 | /* Make sure requested clock isn't lower than minimum threshold*/ | ||
250 | if (requested_clk_khz > 0) | ||
251 | requested_clk_khz = max(requested_clk_khz, | ||
252 | clk_dce->dentist_vco_freq_khz / 64); | ||
253 | |||
254 | /* Prepare to program display clock*/ | ||
255 | pxl_clk_params.target_pixel_clock = requested_clk_khz; | ||
256 | pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; | ||
257 | |||
258 | if (clk_dce->dfs_bypass_active) | ||
259 | pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; | ||
260 | |||
261 | bp->funcs->program_display_engine_pll(bp, &pxl_clk_params); | ||
262 | |||
263 | if (clk_dce->dfs_bypass_active) { | ||
264 | /* Cache the fixed display clock*/ | ||
265 | clk_dce->dfs_bypass_disp_clk = | ||
266 | pxl_clk_params.dfs_bypass_display_clock; | ||
267 | actual_clock = pxl_clk_params.dfs_bypass_display_clock; | ||
268 | } | ||
269 | |||
270 | /* from power down, we need mark the clock state as ClocksStateNominal | ||
271 | * from HWReset, so when resume we will call pplib voltage regulator.*/ | ||
272 | if (requested_clk_khz == 0) | ||
273 | clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
274 | return actual_clock; | ||
275 | } | ||
276 | |||
277 | static int dce_psr_set_clock( | ||
278 | struct dccg *clk, | ||
279 | int requested_clk_khz) | ||
280 | { | ||
281 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
282 | struct dc_context *ctx = clk_dce->base.ctx; | ||
283 | struct dc *core_dc = ctx->dc; | ||
284 | struct dmcu *dmcu = core_dc->res_pool->dmcu; | ||
285 | int actual_clk_khz = requested_clk_khz; | ||
286 | |||
287 | actual_clk_khz = dce_set_clock(clk, requested_clk_khz); | ||
288 | |||
289 | dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7); | ||
290 | return actual_clk_khz; | ||
291 | } | ||
292 | |||
293 | static int dce112_set_clock( | ||
294 | struct dccg *clk, | ||
295 | int requested_clk_khz) | ||
296 | { | ||
297 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
298 | struct bp_set_dce_clock_parameters dce_clk_params; | ||
299 | struct dc_bios *bp = clk->ctx->dc_bios; | ||
300 | struct dc *core_dc = clk->ctx->dc; | ||
301 | struct dmcu *dmcu = core_dc->res_pool->dmcu; | ||
302 | int actual_clock = requested_clk_khz; | ||
303 | /* Prepare to program display clock*/ | ||
304 | memset(&dce_clk_params, 0, sizeof(dce_clk_params)); | ||
305 | |||
306 | /* Make sure requested clock isn't lower than minimum threshold*/ | ||
307 | if (requested_clk_khz > 0) | ||
308 | requested_clk_khz = max(requested_clk_khz, | ||
309 | clk_dce->dentist_vco_freq_khz / 62); | ||
310 | |||
311 | dce_clk_params.target_clock_frequency = requested_clk_khz; | ||
312 | dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; | ||
313 | dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK; | ||
314 | |||
315 | bp->funcs->set_dce_clock(bp, &dce_clk_params); | ||
316 | actual_clock = dce_clk_params.target_clock_frequency; | ||
317 | |||
318 | /* from power down, we need mark the clock state as ClocksStateNominal | ||
319 | * from HWReset, so when resume we will call pplib voltage regulator.*/ | ||
320 | if (requested_clk_khz == 0) | ||
321 | clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
322 | |||
323 | /*Program DP ref Clock*/ | ||
324 | /*VBIOS will determine DPREFCLK frequency, so we don't set it*/ | ||
325 | dce_clk_params.target_clock_frequency = 0; | ||
326 | dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK; | ||
327 | if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev)) | ||
328 | dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = | ||
329 | (dce_clk_params.pll_id == | ||
330 | CLOCK_SOURCE_COMBO_DISPLAY_PLL0); | ||
331 | else | ||
332 | dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false; | ||
333 | |||
334 | bp->funcs->set_dce_clock(bp, &dce_clk_params); | ||
335 | |||
336 | if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { | ||
337 | if (clk_dce->dfs_bypass_disp_clk != actual_clock) | ||
338 | dmcu->funcs->set_psr_wait_loop(dmcu, | ||
339 | actual_clock / 1000 / 7); | ||
340 | } | ||
341 | |||
342 | clk_dce->dfs_bypass_disp_clk = actual_clock; | ||
343 | return actual_clock; | ||
344 | } | ||
345 | |||
346 | static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce) | ||
347 | { | ||
348 | struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug; | ||
349 | struct dc_bios *bp = clk_dce->base.ctx->dc_bios; | ||
350 | struct integrated_info info = { { { 0 } } }; | ||
351 | struct dc_firmware_info fw_info = { { 0 } }; | ||
352 | int i; | ||
353 | |||
354 | if (bp->integrated_info) | ||
355 | info = *bp->integrated_info; | ||
356 | |||
357 | clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq; | ||
358 | if (clk_dce->dentist_vco_freq_khz == 0) { | ||
359 | bp->funcs->get_firmware_info(bp, &fw_info); | ||
360 | clk_dce->dentist_vco_freq_khz = | ||
361 | fw_info.smu_gpu_pll_output_freq; | ||
362 | if (clk_dce->dentist_vco_freq_khz == 0) | ||
363 | clk_dce->dentist_vco_freq_khz = 3600000; | ||
364 | } | ||
365 | |||
366 | /*update the maximum display clock for each power state*/ | ||
367 | for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { | ||
368 | enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID; | ||
369 | |||
370 | switch (i) { | ||
371 | case 0: | ||
372 | clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW; | ||
373 | break; | ||
374 | |||
375 | case 1: | ||
376 | clk_state = DM_PP_CLOCKS_STATE_LOW; | ||
377 | break; | ||
378 | |||
379 | case 2: | ||
380 | clk_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
381 | break; | ||
382 | |||
383 | case 3: | ||
384 | clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE; | ||
385 | break; | ||
386 | |||
387 | default: | ||
388 | clk_state = DM_PP_CLOCKS_STATE_INVALID; | ||
389 | break; | ||
390 | } | ||
391 | |||
392 | /* Do not allow a bad VBIOS/SBIOS to override the table with invalid | ||
393 | * values; only accept clocks of at least 100 MHz. */ | ||
394 | if (info.disp_clk_voltage[i].max_supported_clk >= 100000) | ||
395 | clk_dce->max_clks_by_state[clk_state].display_clk_khz = | ||
396 | info.disp_clk_voltage[i].max_supported_clk; | ||
397 | } | ||
398 | |||
399 | if (!debug->disable_dfs_bypass && bp->integrated_info) | ||
400 | if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) | ||
401 | clk_dce->dfs_bypass_enabled = true; | ||
402 | } | ||
403 | |||
404 | static void dce_clock_read_ss_info(struct dce_dccg *clk_dce) | ||
405 | { | ||
406 | struct dc_bios *bp = clk_dce->base.ctx->dc_bios; | ||
407 | int ss_info_num = bp->funcs->get_ss_entry_number( | ||
408 | bp, AS_SIGNAL_TYPE_GPU_PLL); | ||
409 | |||
410 | if (ss_info_num) { | ||
411 | struct spread_spectrum_info info = { { 0 } }; | ||
412 | enum bp_result result = bp->funcs->get_spread_spectrum_info( | ||
413 | bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info); | ||
414 | |||
415 | /* The VBIOS keeps an entry for GPU PLL spread spectrum even when | ||
416 | * SS is not enabled; a non-zero | ||
417 | * SSInfo.spreadSpectrumPercentage is therefore the sign that | ||
418 | * SS is actually enabled. | ||
419 | */ | ||
420 | if (result == BP_RESULT_OK && | ||
421 | info.spread_spectrum_percentage != 0) { | ||
422 | clk_dce->ss_on_dprefclk = true; | ||
423 | clk_dce->dprefclk_ss_divider = info.spread_percentage_divider; | ||
424 | |||
425 | if (info.type.CENTER_MODE == 0) { | ||
426 | /* TODO: Currently for DP Reference clock we | ||
427 | * need only SS percentage for | ||
428 | * downspread */ | ||
429 | clk_dce->dprefclk_ss_percentage = | ||
430 | info.spread_spectrum_percentage; | ||
431 | } | ||
432 | |||
433 | return; | ||
434 | } | ||
435 | |||
436 | result = bp->funcs->get_spread_spectrum_info( | ||
437 | bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info); | ||
438 | |||
439 | /* The VBIOS keeps an entry for DPREFCLK spread spectrum even when | ||
440 | * SS is not enabled; a non-zero | ||
441 | * SSInfo.spreadSpectrumPercentage is therefore the sign that | ||
442 | * SS is actually enabled. | ||
443 | */ | ||
444 | if (result == BP_RESULT_OK && | ||
445 | info.spread_spectrum_percentage != 0) { | ||
446 | clk_dce->ss_on_dprefclk = true; | ||
447 | clk_dce->dprefclk_ss_divider = info.spread_percentage_divider; | ||
448 | |||
449 | if (info.type.CENTER_MODE == 0) { | ||
450 | /* Currently for DP Reference clock we | ||
451 | * need only SS percentage for | ||
452 | * downspread */ | ||
453 | clk_dce->dprefclk_ss_percentage = | ||
454 | info.spread_spectrum_percentage; | ||
455 | } | ||
456 | } | ||
457 | } | ||
458 | } | ||
459 | |||
460 | static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk) | ||
461 | { | ||
462 | return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk); | ||
463 | } | ||
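
should_set_clock() encodes the usual ramping rule: an increase is applied immediately, a decrease only once it is safe to lower (for example after the new state has taken effect). A standalone sketch of how the predicate is typically consumed; names are hypothetical:

	static int apply_clock_khz(bool safe_to_lower, int calc_khz, int cur_khz)
	{
		/* same predicate as should_set_clock() above */
		if ((safe_to_lower && calc_khz < cur_khz) || calc_khz > cur_khz)
			return calc_khz;	/* program the newly calculated value */

		return cur_khz;		/* otherwise keep the current value */
	}
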
464 | |||
465 | static void dce12_update_clocks(struct dccg *dccg, | ||
466 | struct dc_clocks *new_clocks, | ||
467 | bool safe_to_lower) | ||
468 | { | ||
469 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; | ||
470 | |||
471 | /* TODO: Investigate why this is needed to fix display corruption. */ | ||
472 | new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; | ||
473 | |||
474 | if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { | ||
475 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; | ||
476 | clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz; | ||
477 | new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); | ||
478 | dccg->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
479 | |||
480 | dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); | ||
481 | } | ||
482 | |||
483 | if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) { | ||
484 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK; | ||
485 | clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz; | ||
486 | dccg->clks.phyclk_khz = new_clocks->phyclk_khz; | ||
487 | |||
488 | dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); | ||
489 | } | ||
490 | } | ||
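
The 15% bump applied to the requested DISPCLK above (and again in dce_update_clocks() further down) is plain integer padding; a purely illustrative restatement:

	static int pad_dispclk_khz(int dispclk_khz)
	{
		/* request 15% above the calculated need; the underlying reason
		 * is still marked TODO in the driver */
		return dispclk_khz * 115 / 100;
	}
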
491 | |||
492 | #ifdef CONFIG_DRM_AMD_DC_DCN1_0 | ||
493 | static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks) | ||
494 | { | ||
495 | bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; | ||
496 | bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz; | ||
497 | int disp_clk_threshold = new_clocks->max_supported_dppclk_khz; | ||
498 | bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz; | ||
499 | |||
500 | /* Increasing the clock: the two-step case is current divider off (0), requested divider on (1) */ | ||
501 | if (dispclk_increase) { | ||
502 | /* already divided by 2, no need to reach target clk with 2 steps*/ | ||
503 | if (cur_dpp_div) | ||
504 | return new_clocks->dispclk_khz; | ||
505 | |||
506 | /* request disp clk is lower than maximum supported dpp clk, | ||
507 | * no need to reach target clk with two steps. | ||
508 | */ | ||
509 | if (new_clocks->dispclk_khz <= disp_clk_threshold) | ||
510 | return new_clocks->dispclk_khz; | ||
511 | |||
512 | /* requested dpp clk does not need to be divided by 2, no two-step ramp needed */ | ||
513 | if (!request_dpp_div) | ||
514 | return new_clocks->dispclk_khz; | ||
515 | |||
516 | } else { | ||
517 | /* decrease clock, looking for current dppclk divided by 2, | ||
518 | * request dppclk not divided by 2. | ||
519 | */ | ||
520 | |||
521 | /* current dpp clk not divided by 2, no need to ramp*/ | ||
522 | if (!cur_dpp_div) | ||
523 | return new_clocks->dispclk_khz; | ||
524 | |||
525 | /* current disp clk is lower than current maximum dpp clk, | ||
526 | * no need to ramp | ||
527 | */ | ||
528 | if (dccg->clks.dispclk_khz <= disp_clk_threshold) | ||
529 | return new_clocks->dispclk_khz; | ||
530 | |||
531 | /* requested dpp clk needs to be divided by 2 */ | ||
532 | if (request_dpp_div) | ||
533 | return new_clocks->dispclk_khz; | ||
534 | } | ||
535 | |||
536 | return disp_clk_threshold; | ||
537 | } | ||
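
Worked example with hypothetical numbers: with DISPCLK currently at 300 MHz and no DPP divider active, a request for 900 MHz DISPCLK and 450 MHz DPPCLK against a max_supported_dppclk_khz of 600 MHz returns the 600 MHz threshold. dcn1_ramp_up_dispclk_with_dpp() below then steps DISPCLK to 600 MHz first, switches the pipes to the divided DPP clock, and only afterwards raises DISPCLK to the final 900 MHz.
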
538 | |||
539 | static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks) | ||
540 | { | ||
541 | struct dc *dc = dccg->ctx->dc; | ||
542 | int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks); | ||
543 | bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; | ||
544 | int i; | ||
545 | |||
546 | /* set disp clk to dpp clk threshold */ | ||
547 | dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold); | ||
548 | |||
549 | /* update request dpp clk division option */ | ||
550 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
551 | struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; | ||
552 | |||
553 | if (!pipe_ctx->plane_state) | ||
554 | continue; | ||
555 | |||
556 | pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control( | ||
557 | pipe_ctx->plane_res.dpp, | ||
558 | request_dpp_div, | ||
559 | true); | ||
560 | } | ||
561 | |||
562 | /* If target clk not same as dppclk threshold, set to target clock */ | ||
563 | if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) | ||
564 | dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); | ||
565 | |||
566 | dccg->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
567 | dccg->clks.dppclk_khz = new_clocks->dppclk_khz; | ||
568 | dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz; | ||
569 | } | ||
570 | |||
571 | static void dcn1_update_clocks(struct dccg *dccg, | ||
572 | struct dc_clocks *new_clocks, | ||
573 | bool safe_to_lower) | ||
574 | { | ||
575 | struct dc *dc = dccg->ctx->dc; | ||
576 | struct pp_smu_display_requirement_rv *smu_req_cur = | ||
577 | &dc->res_pool->pp_smu_req; | ||
578 | struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; | ||
579 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | ||
580 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; | ||
581 | bool send_request_to_increase = false; | ||
582 | bool send_request_to_lower = false; | ||
583 | |||
584 | if (new_clocks->phyclk_khz) | ||
585 | smu_req.display_count = 1; | ||
586 | else | ||
587 | smu_req.display_count = 0; | ||
588 | |||
589 | if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz | ||
590 | || new_clocks->phyclk_khz > dccg->clks.phyclk_khz | ||
591 | || new_clocks->fclk_khz > dccg->clks.fclk_khz | ||
592 | || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz) | ||
593 | send_request_to_increase = true; | ||
594 | |||
595 | if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) { | ||
596 | dccg->clks.phyclk_khz = new_clocks->phyclk_khz; | ||
597 | |||
598 | send_request_to_lower = true; | ||
599 | } | ||
600 | |||
601 | if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) { | ||
602 | dccg->clks.fclk_khz = new_clocks->fclk_khz; | ||
603 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK; | ||
604 | clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz; | ||
605 | smu_req.hard_min_fclk_khz = new_clocks->fclk_khz; | ||
606 | |||
607 | dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); | ||
608 | send_request_to_lower = true; | ||
609 | } | ||
610 | |||
611 | if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) { | ||
612 | dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz; | ||
613 | smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz; | ||
614 | |||
615 | send_request_to_lower = true; | ||
616 | } | ||
617 | |||
618 | if (should_set_clock(safe_to_lower, | ||
619 | new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) { | ||
620 | dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; | ||
621 | smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz; | ||
622 | |||
623 | send_request_to_lower = true; | ||
624 | } | ||
625 | |||
626 | /* Raise the dcf clk before the dpp clk so that | ||
627 | * the voltage is high enough to run the dpp clk. | ||
628 | */ | ||
629 | if (send_request_to_increase) { | ||
630 | /*use dcfclk to request voltage*/ | ||
631 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; | ||
632 | clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); | ||
633 | dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); | ||
634 | if (pp_smu->set_display_requirement) | ||
635 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); | ||
636 | } | ||
637 | |||
638 | /* dcn1 dppclk is tied to dispclk */ | ||
639 | /* program dispclk even when unchanged (==) as a w/a for sleep/resume clock ramping issues */ | ||
640 | if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz) | ||
641 | || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) { | ||
642 | dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks); | ||
643 | dccg->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
644 | |||
645 | send_request_to_lower = true; | ||
646 | } | ||
647 | |||
648 | if (!send_request_to_increase && send_request_to_lower) { | ||
649 | /*use dcfclk to request voltage*/ | ||
650 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; | ||
651 | clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); | ||
652 | dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); | ||
653 | if (pp_smu->set_display_requirement) | ||
654 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); | ||
655 | } | ||
656 | |||
657 | |||
658 | *smu_req_cur = smu_req; | ||
659 | } | ||
660 | #endif | ||
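
In short, dcn1_update_clocks() orders the voltage requests around the clock programming: if any of DISPCLK, PHYCLK, FCLK or DCFCLK goes up, the DCFCLK-based voltage request and the SMU display requirement are sent before the clocks are raised; if only decreases remain, the same request is sent afterwards, so the voltage never drops below what the currently programmed clocks need.
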
661 | |||
662 | static void dce_update_clocks(struct dccg *dccg, | ||
663 | struct dc_clocks *new_clocks, | ||
664 | bool safe_to_lower) | ||
665 | { | ||
666 | struct dm_pp_power_level_change_request level_change_req; | ||
667 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg); | ||
668 | |||
669 | /* TODO: Investigate why this is needed to fix display corruption. */ | ||
670 | if (!clk_dce->dfs_bypass_active) | ||
671 | new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; | ||
672 | |||
673 | level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks); | ||
674 | /* get max clock state from PPLIB */ | ||
675 | if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower) | ||
676 | || level_change_req.power_level > dccg->cur_min_clks_state) { | ||
677 | if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req)) | ||
678 | dccg->cur_min_clks_state = level_change_req.power_level; | ||
679 | } | ||
680 | |||
681 | if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { | ||
682 | new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); | ||
683 | dccg->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
684 | } | ||
685 | } | ||
686 | |||
687 | static bool dce_update_dfs_bypass( | ||
688 | struct dccg *dccg, | ||
689 | struct dc *dc, | ||
690 | struct dc_state *context, | ||
691 | int requested_clock_khz) | ||
692 | { | ||
693 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg); | ||
694 | struct resource_context *res_ctx = &context->res_ctx; | ||
695 | enum signal_type signal_type = SIGNAL_TYPE_NONE; | ||
696 | bool was_active = clk_dce->dfs_bypass_active; | ||
697 | int i; | ||
698 | |||
699 | /* Disable DFS bypass by default. */ | ||
700 | clk_dce->dfs_bypass_active = false; | ||
701 | |||
702 | /* Check that DFS bypass is available. */ | ||
703 | if (!clk_dce->dfs_bypass_enabled) | ||
704 | goto update; | ||
705 | |||
706 | /* Check if the requested display clock is below the threshold. */ | ||
707 | if (requested_clock_khz >= 400000) | ||
708 | goto update; | ||
709 | |||
710 | /* DFS-bypass should only be enabled on single stream setups */ | ||
711 | if (context->stream_count != 1) | ||
712 | goto update; | ||
713 | |||
714 | /* Check that the stream's signal type is an embedded panel */ | ||
715 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
716 | if (res_ctx->pipe_ctx[i].stream) { | ||
717 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; | ||
718 | |||
719 | signal_type = pipe_ctx->stream->sink->link->connector_signal; | ||
720 | break; | ||
721 | } | ||
722 | } | ||
723 | |||
724 | if (signal_type == SIGNAL_TYPE_EDP || | ||
725 | signal_type == SIGNAL_TYPE_LVDS) | ||
726 | clk_dce->dfs_bypass_active = true; | ||
727 | |||
728 | update: | ||
729 | /* Update the clock state. We don't need to respect safe_to_lower | ||
730 | * because DFS bypass should always be greater than the current | ||
731 | * display clock frequency. | ||
732 | */ | ||
733 | if (was_active != clk_dce->dfs_bypass_active) { | ||
734 | dccg->clks.dispclk_khz = | ||
735 | dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz); | ||
736 | return true; | ||
737 | } | ||
738 | |||
739 | return false; | ||
740 | } | ||
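
In short, DFS bypass is activated only when all of the following hold: the VBIOS reports DFS_BYPASS_ENABLE, the requested display clock is below 400 MHz, exactly one stream is active, and that stream drives an embedded panel (eDP or LVDS). Any change in the bypass state reprograms DISPCLK immediately, irrespective of safe_to_lower.
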
741 | |||
742 | #ifdef CONFIG_DRM_AMD_DC_DCN1_0 | ||
743 | static const struct display_clock_funcs dcn1_funcs = { | ||
744 | .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, | ||
745 | .set_dispclk = dce112_set_clock, | ||
746 | .update_clocks = dcn1_update_clocks | ||
747 | }; | ||
748 | #endif | ||
749 | |||
750 | static const struct display_clock_funcs dce120_funcs = { | ||
751 | .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, | ||
752 | .set_dispclk = dce112_set_clock, | ||
753 | .update_clocks = dce12_update_clocks | ||
754 | }; | ||
755 | |||
756 | static const struct display_clock_funcs dce112_funcs = { | ||
757 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
758 | .set_dispclk = dce112_set_clock, | ||
759 | .update_clocks = dce_update_clocks | ||
760 | }; | ||
761 | |||
762 | static const struct display_clock_funcs dce110_funcs = { | ||
763 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
764 | .set_dispclk = dce_psr_set_clock, | ||
765 | .update_clocks = dce_update_clocks, | ||
766 | .update_dfs_bypass = dce_update_dfs_bypass | ||
767 | }; | ||
768 | |||
769 | static const struct display_clock_funcs dce_funcs = { | ||
770 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
771 | .set_dispclk = dce_set_clock, | ||
772 | .update_clocks = dce_update_clocks | ||
773 | }; | ||
774 | |||
775 | static void dce_dccg_construct( | ||
776 | struct dce_dccg *clk_dce, | ||
777 | struct dc_context *ctx, | ||
778 | const struct dccg_registers *regs, | ||
779 | const struct dccg_shift *clk_shift, | ||
780 | const struct dccg_mask *clk_mask) | ||
781 | { | ||
782 | struct dccg *base = &clk_dce->base; | ||
783 | |||
784 | base->ctx = ctx; | ||
785 | base->funcs = &dce_funcs; | ||
786 | |||
787 | clk_dce->regs = regs; | ||
788 | clk_dce->clk_shift = clk_shift; | ||
789 | clk_dce->clk_mask = clk_mask; | ||
790 | |||
791 | clk_dce->dfs_bypass_disp_clk = 0; | ||
792 | |||
793 | clk_dce->dprefclk_ss_percentage = 0; | ||
794 | clk_dce->dprefclk_ss_divider = 1000; | ||
795 | clk_dce->ss_on_dprefclk = false; | ||
796 | |||
797 | base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
798 | base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID; | ||
799 | |||
800 | dce_clock_read_integrated_info(clk_dce); | ||
801 | dce_clock_read_ss_info(clk_dce); | ||
802 | } | ||
803 | |||
804 | struct dccg *dce_dccg_create( | ||
805 | struct dc_context *ctx, | ||
806 | const struct dccg_registers *regs, | ||
807 | const struct dccg_shift *clk_shift, | ||
808 | const struct dccg_mask *clk_mask) | ||
809 | { | ||
810 | struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); | ||
811 | |||
812 | if (clk_dce == NULL) { | ||
813 | BREAK_TO_DEBUGGER(); | ||
814 | return NULL; | ||
815 | } | ||
816 | |||
817 | memcpy(clk_dce->max_clks_by_state, | ||
818 | dce80_max_clks_by_state, | ||
819 | sizeof(dce80_max_clks_by_state)); | ||
820 | |||
821 | dce_dccg_construct( | ||
822 | clk_dce, ctx, regs, clk_shift, clk_mask); | ||
823 | |||
824 | return &clk_dce->base; | ||
825 | } | ||
826 | |||
827 | struct dccg *dce110_dccg_create( | ||
828 | struct dc_context *ctx, | ||
829 | const struct dccg_registers *regs, | ||
830 | const struct dccg_shift *clk_shift, | ||
831 | const struct dccg_mask *clk_mask) | ||
832 | { | ||
833 | struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); | ||
834 | |||
835 | if (clk_dce == NULL) { | ||
836 | BREAK_TO_DEBUGGER(); | ||
837 | return NULL; | ||
838 | } | ||
839 | |||
840 | memcpy(clk_dce->max_clks_by_state, | ||
841 | dce110_max_clks_by_state, | ||
842 | sizeof(dce110_max_clks_by_state)); | ||
843 | |||
844 | dce_dccg_construct( | ||
845 | clk_dce, ctx, regs, clk_shift, clk_mask); | ||
846 | |||
847 | clk_dce->base.funcs = &dce110_funcs; | ||
848 | |||
849 | return &clk_dce->base; | ||
850 | } | ||
851 | |||
852 | struct dccg *dce112_dccg_create( | ||
853 | struct dc_context *ctx, | ||
854 | const struct dccg_registers *regs, | ||
855 | const struct dccg_shift *clk_shift, | ||
856 | const struct dccg_mask *clk_mask) | ||
857 | { | ||
858 | struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); | ||
859 | |||
860 | if (clk_dce == NULL) { | ||
861 | BREAK_TO_DEBUGGER(); | ||
862 | return NULL; | ||
863 | } | ||
864 | |||
865 | memcpy(clk_dce->max_clks_by_state, | ||
866 | dce112_max_clks_by_state, | ||
867 | sizeof(dce112_max_clks_by_state)); | ||
868 | |||
869 | dce_dccg_construct( | ||
870 | clk_dce, ctx, regs, clk_shift, clk_mask); | ||
871 | |||
872 | clk_dce->base.funcs = &dce112_funcs; | ||
873 | |||
874 | return &clk_dce->base; | ||
875 | } | ||
876 | |||
877 | struct dccg *dce120_dccg_create(struct dc_context *ctx) | ||
878 | { | ||
879 | struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); | ||
880 | |||
881 | if (clk_dce == NULL) { | ||
882 | BREAK_TO_DEBUGGER(); | ||
883 | return NULL; | ||
884 | } | ||
885 | |||
886 | memcpy(clk_dce->max_clks_by_state, | ||
887 | dce120_max_clks_by_state, | ||
888 | sizeof(dce120_max_clks_by_state)); | ||
889 | |||
890 | dce_dccg_construct( | ||
891 | clk_dce, ctx, NULL, NULL, NULL); | ||
892 | |||
893 | clk_dce->dprefclk_khz = 600000; | ||
894 | clk_dce->base.funcs = &dce120_funcs; | ||
895 | |||
896 | return &clk_dce->base; | ||
897 | } | ||
898 | |||
899 | #ifdef CONFIG_DRM_AMD_DC_DCN1_0 | ||
900 | struct dccg *dcn1_dccg_create(struct dc_context *ctx) | ||
901 | { | ||
902 | struct dc_debug_options *debug = &ctx->dc->debug; | ||
903 | struct dc_bios *bp = ctx->dc_bios; | ||
904 | struct dc_firmware_info fw_info = { { 0 } }; | ||
905 | struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); | ||
906 | |||
907 | if (clk_dce == NULL) { | ||
908 | BREAK_TO_DEBUGGER(); | ||
909 | return NULL; | ||
910 | } | ||
911 | |||
912 | clk_dce->base.ctx = ctx; | ||
913 | clk_dce->base.funcs = &dcn1_funcs; | ||
914 | |||
915 | clk_dce->dfs_bypass_disp_clk = 0; | ||
916 | |||
917 | clk_dce->dprefclk_ss_percentage = 0; | ||
918 | clk_dce->dprefclk_ss_divider = 1000; | ||
919 | clk_dce->ss_on_dprefclk = false; | ||
920 | |||
921 | clk_dce->dprefclk_khz = 600000; | ||
922 | if (bp->integrated_info) | ||
923 | clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; | ||
924 | if (clk_dce->dentist_vco_freq_khz == 0) { | ||
925 | bp->funcs->get_firmware_info(bp, &fw_info); | ||
926 | clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq; | ||
927 | if (clk_dce->dentist_vco_freq_khz == 0) | ||
928 | clk_dce->dentist_vco_freq_khz = 3600000; | ||
929 | } | ||
930 | |||
931 | if (!debug->disable_dfs_bypass && bp->integrated_info) | ||
932 | if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) | ||
933 | clk_dce->dfs_bypass_enabled = true; | ||
934 | |||
935 | dce_clock_read_ss_info(clk_dce); | ||
936 | |||
937 | return &clk_dce->base; | ||
938 | } | ||
939 | #endif | ||
940 | |||
941 | void dce_dccg_destroy(struct dccg **dccg) | ||
942 | { | ||
943 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg); | ||
944 | |||
945 | kfree(clk_dce); | ||
946 | *dccg = NULL; | ||
947 | } | ||
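
A hedged sketch of how these constructors pair with dce_dccg_destroy() in a resource pool; the register structures are assumed to be defined per ASIC (as in the dce1xx resource files) and the wrapper name is hypothetical:

	static bool example_create_display_clock(struct dc_context *ctx,
			const struct dccg_registers *regs,
			const struct dccg_shift *shift,
			const struct dccg_mask *mask,
			struct dccg **out_dccg)
	{
		*out_dccg = dce110_dccg_create(ctx, regs, shift, mask);
		if (*out_dccg == NULL)
			return false;	/* create path already hit BREAK_TO_DEBUGGER() */

		return true;
	}

	/* on teardown: dce_dccg_destroy(out_dccg) frees the object and resets
	 * the pointer to NULL, as shown above. */
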
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 64dc75378541..c83a7f05f14c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | |||
@@ -233,6 +233,16 @@ struct dce_hwseq_registers { | |||
233 | uint32_t DOMAIN5_PG_CONFIG; | 233 | uint32_t DOMAIN5_PG_CONFIG; |
234 | uint32_t DOMAIN6_PG_CONFIG; | 234 | uint32_t DOMAIN6_PG_CONFIG; |
235 | uint32_t DOMAIN7_PG_CONFIG; | 235 | uint32_t DOMAIN7_PG_CONFIG; |
236 | uint32_t DOMAIN8_PG_CONFIG; | ||
237 | uint32_t DOMAIN9_PG_CONFIG; | ||
238 | uint32_t DOMAIN10_PG_CONFIG; | ||
239 | uint32_t DOMAIN11_PG_CONFIG; | ||
240 | uint32_t DOMAIN16_PG_CONFIG; | ||
241 | uint32_t DOMAIN17_PG_CONFIG; | ||
242 | uint32_t DOMAIN18_PG_CONFIG; | ||
243 | uint32_t DOMAIN19_PG_CONFIG; | ||
244 | uint32_t DOMAIN20_PG_CONFIG; | ||
245 | uint32_t DOMAIN21_PG_CONFIG; | ||
236 | uint32_t DOMAIN0_PG_STATUS; | 246 | uint32_t DOMAIN0_PG_STATUS; |
237 | uint32_t DOMAIN1_PG_STATUS; | 247 | uint32_t DOMAIN1_PG_STATUS; |
238 | uint32_t DOMAIN2_PG_STATUS; | 248 | uint32_t DOMAIN2_PG_STATUS; |
@@ -241,6 +251,16 @@ struct dce_hwseq_registers { | |||
241 | uint32_t DOMAIN5_PG_STATUS; | 251 | uint32_t DOMAIN5_PG_STATUS; |
242 | uint32_t DOMAIN6_PG_STATUS; | 252 | uint32_t DOMAIN6_PG_STATUS; |
243 | uint32_t DOMAIN7_PG_STATUS; | 253 | uint32_t DOMAIN7_PG_STATUS; |
254 | uint32_t DOMAIN8_PG_STATUS; | ||
255 | uint32_t DOMAIN9_PG_STATUS; | ||
256 | uint32_t DOMAIN10_PG_STATUS; | ||
257 | uint32_t DOMAIN11_PG_STATUS; | ||
258 | uint32_t DOMAIN16_PG_STATUS; | ||
259 | uint32_t DOMAIN17_PG_STATUS; | ||
260 | uint32_t DOMAIN18_PG_STATUS; | ||
261 | uint32_t DOMAIN19_PG_STATUS; | ||
262 | uint32_t DOMAIN20_PG_STATUS; | ||
263 | uint32_t DOMAIN21_PG_STATUS; | ||
244 | uint32_t DIO_MEM_PWR_CTRL; | 264 | uint32_t DIO_MEM_PWR_CTRL; |
245 | uint32_t DCCG_GATE_DISABLE_CNTL; | 265 | uint32_t DCCG_GATE_DISABLE_CNTL; |
246 | uint32_t DCCG_GATE_DISABLE_CNTL2; | 266 | uint32_t DCCG_GATE_DISABLE_CNTL2; |
@@ -262,6 +282,8 @@ struct dce_hwseq_registers { | |||
262 | uint32_t D2VGA_CONTROL; | 282 | uint32_t D2VGA_CONTROL; |
263 | uint32_t D3VGA_CONTROL; | 283 | uint32_t D3VGA_CONTROL; |
264 | uint32_t D4VGA_CONTROL; | 284 | uint32_t D4VGA_CONTROL; |
285 | uint32_t D5VGA_CONTROL; | ||
286 | uint32_t D6VGA_CONTROL; | ||
265 | uint32_t VGA_TEST_CONTROL; | 287 | uint32_t VGA_TEST_CONTROL; |
266 | /* MMHUB registers. read only. temporary hack */ | 288 | /* MMHUB registers. read only. temporary hack */ |
267 | uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; | 289 | uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; |
@@ -489,6 +511,26 @@ struct dce_hwseq_registers { | |||
489 | type DOMAIN6_POWER_GATE; \ | 511 | type DOMAIN6_POWER_GATE; \ |
490 | type DOMAIN7_POWER_FORCEON; \ | 512 | type DOMAIN7_POWER_FORCEON; \ |
491 | type DOMAIN7_POWER_GATE; \ | 513 | type DOMAIN7_POWER_GATE; \ |
514 | type DOMAIN8_POWER_FORCEON; \ | ||
515 | type DOMAIN8_POWER_GATE; \ | ||
516 | type DOMAIN9_POWER_FORCEON; \ | ||
517 | type DOMAIN9_POWER_GATE; \ | ||
518 | type DOMAIN10_POWER_FORCEON; \ | ||
519 | type DOMAIN10_POWER_GATE; \ | ||
520 | type DOMAIN11_POWER_FORCEON; \ | ||
521 | type DOMAIN11_POWER_GATE; \ | ||
522 | type DOMAIN16_POWER_FORCEON; \ | ||
523 | type DOMAIN16_POWER_GATE; \ | ||
524 | type DOMAIN17_POWER_FORCEON; \ | ||
525 | type DOMAIN17_POWER_GATE; \ | ||
526 | type DOMAIN18_POWER_FORCEON; \ | ||
527 | type DOMAIN18_POWER_GATE; \ | ||
528 | type DOMAIN19_POWER_FORCEON; \ | ||
529 | type DOMAIN19_POWER_GATE; \ | ||
530 | type DOMAIN20_POWER_FORCEON; \ | ||
531 | type DOMAIN20_POWER_GATE; \ | ||
532 | type DOMAIN21_POWER_FORCEON; \ | ||
533 | type DOMAIN21_POWER_GATE; \ | ||
492 | type DOMAIN0_PGFSM_PWR_STATUS; \ | 534 | type DOMAIN0_PGFSM_PWR_STATUS; \ |
493 | type DOMAIN1_PGFSM_PWR_STATUS; \ | 535 | type DOMAIN1_PGFSM_PWR_STATUS; \ |
494 | type DOMAIN2_PGFSM_PWR_STATUS; \ | 536 | type DOMAIN2_PGFSM_PWR_STATUS; \ |
@@ -497,6 +539,16 @@ struct dce_hwseq_registers { | |||
497 | type DOMAIN5_PGFSM_PWR_STATUS; \ | 539 | type DOMAIN5_PGFSM_PWR_STATUS; \ |
498 | type DOMAIN6_PGFSM_PWR_STATUS; \ | 540 | type DOMAIN6_PGFSM_PWR_STATUS; \ |
499 | type DOMAIN7_PGFSM_PWR_STATUS; \ | 541 | type DOMAIN7_PGFSM_PWR_STATUS; \ |
542 | type DOMAIN8_PGFSM_PWR_STATUS; \ | ||
543 | type DOMAIN9_PGFSM_PWR_STATUS; \ | ||
544 | type DOMAIN10_PGFSM_PWR_STATUS; \ | ||
545 | type DOMAIN11_PGFSM_PWR_STATUS; \ | ||
546 | type DOMAIN16_PGFSM_PWR_STATUS; \ | ||
547 | type DOMAIN17_PGFSM_PWR_STATUS; \ | ||
548 | type DOMAIN18_PGFSM_PWR_STATUS; \ | ||
549 | type DOMAIN19_PGFSM_PWR_STATUS; \ | ||
550 | type DOMAIN20_PGFSM_PWR_STATUS; \ | ||
551 | type DOMAIN21_PGFSM_PWR_STATUS; \ | ||
500 | type DCFCLK_GATE_DIS; \ | 552 | type DCFCLK_GATE_DIS; \ |
501 | type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ | 553 | type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ |
502 | type VGA_TEST_ENABLE; \ | 554 | type VGA_TEST_ENABLE; \ |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 366bc8c2c643..3e18ea84b1f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | |||
@@ -645,7 +645,7 @@ static bool dce110_link_encoder_validate_hdmi_output( | |||
645 | return false; | 645 | return false; |
646 | 646 | ||
647 | /* DCE11 HW does not support 420 */ | 647 | /* DCE11 HW does not support 420 */ |
648 | if (!enc110->base.features.ycbcr420_supported && | 648 | if (!enc110->base.features.hdmi_ycbcr420_supported && |
649 | crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) | 649 | crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) |
650 | return false; | 650 | return false; |
651 | 651 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c index 74c05e878807..bc50a8e25f4f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | |||
@@ -105,74 +105,18 @@ bool dce100_enable_display_power_gating( | |||
105 | return false; | 105 | return false; |
106 | } | 106 | } |
107 | 107 | ||
108 | static void dce100_pplib_apply_display_requirements( | 108 | void dce100_prepare_bandwidth( |
109 | struct dc *dc, | ||
110 | struct dc_state *context) | ||
111 | { | ||
112 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | ||
113 | |||
114 | pp_display_cfg->avail_mclk_switch_time_us = | ||
115 | dce110_get_min_vblank_time_us(context); | ||
116 | /*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz | ||
117 | / MEMORY_TYPE_MULTIPLIER;*/ | ||
118 | |||
119 | dce110_fill_display_configs(context, pp_display_cfg); | ||
120 | |||
121 | if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof( | ||
122 | struct dm_pp_display_configuration)) != 0) | ||
123 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
124 | |||
125 | dc->prev_display_config = *pp_display_cfg; | ||
126 | } | ||
127 | |||
128 | /* unit: in_khz before mode set, get pixel clock from context. ASIC register | ||
129 | * may not be programmed yet | ||
130 | */ | ||
131 | static uint32_t get_max_pixel_clock_for_all_paths( | ||
132 | struct dc *dc, | ||
133 | struct dc_state *context) | ||
134 | { | ||
135 | uint32_t max_pix_clk = 0; | ||
136 | int i; | ||
137 | |||
138 | for (i = 0; i < MAX_PIPES; i++) { | ||
139 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
140 | |||
141 | if (pipe_ctx->stream == NULL) | ||
142 | continue; | ||
143 | |||
144 | /* do not check under lay */ | ||
145 | if (pipe_ctx->top_pipe) | ||
146 | continue; | ||
147 | |||
148 | if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) | ||
149 | max_pix_clk = | ||
150 | pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; | ||
151 | } | ||
152 | return max_pix_clk; | ||
153 | } | ||
154 | |||
155 | void dce100_set_bandwidth( | ||
156 | struct dc *dc, | 109 | struct dc *dc, |
157 | struct dc_state *context, | 110 | struct dc_state *context) |
158 | bool decrease_allowed) | ||
159 | { | 111 | { |
160 | struct dc_clocks req_clks; | ||
161 | |||
162 | req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; | ||
163 | req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context); | ||
164 | |||
165 | dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); | 112 | dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); |
166 | 113 | ||
167 | dc->res_pool->dccg->funcs->update_clocks( | 114 | dc->res_pool->clk_mgr->funcs->update_clocks( |
168 | dc->res_pool->dccg, | 115 | dc->res_pool->clk_mgr, |
169 | &req_clks, | 116 | context, |
170 | decrease_allowed); | 117 | false); |
171 | |||
172 | dce100_pplib_apply_display_requirements(dc, context); | ||
173 | } | 118 | } |
174 | 119 | ||
175 | |||
176 | /**************************************************************************/ | 120 | /**************************************************************************/ |
177 | 121 | ||
178 | void dce100_hw_sequencer_construct(struct dc *dc) | 122 | void dce100_hw_sequencer_construct(struct dc *dc) |
@@ -180,8 +124,7 @@ void dce100_hw_sequencer_construct(struct dc *dc) | |||
180 | dce110_hw_sequencer_construct(dc); | 124 | dce110_hw_sequencer_construct(dc); |
181 | 125 | ||
182 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; | 126 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; |
183 | dc->hwss.set_bandwidth = dce100_set_bandwidth; | 127 | dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; |
184 | dc->hwss.pplib_apply_display_requirements = | 128 | dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; |
185 | dce100_pplib_apply_display_requirements; | ||
186 | } | 129 | } |
187 | 130 | ||
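
With this change dce100 no longer builds the PPLIB display configuration itself: the former set_bandwidth/pplib_apply_display_requirements pair is replaced by prepare_bandwidth and optimize_bandwidth hooks (here both backed by dce100_prepare_bandwidth), which set safe display marks and hand the whole dc_state to the clock manager's update_clocks(), where the clock and PPLIB handling is expected to live from now on.
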
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index c6ec0ed6ec3d..acd418515346 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h | |||
@@ -33,10 +33,9 @@ struct dc_state; | |||
33 | 33 | ||
34 | void dce100_hw_sequencer_construct(struct dc *dc); | 34 | void dce100_hw_sequencer_construct(struct dc *dc); |
35 | 35 | ||
36 | void dce100_set_bandwidth( | 36 | void dce100_prepare_bandwidth( |
37 | struct dc *dc, | 37 | struct dc *dc, |
38 | struct dc_state *context, | 38 | struct dc_state *context); |
39 | bool decrease_allowed); | ||
40 | 39 | ||
41 | bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, | 40 | bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, |
42 | struct dc_bios *dcb, | 41 | struct dc_bios *dcb, |
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 14754a87156c..6ae51a5dfc04 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | |||
@@ -36,11 +36,11 @@ | |||
36 | #include "dce/dce_link_encoder.h" | 36 | #include "dce/dce_link_encoder.h" |
37 | #include "dce/dce_stream_encoder.h" | 37 | #include "dce/dce_stream_encoder.h" |
38 | 38 | ||
39 | #include "dce/dce_clk_mgr.h" | ||
39 | #include "dce/dce_mem_input.h" | 40 | #include "dce/dce_mem_input.h" |
40 | #include "dce/dce_ipp.h" | 41 | #include "dce/dce_ipp.h" |
41 | #include "dce/dce_transform.h" | 42 | #include "dce/dce_transform.h" |
42 | #include "dce/dce_opp.h" | 43 | #include "dce/dce_opp.h" |
43 | #include "dce/dce_clocks.h" | ||
44 | #include "dce/dce_clock_source.h" | 44 | #include "dce/dce_clock_source.h" |
45 | #include "dce/dce_audio.h" | 45 | #include "dce/dce_audio.h" |
46 | #include "dce/dce_hwseq.h" | 46 | #include "dce/dce_hwseq.h" |
@@ -137,15 +137,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = { | |||
137 | .reg_name = mm ## block ## id ## _ ## reg_name | 137 | .reg_name = mm ## block ## id ## _ ## reg_name |
138 | 138 | ||
139 | 139 | ||
140 | static const struct dccg_registers disp_clk_regs = { | 140 | static const struct clk_mgr_registers disp_clk_regs = { |
141 | CLK_COMMON_REG_LIST_DCE_BASE() | 141 | CLK_COMMON_REG_LIST_DCE_BASE() |
142 | }; | 142 | }; |
143 | 143 | ||
144 | static const struct dccg_shift disp_clk_shift = { | 144 | static const struct clk_mgr_shift disp_clk_shift = { |
145 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) | 145 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) |
146 | }; | 146 | }; |
147 | 147 | ||
148 | static const struct dccg_mask disp_clk_mask = { | 148 | static const struct clk_mgr_mask disp_clk_mask = { |
149 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) | 149 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) |
150 | }; | 150 | }; |
151 | 151 | ||
@@ -722,8 +722,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
722 | dce_aud_destroy(&pool->base.audios[i]); | 722 | dce_aud_destroy(&pool->base.audios[i]); |
723 | } | 723 | } |
724 | 724 | ||
725 | if (pool->base.dccg != NULL) | 725 | if (pool->base.clk_mgr != NULL) |
726 | dce_dccg_destroy(&pool->base.dccg); | 726 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
727 | 727 | ||
728 | if (pool->base.abm != NULL) | 728 | if (pool->base.abm != NULL) |
729 | dce_abm_destroy(&pool->base.abm); | 729 | dce_abm_destroy(&pool->base.abm); |
@@ -767,7 +767,7 @@ bool dce100_validate_bandwidth( | |||
767 | if (at_least_one_pipe) { | 767 | if (at_least_one_pipe) { |
768 | /* TODO implement when needed but for now hardcode max value*/ | 768 | /* TODO implement when needed but for now hardcode max value*/ |
769 | context->bw.dce.dispclk_khz = 681000; | 769 | context->bw.dce.dispclk_khz = 681000; |
770 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; | 770 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; |
771 | } else { | 771 | } else { |
772 | context->bw.dce.dispclk_khz = 0; | 772 | context->bw.dce.dispclk_khz = 0; |
773 | context->bw.dce.yclk_khz = 0; | 773 | context->bw.dce.yclk_khz = 0; |
@@ -860,7 +860,6 @@ static bool construct( | |||
860 | struct dc_context *ctx = dc->ctx; | 860 | struct dc_context *ctx = dc->ctx; |
861 | struct dc_firmware_info info; | 861 | struct dc_firmware_info info; |
862 | struct dc_bios *bp; | 862 | struct dc_bios *bp; |
863 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
864 | 863 | ||
865 | ctx->dc_bios->regs = &bios_regs; | 864 | ctx->dc_bios->regs = &bios_regs; |
866 | 865 | ||
@@ -908,11 +907,11 @@ static bool construct( | |||
908 | } | 907 | } |
909 | } | 908 | } |
910 | 909 | ||
911 | pool->base.dccg = dce_dccg_create(ctx, | 910 | pool->base.clk_mgr = dce_clk_mgr_create(ctx, |
912 | &disp_clk_regs, | 911 | &disp_clk_regs, |
913 | &disp_clk_shift, | 912 | &disp_clk_shift, |
914 | &disp_clk_mask); | 913 | &disp_clk_mask); |
915 | if (pool->base.dccg == NULL) { | 914 | if (pool->base.clk_mgr == NULL) { |
916 | dm_error("DC: failed to create display clock!\n"); | 915 | dm_error("DC: failed to create display clock!\n"); |
917 | BREAK_TO_DEBUGGER(); | 916 | BREAK_TO_DEBUGGER(); |
918 | goto res_create_fail; | 917 | goto res_create_fail; |
@@ -938,12 +937,6 @@ static bool construct( | |||
938 | goto res_create_fail; | 937 | goto res_create_fail; |
939 | } | 938 | } |
940 | 939 | ||
941 | /* get static clock information for PPLIB or firmware, save | ||
942 | * max_clock_state | ||
943 | */ | ||
944 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
945 | pool->base.dccg->max_clks_state = | ||
946 | static_clk_info.max_clocks_state; | ||
947 | { | 940 | { |
948 | struct irq_service_init_data init_data; | 941 | struct irq_service_init_data init_data; |
949 | init_data.ctx = dc->ctx; | 942 | init_data.ctx = dc->ctx; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index b75ede5f84f7..9724a17e352b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
@@ -548,14 +548,14 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, | |||
548 | 548 | ||
549 | regamma_params->hw_points_num = hw_points; | 549 | regamma_params->hw_points_num = hw_points; |
550 | 550 | ||
551 | i = 1; | 551 | k = 0; |
552 | for (k = 0; k < 16 && i < 16; k++) { | 552 | for (i = 1; i < 16; i++) { |
553 | if (seg_distr[k] != -1) { | 553 | if (seg_distr[k] != -1) { |
554 | regamma_params->arr_curve_points[k].segments_num = seg_distr[k]; | 554 | regamma_params->arr_curve_points[k].segments_num = seg_distr[k]; |
555 | regamma_params->arr_curve_points[i].offset = | 555 | regamma_params->arr_curve_points[i].offset = |
556 | regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]); | 556 | regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]); |
557 | } | 557 | } |
558 | i++; | 558 | k++; |
559 | } | 559 | } |
560 | 560 | ||
561 | if (seg_distr[k] != -1) | 561 | if (seg_distr[k] != -1) |
@@ -1085,7 +1085,6 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, | |||
1085 | 1085 | ||
1086 | if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { | 1086 | if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { |
1087 | link->dc->hwss.edp_backlight_control(link, true); | 1087 | link->dc->hwss.edp_backlight_control(link, true); |
1088 | stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL; | ||
1089 | } | 1088 | } |
1090 | } | 1089 | } |
1091 | void dce110_blank_stream(struct pipe_ctx *pipe_ctx) | 1090 | void dce110_blank_stream(struct pipe_ctx *pipe_ctx) |
@@ -1192,8 +1191,8 @@ static void build_audio_output( | |||
1192 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || | 1191 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || |
1193 | pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { | 1192 | pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { |
1194 | audio_output->pll_info.dp_dto_source_clock_in_khz = | 1193 | audio_output->pll_info.dp_dto_source_clock_in_khz = |
1195 | state->dis_clk->funcs->get_dp_ref_clk_frequency( | 1194 | state->dccg->funcs->get_dp_ref_clk_frequency( |
1196 | state->dis_clk); | 1195 | state->dccg); |
1197 | } | 1196 | } |
1198 | 1197 | ||
1199 | audio_output->pll_info.feed_back_divider = | 1198 | audio_output->pll_info.feed_back_divider = |
@@ -1547,6 +1546,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) | |||
1547 | int i; | 1546 | int i; |
1548 | struct dc_link *edp_link_to_turnoff = NULL; | 1547 | struct dc_link *edp_link_to_turnoff = NULL; |
1549 | struct dc_link *edp_link = get_link_for_edp(dc); | 1548 | struct dc_link *edp_link = get_link_for_edp(dc); |
1549 | struct dc_bios *bios = dc->ctx->dc_bios; | ||
1550 | bool can_edp_fast_boot_optimize = false; | 1550 | bool can_edp_fast_boot_optimize = false; |
1551 | bool apply_edp_fast_boot_optimization = false; | 1551 | bool apply_edp_fast_boot_optimization = false; |
1552 | 1552 | ||
@@ -1573,6 +1573,20 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) | |||
1573 | if (context->streams[i]->signal == SIGNAL_TYPE_EDP) { | 1573 | if (context->streams[i]->signal == SIGNAL_TYPE_EDP) { |
1574 | context->streams[i]->apply_edp_fast_boot_optimization = true; | 1574 | context->streams[i]->apply_edp_fast_boot_optimization = true; |
1575 | apply_edp_fast_boot_optimization = true; | 1575 | apply_edp_fast_boot_optimization = true; |
1576 | |||
1577 | /* When after S4 and S5, vbios may post edp and previous dpms_off | ||
1578 | * doesn't make sense. | ||
1579 | * Update dpms_off state to align hw and sw state via check | ||
1580 | * vBios scratch register. | ||
1581 | */ | ||
1582 | if (bios->funcs->is_active_display) { | ||
1583 | const struct connector_device_tag_info *device_tag = &(edp_link->device_tag); | ||
1584 | |||
1585 | if (bios->funcs->is_active_display(bios, | ||
1586 | context->streams[i]->signal, | ||
1587 | device_tag)) | ||
1588 | context->streams[i]->dpms_off = false; | ||
1589 | } | ||
1576 | } | 1590 | } |
1577 | } | 1591 | } |
1578 | } | 1592 | } |
@@ -1736,41 +1750,18 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx, | |||
1736 | if (events->force_trigger) | 1750 | if (events->force_trigger) |
1737 | value |= 0x1; | 1751 | value |= 0x1; |
1738 | 1752 | ||
1739 | value |= 0x84; | 1753 | if (num_pipes) { |
1754 | struct dc *dc = pipe_ctx[0]->stream->ctx->dc; | ||
1755 | |||
1756 | if (dc->fbc_compressor) | ||
1757 | value |= 0x84; | ||
1758 | } | ||
1740 | 1759 | ||
1741 | for (i = 0; i < num_pipes; i++) | 1760 | for (i = 0; i < num_pipes; i++) |
1742 | pipe_ctx[i]->stream_res.tg->funcs-> | 1761 | pipe_ctx[i]->stream_res.tg->funcs-> |
1743 | set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); | 1762 | set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); |
1744 | } | 1763 | } |
1745 | 1764 | ||
1746 | /* unit: in_khz before mode set, get pixel clock from context. ASIC register | ||
1747 | * may not be programmed yet | ||
1748 | */ | ||
1749 | static uint32_t get_max_pixel_clock_for_all_paths( | ||
1750 | struct dc *dc, | ||
1751 | struct dc_state *context) | ||
1752 | { | ||
1753 | uint32_t max_pix_clk = 0; | ||
1754 | int i; | ||
1755 | |||
1756 | for (i = 0; i < MAX_PIPES; i++) { | ||
1757 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
1758 | |||
1759 | if (pipe_ctx->stream == NULL) | ||
1760 | continue; | ||
1761 | |||
1762 | /* do not check under lay */ | ||
1763 | if (pipe_ctx->top_pipe) | ||
1764 | continue; | ||
1765 | |||
1766 | if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) | ||
1767 | max_pix_clk = | ||
1768 | pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; | ||
1769 | } | ||
1770 | |||
1771 | return max_pix_clk; | ||
1772 | } | ||
1773 | |||
1774 | /* | 1765 | /* |
1775 | * Check if FBC can be enabled | 1766 | * Check if FBC can be enabled |
1776 | */ | 1767 | */ |
@@ -2380,191 +2371,33 @@ static void init_hw(struct dc *dc) | |||
2380 | 2371 | ||
2381 | } | 2372 | } |
2382 | 2373 | ||
2383 | void dce110_fill_display_configs( | ||
2384 | const struct dc_state *context, | ||
2385 | struct dm_pp_display_configuration *pp_display_cfg) | ||
2386 | { | ||
2387 | int j; | ||
2388 | int num_cfgs = 0; | ||
2389 | |||
2390 | for (j = 0; j < context->stream_count; j++) { | ||
2391 | int k; | ||
2392 | |||
2393 | const struct dc_stream_state *stream = context->streams[j]; | ||
2394 | struct dm_pp_single_disp_config *cfg = | ||
2395 | &pp_display_cfg->disp_configs[num_cfgs]; | ||
2396 | const struct pipe_ctx *pipe_ctx = NULL; | ||
2397 | |||
2398 | for (k = 0; k < MAX_PIPES; k++) | ||
2399 | if (stream == context->res_ctx.pipe_ctx[k].stream) { | ||
2400 | pipe_ctx = &context->res_ctx.pipe_ctx[k]; | ||
2401 | break; | ||
2402 | } | ||
2403 | |||
2404 | ASSERT(pipe_ctx != NULL); | ||
2405 | |||
2406 | /* only notify active stream */ | ||
2407 | if (stream->dpms_off) | ||
2408 | continue; | ||
2409 | |||
2410 | num_cfgs++; | ||
2411 | cfg->signal = pipe_ctx->stream->signal; | ||
2412 | cfg->pipe_idx = pipe_ctx->stream_res.tg->inst; | ||
2413 | cfg->src_height = stream->src.height; | ||
2414 | cfg->src_width = stream->src.width; | ||
2415 | cfg->ddi_channel_mapping = | ||
2416 | stream->sink->link->ddi_channel_mapping.raw; | ||
2417 | cfg->transmitter = | ||
2418 | stream->sink->link->link_enc->transmitter; | ||
2419 | cfg->link_settings.lane_count = | ||
2420 | stream->sink->link->cur_link_settings.lane_count; | ||
2421 | cfg->link_settings.link_rate = | ||
2422 | stream->sink->link->cur_link_settings.link_rate; | ||
2423 | cfg->link_settings.link_spread = | ||
2424 | stream->sink->link->cur_link_settings.link_spread; | ||
2425 | cfg->sym_clock = stream->phy_pix_clk; | ||
2426 | /* Round v_refresh*/ | ||
2427 | cfg->v_refresh = stream->timing.pix_clk_khz * 1000; | ||
2428 | cfg->v_refresh /= stream->timing.h_total; | ||
2429 | cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) | ||
2430 | / stream->timing.v_total; | ||
2431 | } | ||
2432 | |||
2433 | pp_display_cfg->display_count = num_cfgs; | ||
2434 | } | ||
2435 | |||
2436 | uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) | ||
2437 | { | ||
2438 | uint8_t j; | ||
2439 | uint32_t min_vertical_blank_time = -1; | ||
2440 | |||
2441 | for (j = 0; j < context->stream_count; j++) { | ||
2442 | struct dc_stream_state *stream = context->streams[j]; | ||
2443 | uint32_t vertical_blank_in_pixels = 0; | ||
2444 | uint32_t vertical_blank_time = 0; | ||
2445 | |||
2446 | vertical_blank_in_pixels = stream->timing.h_total * | ||
2447 | (stream->timing.v_total | ||
2448 | - stream->timing.v_addressable); | ||
2449 | |||
2450 | vertical_blank_time = vertical_blank_in_pixels | ||
2451 | * 1000 / stream->timing.pix_clk_khz; | ||
2452 | |||
2453 | if (min_vertical_blank_time > vertical_blank_time) | ||
2454 | min_vertical_blank_time = vertical_blank_time; | ||
2455 | } | ||
2456 | |||
2457 | return min_vertical_blank_time; | ||
2458 | } | ||
2459 | |||
2460 | static int determine_sclk_from_bounding_box( | ||
2461 | const struct dc *dc, | ||
2462 | int required_sclk) | ||
2463 | { | ||
2464 | int i; | ||
2465 | 2374 | ||
2466 | /* | 2375 | void dce110_prepare_bandwidth( |
2467 | * Some asics do not give us sclk levels, so we just report the actual | 2376 | struct dc *dc, |
2468 | * required sclk | 2377 | struct dc_state *context) |
2469 | */ | ||
2470 | if (dc->sclk_lvls.num_levels == 0) | ||
2471 | return required_sclk; | ||
2472 | |||
2473 | for (i = 0; i < dc->sclk_lvls.num_levels; i++) { | ||
2474 | if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk) | ||
2475 | return dc->sclk_lvls.clocks_in_khz[i]; | ||
2476 | } | ||
2477 | /* | ||
2478 | * even maximum level could not satisfy requirement, this | ||
2479 | * is unexpected at this stage, should have been caught at | ||
2480 | * validation time | ||
2481 | */ | ||
2482 | ASSERT(0); | ||
2483 | return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1]; | ||
2484 | } | ||
2485 | |||
2486 | static void pplib_apply_display_requirements( | ||
2487 | struct dc *dc, | ||
2488 | struct dc_state *context) | ||
2489 | { | 2378 | { |
2490 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | 2379 | struct clk_mgr *dccg = dc->res_pool->clk_mgr; |
2491 | 2380 | ||
2492 | pp_display_cfg->all_displays_in_sync = | 2381 | dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); |
2493 | context->bw.dce.all_displays_in_sync; | ||
2494 | pp_display_cfg->nb_pstate_switch_disable = | ||
2495 | context->bw.dce.nbp_state_change_enable == false; | ||
2496 | pp_display_cfg->cpu_cc6_disable = | ||
2497 | context->bw.dce.cpuc_state_change_enable == false; | ||
2498 | pp_display_cfg->cpu_pstate_disable = | ||
2499 | context->bw.dce.cpup_state_change_enable == false; | ||
2500 | pp_display_cfg->cpu_pstate_separation_time = | ||
2501 | context->bw.dce.blackout_recovery_time_us; | ||
2502 | 2382 | ||
2503 | pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz | 2383 | dccg->funcs->update_clocks( |
2504 | / MEMORY_TYPE_MULTIPLIER; | 2384 | dccg, |
2505 | 2385 | context, | |
2506 | pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box( | 2386 | false); |
2507 | dc, | ||
2508 | context->bw.dce.sclk_khz); | ||
2509 | |||
2510 | pp_display_cfg->min_engine_clock_deep_sleep_khz | ||
2511 | = context->bw.dce.sclk_deep_sleep_khz; | ||
2512 | |||
2513 | pp_display_cfg->avail_mclk_switch_time_us = | ||
2514 | dce110_get_min_vblank_time_us(context); | ||
2515 | /* TODO: dce11.2*/ | ||
2516 | pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; | ||
2517 | |||
2518 | pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz; | ||
2519 | |||
2520 | dce110_fill_display_configs(context, pp_display_cfg); | ||
2521 | |||
2522 | /* TODO: is this still applicable?*/ | ||
2523 | if (pp_display_cfg->display_count == 1) { | ||
2524 | const struct dc_crtc_timing *timing = | ||
2525 | &context->streams[0]->timing; | ||
2526 | |||
2527 | pp_display_cfg->crtc_index = | ||
2528 | pp_display_cfg->disp_configs[0].pipe_idx; | ||
2529 | pp_display_cfg->line_time_in_us = timing->h_total * 1000 | ||
2530 | / timing->pix_clk_khz; | ||
2531 | } | ||
2532 | |||
2533 | if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof( | ||
2534 | struct dm_pp_display_configuration)) != 0) | ||
2535 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
2536 | |||
2537 | dc->prev_display_config = *pp_display_cfg; | ||
2538 | } | 2387 | } |
2539 | 2388 | ||
2540 | static void dce110_set_bandwidth( | 2389 | void dce110_optimize_bandwidth( |
2541 | struct dc *dc, | 2390 | struct dc *dc, |
2542 | struct dc_state *context, | 2391 | struct dc_state *context) |
2543 | bool decrease_allowed) | ||
2544 | { | 2392 | { |
2545 | struct dc_clocks req_clks; | 2393 | struct clk_mgr *dccg = dc->res_pool->clk_mgr; |
2546 | struct dccg *dccg = dc->res_pool->dccg; | ||
2547 | |||
2548 | req_clks.dispclk_khz = context->bw.dce.dispclk_khz; | ||
2549 | req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context); | ||
2550 | |||
2551 | if (decrease_allowed) | ||
2552 | dce110_set_displaymarks(dc, context); | ||
2553 | else | ||
2554 | dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); | ||
2555 | 2394 | ||
2556 | if (dccg->funcs->update_dfs_bypass) | 2395 | dce110_set_displaymarks(dc, context); |
2557 | dccg->funcs->update_dfs_bypass( | ||
2558 | dccg, | ||
2559 | dc, | ||
2560 | context, | ||
2561 | req_clks.dispclk_khz); | ||
2562 | 2396 | ||
2563 | dccg->funcs->update_clocks( | 2397 | dccg->funcs->update_clocks( |
2564 | dccg, | 2398 | dccg, |
2565 | &req_clks, | 2399 | context, |
2566 | decrease_allowed); | 2400 | true); |
2567 | pplib_apply_display_requirements(dc, context); | ||
2568 | } | 2401 | } |
2569 | 2402 | ||
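
The former dce110_set_bandwidth(dc, context, decrease_allowed) is thereby split in two: dce110_prepare_bandwidth() keeps the safe (maximum) display marks and calls update_clocks() with safe_to_lower = false before the new state is programmed, while dce110_optimize_bandwidth() programs the real display marks and calls update_clocks() with safe_to_lower = true afterwards, allowing clocks to drop once it is safe.
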
2570 | static void dce110_program_front_end_for_pipe( | 2403 | static void dce110_program_front_end_for_pipe( |
@@ -2769,28 +2602,6 @@ static void dce110_wait_for_mpcc_disconnect( | |||
2769 | /* do nothing*/ | 2602 | /* do nothing*/ |
2770 | } | 2603 | } |
2771 | 2604 | ||
2772 | static void program_csc_matrix(struct pipe_ctx *pipe_ctx, | ||
2773 | enum dc_color_space colorspace, | ||
2774 | uint16_t *matrix) | ||
2775 | { | ||
2776 | int i; | ||
2777 | struct out_csc_color_matrix tbl_entry; | ||
2778 | |||
2779 | if (pipe_ctx->stream->csc_color_matrix.enable_adjustment | ||
2780 | == true) { | ||
2781 | enum dc_color_space color_space = | ||
2782 | pipe_ctx->stream->output_color_space; | ||
2783 | |||
2784 | //uint16_t matrix[12]; | ||
2785 | for (i = 0; i < 12; i++) | ||
2786 | tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i]; | ||
2787 | |||
2788 | tbl_entry.color_space = color_space; | ||
2789 | //tbl_entry.regval = matrix; | ||
2790 | pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry); | ||
2791 | } | ||
2792 | } | ||
2793 | |||
2794 | void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx) | 2605 | void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx) |
2795 | { | 2606 | { |
2796 | struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; | 2607 | struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; |
@@ -2839,13 +2650,8 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx) | |||
2839 | pipe_ctx->plane_res.xfm, attributes); | 2650 | pipe_ctx->plane_res.xfm, attributes); |
2840 | } | 2651 | } |
2841 | 2652 | ||
2842 | static void ready_shared_resources(struct dc *dc, struct dc_state *context) {} | ||
2843 | |||
2844 | static void optimize_shared_resources(struct dc *dc) {} | ||
2845 | |||
2846 | static const struct hw_sequencer_funcs dce110_funcs = { | 2653 | static const struct hw_sequencer_funcs dce110_funcs = { |
2847 | .program_gamut_remap = program_gamut_remap, | 2654 | .program_gamut_remap = program_gamut_remap, |
2848 | .program_csc_matrix = program_csc_matrix, | ||
2849 | .init_hw = init_hw, | 2655 | .init_hw = init_hw, |
2850 | .apply_ctx_to_hw = dce110_apply_ctx_to_hw, | 2656 | .apply_ctx_to_hw = dce110_apply_ctx_to_hw, |
2851 | .apply_ctx_for_surface = dce110_apply_ctx_for_surface, | 2657 | .apply_ctx_for_surface = dce110_apply_ctx_for_surface, |
@@ -2868,7 +2674,8 @@ static const struct hw_sequencer_funcs dce110_funcs = { | |||
2868 | .enable_display_power_gating = dce110_enable_display_power_gating, | 2674 | .enable_display_power_gating = dce110_enable_display_power_gating, |
2869 | .disable_plane = dce110_power_down_fe, | 2675 | .disable_plane = dce110_power_down_fe, |
2870 | .pipe_control_lock = dce_pipe_control_lock, | 2676 | .pipe_control_lock = dce_pipe_control_lock, |
2871 | .set_bandwidth = dce110_set_bandwidth, | 2677 | .prepare_bandwidth = dce110_prepare_bandwidth, |
2678 | .optimize_bandwidth = dce110_optimize_bandwidth, | ||
2872 | .set_drr = set_drr, | 2679 | .set_drr = set_drr, |
2873 | .get_position = get_position, | 2680 | .get_position = get_position, |
2874 | .set_static_screen_control = set_static_screen_control, | 2681 | .set_static_screen_control = set_static_screen_control, |
@@ -2877,9 +2684,6 @@ static const struct hw_sequencer_funcs dce110_funcs = { | |||
2877 | .setup_stereo = NULL, | 2684 | .setup_stereo = NULL, |
2878 | .set_avmute = dce110_set_avmute, | 2685 | .set_avmute = dce110_set_avmute, |
2879 | .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, | 2686 | .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, |
2880 | .ready_shared_resources = ready_shared_resources, | ||
2881 | .optimize_shared_resources = optimize_shared_resources, | ||
2882 | .pplib_apply_display_requirements = pplib_apply_display_requirements, | ||
2883 | .edp_backlight_control = hwss_edp_backlight_control, | 2687 | .edp_backlight_control = hwss_edp_backlight_control, |
2884 | .edp_power_control = hwss_edp_power_control, | 2688 | .edp_power_control = hwss_edp_power_control, |
2885 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, | 2689 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, |
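With the table above now exposing prepare_bandwidth/optimize_bandwidth instead of set_bandwidth (and no longer carrying the shared-resources or pplib hooks), a caller in core DC would bracket a state commit roughly like this — a hedged sketch only, since the core sequencing is outside this diff:

    static void commit_state_sketch(struct dc *dc, struct dc_state *context)
    {
            /* raise clocks and program safe watermarks before touching pipes */
            dc->hwss.prepare_bandwidth(dc, context);

            dc->hwss.apply_ctx_to_hw(dc, context);

            /* once the new state is live, clocks/watermarks may drop */
            dc->hwss.optimize_bandwidth(dc, context);
    }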
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index d6db3dbd9015..cd3e36d52a52 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | |||
@@ -40,7 +40,6 @@ enum dc_status dce110_apply_ctx_to_hw( | |||
40 | struct dc_state *context); | 40 | struct dc_state *context); |
41 | 41 | ||
42 | 42 | ||
43 | |||
44 | void dce110_enable_stream(struct pipe_ctx *pipe_ctx); | 43 | void dce110_enable_stream(struct pipe_ctx *pipe_ctx); |
45 | 44 | ||
46 | void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option); | 45 | void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option); |
@@ -64,11 +63,13 @@ void dce110_set_safe_displaymarks( | |||
64 | struct resource_context *res_ctx, | 63 | struct resource_context *res_ctx, |
65 | const struct resource_pool *pool); | 64 | const struct resource_pool *pool); |
66 | 65 | ||
67 | void dce110_fill_display_configs( | 66 | void dce110_prepare_bandwidth( |
68 | const struct dc_state *context, | 67 | struct dc *dc, |
69 | struct dm_pp_display_configuration *pp_display_cfg); | 68 | struct dc_state *context); |
70 | 69 | ||
71 | uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context); | 70 | void dce110_optimize_bandwidth( |
71 | struct dc *dc, | ||
72 | struct dc_state *context); | ||
72 | 73 | ||
73 | void dp_receiver_power_ctrl(struct dc_link *link, bool on); | 74 | void dp_receiver_power_ctrl(struct dc_link *link, bool on); |
74 | 75 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index e3624ca24574..e33d11785b1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "resource.h" | 31 | #include "resource.h" |
32 | #include "dce110/dce110_resource.h" | 32 | #include "dce110/dce110_resource.h" |
33 | 33 | ||
34 | #include "dce/dce_clk_mgr.h" | ||
34 | #include "include/irq_service_interface.h" | 35 | #include "include/irq_service_interface.h" |
35 | #include "dce/dce_audio.h" | 36 | #include "dce/dce_audio.h" |
36 | #include "dce110/dce110_timing_generator.h" | 37 | #include "dce110/dce110_timing_generator.h" |
@@ -45,7 +46,6 @@ | |||
45 | #include "dce110/dce110_transform_v.h" | 46 | #include "dce110/dce110_transform_v.h" |
46 | #include "dce/dce_opp.h" | 47 | #include "dce/dce_opp.h" |
47 | #include "dce110/dce110_opp_v.h" | 48 | #include "dce110/dce110_opp_v.h" |
48 | #include "dce/dce_clocks.h" | ||
49 | #include "dce/dce_clock_source.h" | 49 | #include "dce/dce_clock_source.h" |
50 | #include "dce/dce_hwseq.h" | 50 | #include "dce/dce_hwseq.h" |
51 | #include "dce110/dce110_hw_sequencer.h" | 51 | #include "dce110/dce110_hw_sequencer.h" |
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = { | |||
148 | #define SRI(reg_name, block, id)\ | 148 | #define SRI(reg_name, block, id)\ |
149 | .reg_name = mm ## block ## id ## _ ## reg_name | 149 | .reg_name = mm ## block ## id ## _ ## reg_name |
150 | 150 | ||
151 | static const struct dccg_registers disp_clk_regs = { | 151 | static const struct clk_mgr_registers disp_clk_regs = { |
152 | CLK_COMMON_REG_LIST_DCE_BASE() | 152 | CLK_COMMON_REG_LIST_DCE_BASE() |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static const struct dccg_shift disp_clk_shift = { | 155 | static const struct clk_mgr_shift disp_clk_shift = { |
156 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) | 156 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static const struct dccg_mask disp_clk_mask = { | 159 | static const struct clk_mgr_mask disp_clk_mask = { |
160 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) | 160 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) |
161 | }; | 161 | }; |
162 | 162 | ||
@@ -760,8 +760,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
760 | if (pool->base.dmcu != NULL) | 760 | if (pool->base.dmcu != NULL) |
761 | dce_dmcu_destroy(&pool->base.dmcu); | 761 | dce_dmcu_destroy(&pool->base.dmcu); |
762 | 762 | ||
763 | if (pool->base.dccg != NULL) | 763 | if (pool->base.clk_mgr != NULL) |
764 | dce_dccg_destroy(&pool->base.dccg); | 764 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
765 | 765 | ||
766 | if (pool->base.irqs != NULL) { | 766 | if (pool->base.irqs != NULL) { |
767 | dal_irq_service_destroy(&pool->base.irqs); | 767 | dal_irq_service_destroy(&pool->base.irqs); |
@@ -1173,12 +1173,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) | |||
1173 | &clks); | 1173 | &clks); |
1174 | 1174 | ||
1175 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( | 1175 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( |
1176 | clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000); | 1176 | clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); |
1177 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( | 1177 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( |
1178 | clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER, | 1178 | clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, |
1179 | 1000); | 1179 | 1000); |
1180 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( | 1180 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( |
1181 | clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER, | 1181 | clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, |
1182 | 1000); | 1182 | 1000); |
1183 | } | 1183 | } |
1184 | 1184 | ||
@@ -1201,7 +1201,6 @@ static bool construct( | |||
1201 | struct dc_context *ctx = dc->ctx; | 1201 | struct dc_context *ctx = dc->ctx; |
1202 | struct dc_firmware_info info; | 1202 | struct dc_firmware_info info; |
1203 | struct dc_bios *bp; | 1203 | struct dc_bios *bp; |
1204 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
1205 | 1204 | ||
1206 | ctx->dc_bios->regs = &bios_regs; | 1205 | ctx->dc_bios->regs = &bios_regs; |
1207 | 1206 | ||
@@ -1257,11 +1256,11 @@ static bool construct( | |||
1257 | } | 1256 | } |
1258 | } | 1257 | } |
1259 | 1258 | ||
1260 | pool->base.dccg = dce110_dccg_create(ctx, | 1259 | pool->base.clk_mgr = dce110_clk_mgr_create(ctx, |
1261 | &disp_clk_regs, | 1260 | &disp_clk_regs, |
1262 | &disp_clk_shift, | 1261 | &disp_clk_shift, |
1263 | &disp_clk_mask); | 1262 | &disp_clk_mask); |
1264 | if (pool->base.dccg == NULL) { | 1263 | if (pool->base.clk_mgr == NULL) { |
1265 | dm_error("DC: failed to create display clock!\n"); | 1264 | dm_error("DC: failed to create display clock!\n"); |
1266 | BREAK_TO_DEBUGGER(); | 1265 | BREAK_TO_DEBUGGER(); |
1267 | goto res_create_fail; | 1266 | goto res_create_fail; |
@@ -1287,13 +1286,6 @@ static bool construct( | |||
1287 | goto res_create_fail; | 1286 | goto res_create_fail; |
1288 | } | 1287 | } |
1289 | 1288 | ||
1290 | /* get static clock information for PPLIB or firmware, save | ||
1291 | * max_clock_state | ||
1292 | */ | ||
1293 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
1294 | pool->base.dccg->max_clks_state = | ||
1295 | static_clk_info.max_clocks_state; | ||
1296 | |||
1297 | { | 1289 | { |
1298 | struct irq_service_init_data init_data; | 1290 | struct irq_service_init_data init_data; |
1299 | init_data.ctx = dc->ctx; | 1291 | init_data.ctx = dc->ctx; |
@@ -1362,7 +1354,8 @@ static bool construct( | |||
1362 | pool->base.sw_i2cs[i] = NULL; | 1354 | pool->base.sw_i2cs[i] = NULL; |
1363 | } | 1355 | } |
1364 | 1356 | ||
1365 | dc->fbc_compressor = dce110_compressor_create(ctx); | 1357 | if (dc->config.fbc_support) |
1358 | dc->fbc_compressor = dce110_compressor_create(ctx); | ||
1366 | 1359 | ||
1367 | if (!underlay_create(ctx, &pool->base)) | 1360 | if (!underlay_create(ctx, &pool->base)) |
1368 | goto res_create_fail; | 1361 | goto res_create_fail; |
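One arithmetic note on the MEMORY_TYPE_MULTIPLIER → MEMORY_TYPE_MULTIPLIER_CZ rename in this file: the constant feeds the YCLK = UMACLK * memory-type-multiplier conversion used to build the bandwidth tables. A small worked sketch, assuming the Carrizo-style multiplier is 4 as defined in dce_clk_mgr.h (that definition is outside this hunk):

    /* e.g. lowest UMACLK level reported by pplib: 300000 kHz */
    int umaclk_khz = 300000;

    /* 300000 kHz * 4 = 1200000; bw_frc_to_fixed(1200000, 1000) then
     * stores 1200.0 MHz as a fixed-point value for the bw formulas.
     */
    dc->bw_vbios->low_yclk = bw_frc_to_fixed(
                    umaclk_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);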
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 3ce79c208ddf..969d4e72dc94 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | |||
@@ -35,6 +35,7 @@ | |||
35 | 35 | ||
36 | #include "irq/dce110/irq_service_dce110.h" | 36 | #include "irq/dce110/irq_service_dce110.h" |
37 | 37 | ||
38 | #include "dce/dce_clk_mgr.h" | ||
38 | #include "dce/dce_mem_input.h" | 39 | #include "dce/dce_mem_input.h" |
39 | #include "dce/dce_transform.h" | 40 | #include "dce/dce_transform.h" |
40 | #include "dce/dce_link_encoder.h" | 41 | #include "dce/dce_link_encoder.h" |
@@ -42,7 +43,6 @@ | |||
42 | #include "dce/dce_audio.h" | 43 | #include "dce/dce_audio.h" |
43 | #include "dce/dce_opp.h" | 44 | #include "dce/dce_opp.h" |
44 | #include "dce/dce_ipp.h" | 45 | #include "dce/dce_ipp.h" |
45 | #include "dce/dce_clocks.h" | ||
46 | #include "dce/dce_clock_source.h" | 46 | #include "dce/dce_clock_source.h" |
47 | 47 | ||
48 | #include "dce/dce_hwseq.h" | 48 | #include "dce/dce_hwseq.h" |
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = { | |||
148 | .reg_name = mm ## block ## id ## _ ## reg_name | 148 | .reg_name = mm ## block ## id ## _ ## reg_name |
149 | 149 | ||
150 | 150 | ||
151 | static const struct dccg_registers disp_clk_regs = { | 151 | static const struct clk_mgr_registers disp_clk_regs = { |
152 | CLK_COMMON_REG_LIST_DCE_BASE() | 152 | CLK_COMMON_REG_LIST_DCE_BASE() |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static const struct dccg_shift disp_clk_shift = { | 155 | static const struct clk_mgr_shift disp_clk_shift = { |
156 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) | 156 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static const struct dccg_mask disp_clk_mask = { | 159 | static const struct clk_mgr_mask disp_clk_mask = { |
160 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) | 160 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) |
161 | }; | 161 | }; |
162 | 162 | ||
@@ -551,7 +551,8 @@ static struct transform *dce112_transform_create( | |||
551 | static const struct encoder_feature_support link_enc_feature = { | 551 | static const struct encoder_feature_support link_enc_feature = { |
552 | .max_hdmi_deep_color = COLOR_DEPTH_121212, | 552 | .max_hdmi_deep_color = COLOR_DEPTH_121212, |
553 | .max_hdmi_pixel_clock = 600000, | 553 | .max_hdmi_pixel_clock = 600000, |
554 | .ycbcr420_supported = true, | 554 | .hdmi_ycbcr420_supported = true, |
555 | .dp_ycbcr420_supported = false, | ||
555 | .flags.bits.IS_HBR2_CAPABLE = true, | 556 | .flags.bits.IS_HBR2_CAPABLE = true, |
556 | .flags.bits.IS_HBR3_CAPABLE = true, | 557 | .flags.bits.IS_HBR3_CAPABLE = true, |
557 | .flags.bits.IS_TPS3_CAPABLE = true, | 558 | .flags.bits.IS_TPS3_CAPABLE = true, |
@@ -749,8 +750,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
749 | if (pool->base.dmcu != NULL) | 750 | if (pool->base.dmcu != NULL) |
750 | dce_dmcu_destroy(&pool->base.dmcu); | 751 | dce_dmcu_destroy(&pool->base.dmcu); |
751 | 752 | ||
752 | if (pool->base.dccg != NULL) | 753 | if (pool->base.clk_mgr != NULL) |
753 | dce_dccg_destroy(&pool->base.dccg); | 754 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
754 | 755 | ||
755 | if (pool->base.irqs != NULL) { | 756 | if (pool->base.irqs != NULL) { |
756 | dal_irq_service_destroy(&pool->base.irqs); | 757 | dal_irq_service_destroy(&pool->base.irqs); |
@@ -1015,12 +1016,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) | |||
1015 | &clks); | 1016 | &clks); |
1016 | 1017 | ||
1017 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( | 1018 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( |
1018 | clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000); | 1019 | clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); |
1019 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( | 1020 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( |
1020 | clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER, | 1021 | clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, |
1021 | 1000); | 1022 | 1000); |
1022 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( | 1023 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( |
1023 | clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER, | 1024 | clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, |
1024 | 1000); | 1025 | 1000); |
1025 | 1026 | ||
1026 | return; | 1027 | return; |
@@ -1056,12 +1057,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) | |||
1056 | * YCLK = UMACLK*m_memoryTypeMultiplier | 1057 | * YCLK = UMACLK*m_memoryTypeMultiplier |
1057 | */ | 1058 | */ |
1058 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( | 1059 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( |
1059 | mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000); | 1060 | mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); |
1060 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( | 1061 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( |
1061 | mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, | 1062 | mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, |
1062 | 1000); | 1063 | 1000); |
1063 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( | 1064 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( |
1064 | mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, | 1065 | mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, |
1065 | 1000); | 1066 | 1000); |
1066 | 1067 | ||
1067 | /* Now notify PPLib/SMU about which Watermarks sets they should select | 1068 | /* Now notify PPLib/SMU about which Watermarks sets they should select |
@@ -1131,7 +1132,6 @@ static bool construct( | |||
1131 | { | 1132 | { |
1132 | unsigned int i; | 1133 | unsigned int i; |
1133 | struct dc_context *ctx = dc->ctx; | 1134 | struct dc_context *ctx = dc->ctx; |
1134 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
1135 | 1135 | ||
1136 | ctx->dc_bios->regs = &bios_regs; | 1136 | ctx->dc_bios->regs = &bios_regs; |
1137 | 1137 | ||
@@ -1199,11 +1199,11 @@ static bool construct( | |||
1199 | } | 1199 | } |
1200 | } | 1200 | } |
1201 | 1201 | ||
1202 | pool->base.dccg = dce112_dccg_create(ctx, | 1202 | pool->base.clk_mgr = dce112_clk_mgr_create(ctx, |
1203 | &disp_clk_regs, | 1203 | &disp_clk_regs, |
1204 | &disp_clk_shift, | 1204 | &disp_clk_shift, |
1205 | &disp_clk_mask); | 1205 | &disp_clk_mask); |
1206 | if (pool->base.dccg == NULL) { | 1206 | if (pool->base.clk_mgr == NULL) { |
1207 | dm_error("DC: failed to create display clock!\n"); | 1207 | dm_error("DC: failed to create display clock!\n"); |
1208 | BREAK_TO_DEBUGGER(); | 1208 | BREAK_TO_DEBUGGER(); |
1209 | goto res_create_fail; | 1209 | goto res_create_fail; |
@@ -1229,13 +1229,6 @@ static bool construct( | |||
1229 | goto res_create_fail; | 1229 | goto res_create_fail; |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | /* get static clock information for PPLIB or firmware, save | ||
1233 | * max_clock_state | ||
1234 | */ | ||
1235 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
1236 | pool->base.dccg->max_clks_state = | ||
1237 | static_clk_info.max_clocks_state; | ||
1238 | |||
1239 | { | 1232 | { |
1240 | struct irq_service_init_data init_data; | 1233 | struct irq_service_init_data init_data; |
1241 | init_data.ctx = dc->ctx; | 1234 | init_data.ctx = dc->ctx; |
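The encoder_feature_support change in this file splits the old ycbcr420_supported flag into HDMI and DP variants (here: HDMI yes, DP no), so callers can gate 4:2:0 support per signal type. A hedged sketch of such a check (the helper name is hypothetical; dc_is_hdmi_signal/dc_is_dp_signal are the usual DC predicates):

    static bool sketch_ycbcr420_supported(
                    const struct encoder_feature_support *features,
                    enum signal_type signal)
    {
            if (dc_is_hdmi_signal(signal))
                    return features->hdmi_ycbcr420_supported;
            if (dc_is_dp_signal(signal))
                    return features->dp_ycbcr420_supported;
            return false;
    }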
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 79ab5f9f9115..f12696674eb0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "resource.h" | 31 | #include "resource.h" |
32 | #include "include/irq_service_interface.h" | 32 | #include "include/irq_service_interface.h" |
33 | #include "dce120_resource.h" | 33 | #include "dce120_resource.h" |
34 | |||
34 | #include "dce112/dce112_resource.h" | 35 | #include "dce112/dce112_resource.h" |
35 | 36 | ||
36 | #include "dce110/dce110_resource.h" | 37 | #include "dce110/dce110_resource.h" |
@@ -39,7 +40,6 @@ | |||
39 | #include "irq/dce120/irq_service_dce120.h" | 40 | #include "irq/dce120/irq_service_dce120.h" |
40 | #include "dce/dce_opp.h" | 41 | #include "dce/dce_opp.h" |
41 | #include "dce/dce_clock_source.h" | 42 | #include "dce/dce_clock_source.h" |
42 | #include "dce/dce_clocks.h" | ||
43 | #include "dce/dce_ipp.h" | 43 | #include "dce/dce_ipp.h" |
44 | #include "dce/dce_mem_input.h" | 44 | #include "dce/dce_mem_input.h" |
45 | 45 | ||
@@ -47,6 +47,7 @@ | |||
47 | #include "dce120/dce120_hw_sequencer.h" | 47 | #include "dce120/dce120_hw_sequencer.h" |
48 | #include "dce/dce_transform.h" | 48 | #include "dce/dce_transform.h" |
49 | 49 | ||
50 | #include "dce/dce_clk_mgr.h" | ||
50 | #include "dce/dce_audio.h" | 51 | #include "dce/dce_audio.h" |
51 | #include "dce/dce_link_encoder.h" | 52 | #include "dce/dce_link_encoder.h" |
52 | #include "dce/dce_stream_encoder.h" | 53 | #include "dce/dce_stream_encoder.h" |
@@ -573,8 +574,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
573 | if (pool->base.dmcu != NULL) | 574 | if (pool->base.dmcu != NULL) |
574 | dce_dmcu_destroy(&pool->base.dmcu); | 575 | dce_dmcu_destroy(&pool->base.dmcu); |
575 | 576 | ||
576 | if (pool->base.dccg != NULL) | 577 | if (pool->base.clk_mgr != NULL) |
577 | dce_dccg_destroy(&pool->base.dccg); | 578 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
578 | } | 579 | } |
579 | 580 | ||
580 | static void read_dce_straps( | 581 | static void read_dce_straps( |
@@ -606,7 +607,8 @@ static struct audio *create_audio( | |||
606 | static const struct encoder_feature_support link_enc_feature = { | 607 | static const struct encoder_feature_support link_enc_feature = { |
607 | .max_hdmi_deep_color = COLOR_DEPTH_121212, | 608 | .max_hdmi_deep_color = COLOR_DEPTH_121212, |
608 | .max_hdmi_pixel_clock = 600000, | 609 | .max_hdmi_pixel_clock = 600000, |
609 | .ycbcr420_supported = true, | 610 | .hdmi_ycbcr420_supported = true, |
611 | .dp_ycbcr420_supported = false, | ||
610 | .flags.bits.IS_HBR2_CAPABLE = true, | 612 | .flags.bits.IS_HBR2_CAPABLE = true, |
611 | .flags.bits.IS_HBR3_CAPABLE = true, | 613 | .flags.bits.IS_HBR3_CAPABLE = true, |
612 | .flags.bits.IS_TPS3_CAPABLE = true, | 614 | .flags.bits.IS_TPS3_CAPABLE = true, |
@@ -834,12 +836,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) | |||
834 | * YCLK = UMACLK*m_memoryTypeMultiplier | 836 | * YCLK = UMACLK*m_memoryTypeMultiplier |
835 | */ | 837 | */ |
836 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( | 838 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( |
837 | mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000); | 839 | mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); |
838 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( | 840 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( |
839 | mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, | 841 | mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, |
840 | 1000); | 842 | 1000); |
841 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( | 843 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( |
842 | mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, | 844 | mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, |
843 | 1000); | 845 | 1000); |
844 | 846 | ||
845 | /* Now notify PPLib/SMU about which Watermarks sets they should select | 847 | /* Now notify PPLib/SMU about which Watermarks sets they should select |
@@ -973,8 +975,8 @@ static bool construct( | |||
973 | } | 975 | } |
974 | } | 976 | } |
975 | 977 | ||
976 | pool->base.dccg = dce120_dccg_create(ctx); | 978 | pool->base.clk_mgr = dce120_clk_mgr_create(ctx); |
977 | if (pool->base.dccg == NULL) { | 979 | if (pool->base.clk_mgr == NULL) { |
978 | dm_error("DC: failed to create display clock!\n"); | 980 | dm_error("DC: failed to create display clock!\n"); |
979 | BREAK_TO_DEBUGGER(); | 981 | BREAK_TO_DEBUGGER(); |
980 | goto dccg_create_fail; | 982 | goto dccg_create_fail; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index 6c6a1a16af19..a60a90e68d91 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c | |||
@@ -76,6 +76,7 @@ void dce80_hw_sequencer_construct(struct dc *dc) | |||
76 | 76 | ||
77 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; | 77 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; |
78 | dc->hwss.pipe_control_lock = dce_pipe_control_lock; | 78 | dc->hwss.pipe_control_lock = dce_pipe_control_lock; |
79 | dc->hwss.set_bandwidth = dce100_set_bandwidth; | 79 | dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; |
80 | dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; | ||
80 | } | 81 | } |
81 | 82 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index d68f951f9869..6d40b3d54ac1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include "dce110/dce110_timing_generator.h" | 37 | #include "dce110/dce110_timing_generator.h" |
38 | #include "dce110/dce110_resource.h" | 38 | #include "dce110/dce110_resource.h" |
39 | #include "dce80/dce80_timing_generator.h" | 39 | #include "dce80/dce80_timing_generator.h" |
40 | #include "dce/dce_clk_mgr.h" | ||
40 | #include "dce/dce_mem_input.h" | 41 | #include "dce/dce_mem_input.h" |
41 | #include "dce/dce_link_encoder.h" | 42 | #include "dce/dce_link_encoder.h" |
42 | #include "dce/dce_stream_encoder.h" | 43 | #include "dce/dce_stream_encoder.h" |
@@ -44,7 +45,6 @@ | |||
44 | #include "dce/dce_ipp.h" | 45 | #include "dce/dce_ipp.h" |
45 | #include "dce/dce_transform.h" | 46 | #include "dce/dce_transform.h" |
46 | #include "dce/dce_opp.h" | 47 | #include "dce/dce_opp.h" |
47 | #include "dce/dce_clocks.h" | ||
48 | #include "dce/dce_clock_source.h" | 48 | #include "dce/dce_clock_source.h" |
49 | #include "dce/dce_audio.h" | 49 | #include "dce/dce_audio.h" |
50 | #include "dce/dce_hwseq.h" | 50 | #include "dce/dce_hwseq.h" |
@@ -155,15 +155,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = { | |||
155 | .reg_name = mm ## block ## id ## _ ## reg_name | 155 | .reg_name = mm ## block ## id ## _ ## reg_name |
156 | 156 | ||
157 | 157 | ||
158 | static const struct dccg_registers disp_clk_regs = { | 158 | static const struct clk_mgr_registers disp_clk_regs = { |
159 | CLK_COMMON_REG_LIST_DCE_BASE() | 159 | CLK_COMMON_REG_LIST_DCE_BASE() |
160 | }; | 160 | }; |
161 | 161 | ||
162 | static const struct dccg_shift disp_clk_shift = { | 162 | static const struct clk_mgr_shift disp_clk_shift = { |
163 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) | 163 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) |
164 | }; | 164 | }; |
165 | 165 | ||
166 | static const struct dccg_mask disp_clk_mask = { | 166 | static const struct clk_mgr_mask disp_clk_mask = { |
167 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) | 167 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) |
168 | }; | 168 | }; |
169 | 169 | ||
@@ -779,8 +779,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
779 | } | 779 | } |
780 | } | 780 | } |
781 | 781 | ||
782 | if (pool->base.dccg != NULL) | 782 | if (pool->base.clk_mgr != NULL) |
783 | dce_dccg_destroy(&pool->base.dccg); | 783 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
784 | 784 | ||
785 | if (pool->base.irqs != NULL) { | 785 | if (pool->base.irqs != NULL) { |
786 | dal_irq_service_destroy(&pool->base.irqs); | 786 | dal_irq_service_destroy(&pool->base.irqs); |
@@ -793,7 +793,7 @@ bool dce80_validate_bandwidth( | |||
793 | { | 793 | { |
794 | /* TODO implement when needed but for now hardcode max value*/ | 794 | /* TODO implement when needed but for now hardcode max value*/ |
795 | context->bw.dce.dispclk_khz = 681000; | 795 | context->bw.dce.dispclk_khz = 681000; |
796 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; | 796 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; |
797 | 797 | ||
798 | return true; | 798 | return true; |
799 | } | 799 | } |
@@ -855,7 +855,6 @@ static bool dce80_construct( | |||
855 | struct dc_context *ctx = dc->ctx; | 855 | struct dc_context *ctx = dc->ctx; |
856 | struct dc_firmware_info info; | 856 | struct dc_firmware_info info; |
857 | struct dc_bios *bp; | 857 | struct dc_bios *bp; |
858 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
859 | 858 | ||
860 | ctx->dc_bios->regs = &bios_regs; | 859 | ctx->dc_bios->regs = &bios_regs; |
861 | 860 | ||
@@ -918,11 +917,11 @@ static bool dce80_construct( | |||
918 | } | 917 | } |
919 | } | 918 | } |
920 | 919 | ||
921 | pool->base.dccg = dce_dccg_create(ctx, | 920 | pool->base.clk_mgr = dce_clk_mgr_create(ctx, |
922 | &disp_clk_regs, | 921 | &disp_clk_regs, |
923 | &disp_clk_shift, | 922 | &disp_clk_shift, |
924 | &disp_clk_mask); | 923 | &disp_clk_mask); |
925 | if (pool->base.dccg == NULL) { | 924 | if (pool->base.clk_mgr == NULL) { |
926 | dm_error("DC: failed to create display clock!\n"); | 925 | dm_error("DC: failed to create display clock!\n"); |
927 | BREAK_TO_DEBUGGER(); | 926 | BREAK_TO_DEBUGGER(); |
928 | goto res_create_fail; | 927 | goto res_create_fail; |
@@ -948,10 +947,6 @@ static bool dce80_construct( | |||
948 | goto res_create_fail; | 947 | goto res_create_fail; |
949 | } | 948 | } |
950 | 949 | ||
951 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
952 | pool->base.dccg->max_clks_state = | ||
953 | static_clk_info.max_clocks_state; | ||
954 | |||
955 | { | 950 | { |
956 | struct irq_service_init_data init_data; | 951 | struct irq_service_init_data init_data; |
957 | init_data.ctx = dc->ctx; | 952 | init_data.ctx = dc->ctx; |
@@ -1065,7 +1060,6 @@ static bool dce81_construct( | |||
1065 | struct dc_context *ctx = dc->ctx; | 1060 | struct dc_context *ctx = dc->ctx; |
1066 | struct dc_firmware_info info; | 1061 | struct dc_firmware_info info; |
1067 | struct dc_bios *bp; | 1062 | struct dc_bios *bp; |
1068 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
1069 | 1063 | ||
1070 | ctx->dc_bios->regs = &bios_regs; | 1064 | ctx->dc_bios->regs = &bios_regs; |
1071 | 1065 | ||
@@ -1128,11 +1122,11 @@ static bool dce81_construct( | |||
1128 | } | 1122 | } |
1129 | } | 1123 | } |
1130 | 1124 | ||
1131 | pool->base.dccg = dce_dccg_create(ctx, | 1125 | pool->base.clk_mgr = dce_clk_mgr_create(ctx, |
1132 | &disp_clk_regs, | 1126 | &disp_clk_regs, |
1133 | &disp_clk_shift, | 1127 | &disp_clk_shift, |
1134 | &disp_clk_mask); | 1128 | &disp_clk_mask); |
1135 | if (pool->base.dccg == NULL) { | 1129 | if (pool->base.clk_mgr == NULL) { |
1136 | dm_error("DC: failed to create display clock!\n"); | 1130 | dm_error("DC: failed to create display clock!\n"); |
1137 | BREAK_TO_DEBUGGER(); | 1131 | BREAK_TO_DEBUGGER(); |
1138 | goto res_create_fail; | 1132 | goto res_create_fail; |
@@ -1158,10 +1152,6 @@ static bool dce81_construct( | |||
1158 | goto res_create_fail; | 1152 | goto res_create_fail; |
1159 | } | 1153 | } |
1160 | 1154 | ||
1161 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
1162 | pool->base.dccg->max_clks_state = | ||
1163 | static_clk_info.max_clocks_state; | ||
1164 | |||
1165 | { | 1155 | { |
1166 | struct irq_service_init_data init_data; | 1156 | struct irq_service_init_data init_data; |
1167 | init_data.ctx = dc->ctx; | 1157 | init_data.ctx = dc->ctx; |
@@ -1275,7 +1265,6 @@ static bool dce83_construct( | |||
1275 | struct dc_context *ctx = dc->ctx; | 1265 | struct dc_context *ctx = dc->ctx; |
1276 | struct dc_firmware_info info; | 1266 | struct dc_firmware_info info; |
1277 | struct dc_bios *bp; | 1267 | struct dc_bios *bp; |
1278 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
1279 | 1268 | ||
1280 | ctx->dc_bios->regs = &bios_regs; | 1269 | ctx->dc_bios->regs = &bios_regs; |
1281 | 1270 | ||
@@ -1334,11 +1323,11 @@ static bool dce83_construct( | |||
1334 | } | 1323 | } |
1335 | } | 1324 | } |
1336 | 1325 | ||
1337 | pool->base.dccg = dce_dccg_create(ctx, | 1326 | pool->base.clk_mgr = dce_clk_mgr_create(ctx, |
1338 | &disp_clk_regs, | 1327 | &disp_clk_regs, |
1339 | &disp_clk_shift, | 1328 | &disp_clk_shift, |
1340 | &disp_clk_mask); | 1329 | &disp_clk_mask); |
1341 | if (pool->base.dccg == NULL) { | 1330 | if (pool->base.clk_mgr == NULL) { |
1342 | dm_error("DC: failed to create display clock!\n"); | 1331 | dm_error("DC: failed to create display clock!\n"); |
1343 | BREAK_TO_DEBUGGER(); | 1332 | BREAK_TO_DEBUGGER(); |
1344 | goto res_create_fail; | 1333 | goto res_create_fail; |
@@ -1364,10 +1353,6 @@ static bool dce83_construct( | |||
1364 | goto res_create_fail; | 1353 | goto res_create_fail; |
1365 | } | 1354 | } |
1366 | 1355 | ||
1367 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
1368 | pool->base.dccg->max_clks_state = | ||
1369 | static_clk_info.max_clocks_state; | ||
1370 | |||
1371 | { | 1356 | { |
1372 | struct irq_service_init_data init_data; | 1357 | struct irq_service_init_data init_data; |
1373 | init_data.ctx = dc->ctx; | 1358 | init_data.ctx = dc->ctx; |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 032f872be89c..55f293c8a3c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \ | 25 | DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \ |
26 | dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ | 26 | dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ |
27 | dcn10_hubp.o dcn10_mpc.o \ | 27 | dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \ |
28 | dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ | 28 | dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ |
29 | dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o | 29 | dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o |
30 | 30 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c new file mode 100644 index 000000000000..20f531d27e2b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c | |||
@@ -0,0 +1,379 @@ | |||
1 | /* | ||
2 | * Copyright 2018 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include "dcn10_clk_mgr.h" | ||
27 | |||
28 | #include "reg_helper.h" | ||
29 | #include "core_types.h" | ||
30 | |||
31 | #define TO_DCE_CLK_MGR(clocks)\ | ||
32 | container_of(clocks, struct dce_clk_mgr, base) | ||
33 | |||
34 | #define REG(reg) \ | ||
35 | (clk_mgr_dce->regs->reg) | ||
36 | |||
37 | #undef FN | ||
38 | #define FN(reg_name, field_name) \ | ||
39 | clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name | ||
40 | |||
41 | #define CTX \ | ||
42 | clk_mgr_dce->base.ctx | ||
43 | #define DC_LOGGER \ | ||
44 | clk_mgr->ctx->logger | ||
45 | |||
46 | void dcn1_pplib_apply_display_requirements( | ||
47 | struct dc *dc, | ||
48 | struct dc_state *context) | ||
49 | { | ||
50 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | ||
51 | |||
52 | pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz; | ||
53 | pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz; | ||
54 | pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz; | ||
55 | pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz; | ||
56 | pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz; | ||
57 | pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz; | ||
58 | dce110_fill_display_configs(context, pp_display_cfg); | ||
59 | |||
60 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
61 | } | ||
62 | |||
63 | static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks) | ||
64 | { | ||
65 | bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; | ||
66 | bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz; | ||
67 | int disp_clk_threshold = new_clocks->max_supported_dppclk_khz; | ||
68 | bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz; | ||
69 | |||
70 | /* increasing clock: current dpp div is 0, requested div is 1 */ | ||
71 | if (dispclk_increase) { | ||
72 | /* already divided by 2, no need to reach target clk with 2 steps*/ | ||
73 | if (cur_dpp_div) | ||
74 | return new_clocks->dispclk_khz; | ||
75 | |||
76 | /* request disp clk is lower than maximum supported dpp clk, | ||
77 | * no need to reach target clk with two steps. | ||
78 | */ | ||
79 | if (new_clocks->dispclk_khz <= disp_clk_threshold) | ||
80 | return new_clocks->dispclk_khz; | ||
81 | |||
82 | /* requested dpp clk does not need divide-by-2, still within threshold */ | ||
83 | if (!request_dpp_div) | ||
84 | return new_clocks->dispclk_khz; | ||
85 | |||
86 | } else { | ||
87 | /* decreasing clock: current dppclk is divided by 2, | ||
88 | * requested dppclk is not divided by 2. | ||
89 | */ | ||
90 | |||
91 | /* current dpp clk not divided by 2, no need to ramp*/ | ||
92 | if (!cur_dpp_div) | ||
93 | return new_clocks->dispclk_khz; | ||
94 | |||
95 | /* current disp clk is lower than current maximum dpp clk, | ||
96 | * no need to ramp | ||
97 | */ | ||
98 | if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold) | ||
99 | return new_clocks->dispclk_khz; | ||
100 | |||
101 | /* request dpp clk need to be divided by 2 */ | ||
102 | if (request_dpp_div) | ||
103 | return new_clocks->dispclk_khz; | ||
104 | } | ||
105 | |||
106 | return disp_clk_threshold; | ||
107 | } | ||
108 | |||
109 | static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks) | ||
110 | { | ||
111 | struct dc *dc = clk_mgr->ctx->dc; | ||
112 | int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks); | ||
113 | bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; | ||
114 | int i; | ||
115 | |||
116 | /* set disp clk to dpp clk threshold */ | ||
117 | dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold); | ||
118 | |||
119 | /* update request dpp clk division option */ | ||
120 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
121 | struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; | ||
122 | |||
123 | if (!pipe_ctx->plane_state) | ||
124 | continue; | ||
125 | |||
126 | pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control( | ||
127 | pipe_ctx->plane_res.dpp, | ||
128 | request_dpp_div, | ||
129 | true); | ||
130 | } | ||
131 | |||
132 | /* If target clk not same as dppclk threshold, set to target clock */ | ||
133 | if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) | ||
134 | dce112_set_clock(clk_mgr, new_clocks->dispclk_khz); | ||
135 | |||
136 | clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
137 | clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz; | ||
138 | clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz; | ||
139 | } | ||
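The pair of helpers above implements the two-step dispclk/dppclk ramp that the comments describe. A worked example of the increase path with illustrative numbers (not taken from the patch): current dispclk 400 MHz with no dpp divider, requested dispclk 600 MHz with the divider engaged, max_supported_dppclk 540 MHz:

    /* illustrative request/current values */
    new_clocks->dispclk_khz              = 600000;  /* requested */
    new_clocks->dppclk_khz               = 300000;  /* requested, i.e. divided by 2 */
    new_clocks->max_supported_dppclk_khz = 540000;
    clk_mgr->clks.dispclk_khz            = 400000;  /* current, no divider */
    clk_mgr->clks.dppclk_khz             = 400000;

    /* dcn1_determine_dppclk_threshold(): dispclk is increasing, the current
     * state is undivided but the request is divided, and 600000 exceeds the
     * 540000 threshold, so the threshold (540000) is returned.
     *
     * dcn1_ramp_up_dispclk_with_dpp() then:
     *   1. dce112_set_clock(clk_mgr, 540000)  - dispclk to the threshold
     *   2. dpp_dppclk_control(..., true)      - engage the divider (dppclk 270000)
     *   3. dce112_set_clock(clk_mgr, 600000)  - dispclk to the target (dppclk 300000)
     */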
140 | |||
141 | static int get_active_display_cnt( | ||
142 | struct dc *dc, | ||
143 | struct dc_state *context) | ||
144 | { | ||
145 | int i, display_count; | ||
146 | |||
147 | display_count = 0; | ||
148 | for (i = 0; i < context->stream_count; i++) { | ||
149 | const struct dc_stream_state *stream = context->streams[i]; | ||
150 | |||
151 | /* | ||
152 | * Only notify active stream or virtual stream. | ||
153 | * Need to notify virtual stream to work around | ||
154 | * headless case. HPD does not fire when system is in | ||
155 | * S0i2. | ||
156 | */ | ||
157 | if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL) | ||
158 | display_count++; | ||
159 | } | ||
160 | |||
161 | return display_count; | ||
162 | } | ||
163 | |||
164 | static void notify_deep_sleep_dcfclk_to_smu( | ||
165 | struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz) | ||
166 | { | ||
167 | int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz | ||
168 | /* | ||
169 | * if function pointer not set up, this message is | ||
170 | * sent as part of pplib_apply_display_requirements. | ||
171 | * So just return. | ||
172 | */ | ||
173 | if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk) | ||
174 | return; | ||
175 | |||
176 | min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up | ||
177 | pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz); | ||
178 | } | ||
179 | |||
180 | static void notify_hard_min_dcfclk_to_smu( | ||
181 | struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz) | ||
182 | { | ||
183 | int min_dcf_clk_mhz; //minimum required DCF clock in mhz | ||
184 | |||
185 | /* | ||
186 | * if function pointer not set up, this message is | ||
187 | * sent as part of pplib_apply_display_requirements. | ||
188 | * So just return. | ||
189 | */ | ||
190 | if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq) | ||
191 | return; | ||
192 | |||
193 | min_dcf_clk_mhz = min_dcf_clk_khz / 1000; | ||
194 | |||
195 | pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz); | ||
196 | } | ||
197 | |||
198 | static void notify_hard_min_fclk_to_smu( | ||
199 | struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz) | ||
200 | { | ||
201 | int min_f_clk_mhz; //minimum required F clock in mhz | ||
202 | |||
203 | /* | ||
204 | * if function pointer not set up, this message is | ||
205 | * sent as part of pplib_apply_display_requirements. | ||
206 | * So just return. | ||
207 | */ | ||
208 | if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq) | ||
209 | return; | ||
210 | |||
211 | min_f_clk_mhz = min_f_clk_khz / 1000; | ||
212 | |||
213 | pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz); | ||
214 | } | ||
215 | |||
216 | static void dcn1_update_clocks(struct clk_mgr *clk_mgr, | ||
217 | struct dc_state *context, | ||
218 | bool safe_to_lower) | ||
219 | { | ||
220 | struct dc *dc = clk_mgr->ctx->dc; | ||
221 | struct dc_clocks *new_clocks = &context->bw.dcn.clk; | ||
222 | struct pp_smu_display_requirement_rv *smu_req_cur = | ||
223 | &dc->res_pool->pp_smu_req; | ||
224 | struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; | ||
225 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | ||
226 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; | ||
227 | bool send_request_to_increase = false; | ||
228 | bool send_request_to_lower = false; | ||
229 | int display_count; | ||
230 | |||
231 | bool enter_display_off = false; | ||
232 | |||
233 | display_count = get_active_display_cnt(dc, context); | ||
234 | |||
235 | if (display_count == 0) | ||
236 | enter_display_off = true; | ||
237 | |||
238 | if (enter_display_off == safe_to_lower) { | ||
239 | /* | ||
240 | * Notify SMU active displays | ||
241 | * if function pointer not set up, this message is | ||
242 | * sent as part of pplib_apply_display_requirements. | ||
243 | */ | ||
244 | if (pp_smu->set_display_count) | ||
245 | pp_smu->set_display_count(&pp_smu->pp_smu, display_count); | ||
246 | else | ||
247 | smu_req.display_count = display_count; | ||
248 | |||
249 | } | ||
250 | |||
251 | if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz | ||
252 | || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz | ||
253 | || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz | ||
254 | || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz) | ||
255 | send_request_to_increase = true; | ||
256 | |||
257 | if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) { | ||
258 | clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz; | ||
259 | |||
260 | send_request_to_lower = true; | ||
261 | } | ||
262 | |||
263 | // F Clock | ||
264 | if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { | ||
265 | clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; | ||
266 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK; | ||
267 | clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz; | ||
268 | smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; | ||
269 | |||
270 | notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz); | ||
271 | |||
272 | send_request_to_lower = true; | ||
273 | } | ||
274 | |||
275 | //DCF Clock | ||
276 | if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) { | ||
277 | clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz; | ||
278 | smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000; | ||
279 | |||
280 | send_request_to_lower = true; | ||
281 | } | ||
282 | |||
283 | if (should_set_clock(safe_to_lower, | ||
284 | new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) { | ||
285 | clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; | ||
286 | smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000; | ||
287 | |||
288 | send_request_to_lower = true; | ||
289 | } | ||
290 | |||
291 | /* make sure dcf clk is raised before dpp clk so | ||
292 | * we have enough voltage to run dpp clk | ||
293 | */ | ||
294 | if (send_request_to_increase) { | ||
295 | /*use dcfclk to request voltage*/ | ||
296 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; | ||
297 | clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); | ||
298 | |||
299 | notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz); | ||
300 | |||
301 | if (pp_smu->set_display_requirement) | ||
302 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); | ||
303 | |||
304 | notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz); | ||
305 | dcn1_pplib_apply_display_requirements(dc, context); | ||
306 | } | ||
307 | |||
308 | /* dcn1 dppclk is tied to dispclk */ | ||
309 | /* program dispclk even when unchanged (==) as a w/a for sleep/resume clock ramping issues */ | ||
310 | if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz) | ||
311 | || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) { | ||
312 | dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks); | ||
313 | clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
314 | |||
315 | send_request_to_lower = true; | ||
316 | } | ||
317 | |||
318 | if (!send_request_to_increase && send_request_to_lower) { | ||
319 | /*use dcfclk to request voltage*/ | ||
320 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; | ||
321 | clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); | ||
322 | |||
323 | notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz); | ||
324 | |||
325 | if (pp_smu->set_display_requirement) | ||
326 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); | ||
327 | |||
328 | notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz); | ||
329 | dcn1_pplib_apply_display_requirements(dc, context); | ||
330 | } | ||
331 | |||
332 | |||
333 | *smu_req_cur = smu_req; | ||
334 | } | ||
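dcn1_update_clocks() relies on should_set_clock() to make the safe_to_lower behaviour uniform: a clock is reprogrammed when it has to go up, or when it differs and lowering has been declared safe (i.e. after the new state is committed). A sketch of that predicate, assuming the usual definition from the shared clock-manager header (not part of this hunk):

    static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
    {
            /* raising is always allowed; dropping only once safe_to_lower */
            return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
    }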
335 | |||
336 | static const struct clk_mgr_funcs dcn1_funcs = { | ||
337 | .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, | ||
338 | .update_clocks = dcn1_update_clocks | ||
339 | }; | ||
340 | |||
341 | struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx) | ||
342 | { | ||
343 | struct dc_debug_options *debug = &ctx->dc->debug; | ||
344 | struct dc_bios *bp = ctx->dc_bios; | ||
345 | struct dc_firmware_info fw_info = { { 0 } }; | ||
346 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); | ||
347 | |||
348 | if (clk_mgr_dce == NULL) { | ||
349 | BREAK_TO_DEBUGGER(); | ||
350 | return NULL; | ||
351 | } | ||
352 | |||
353 | clk_mgr_dce->base.ctx = ctx; | ||
354 | clk_mgr_dce->base.funcs = &dcn1_funcs; | ||
355 | |||
356 | clk_mgr_dce->dfs_bypass_disp_clk = 0; | ||
357 | |||
358 | clk_mgr_dce->dprefclk_ss_percentage = 0; | ||
359 | clk_mgr_dce->dprefclk_ss_divider = 1000; | ||
360 | clk_mgr_dce->ss_on_dprefclk = false; | ||
361 | |||
362 | clk_mgr_dce->dprefclk_khz = 600000; | ||
363 | if (bp->integrated_info) | ||
364 | clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; | ||
365 | if (clk_mgr_dce->dentist_vco_freq_khz == 0) { | ||
366 | bp->funcs->get_firmware_info(bp, &fw_info); | ||
367 | clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq; | ||
368 | if (clk_mgr_dce->dentist_vco_freq_khz == 0) | ||
369 | clk_mgr_dce->dentist_vco_freq_khz = 3600000; | ||
370 | } | ||
371 | |||
372 | if (!debug->disable_dfs_bypass && bp->integrated_info) | ||
373 | if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) | ||
374 | clk_mgr_dce->dfs_bypass_enabled = true; | ||
375 | |||
376 | dce_clock_read_ss_info(clk_mgr_dce); | ||
377 | |||
378 | return &clk_mgr_dce->base; | ||
379 | } | ||
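This file only defines the DCN1 clock manager; the matching dcn10_resource change (elsewhere in the series) is expected to instantiate it the same way the DCE pools above instantiate theirs. A hedged sketch of that wiring:

    /* assumed dcn10_resource.c hookup, mirroring the DCE pool constructors */
    pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
    if (pool->base.clk_mgr == NULL) {
            dm_error("DC: failed to create display clock!\n");
            BREAK_TO_DEBUGGER();
            goto fail;
    }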
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h new file mode 100644 index 000000000000..9dbaf6578006 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright 2018 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #ifndef __DCN10_CLK_MGR_H__ | ||
27 | #define __DCN10_CLK_MGR_H__ | ||
28 | |||
29 | #include "../dce/dce_clk_mgr.h" | ||
30 | |||
31 | void dcn1_pplib_apply_display_requirements( | ||
32 | struct dc *dc, | ||
33 | struct dc_state *context); | ||
34 | |||
35 | struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx); | ||
36 | |||
37 | #endif //__DCN10_CLK_MGR_H__ | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 5d95a997fd9f..3eea44092a04 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | |||
@@ -71,39 +71,39 @@ void cm_helper_program_xfer_func( | |||
71 | unsigned int i = 0; | 71 | unsigned int i = 0; |
72 | 72 | ||
73 | REG_SET_2(reg->start_cntl_b, 0, | 73 | REG_SET_2(reg->start_cntl_b, 0, |
74 | exp_region_start, params->arr_points[0].custom_float_x, | 74 | exp_region_start, params->corner_points[0].blue.custom_float_x, |
75 | exp_resion_start_segment, 0); | 75 | exp_resion_start_segment, 0); |
76 | REG_SET_2(reg->start_cntl_g, 0, | 76 | REG_SET_2(reg->start_cntl_g, 0, |
77 | exp_region_start, params->arr_points[0].custom_float_x, | 77 | exp_region_start, params->corner_points[0].green.custom_float_x, |
78 | exp_resion_start_segment, 0); | 78 | exp_resion_start_segment, 0); |
79 | REG_SET_2(reg->start_cntl_r, 0, | 79 | REG_SET_2(reg->start_cntl_r, 0, |
80 | exp_region_start, params->arr_points[0].custom_float_x, | 80 | exp_region_start, params->corner_points[0].red.custom_float_x, |
81 | exp_resion_start_segment, 0); | 81 | exp_resion_start_segment, 0); |
82 | 82 | ||
83 | REG_SET(reg->start_slope_cntl_b, 0, | 83 | REG_SET(reg->start_slope_cntl_b, 0, |
84 | field_region_linear_slope, params->arr_points[0].custom_float_slope); | 84 | field_region_linear_slope, params->corner_points[0].blue.custom_float_slope); |
85 | REG_SET(reg->start_slope_cntl_g, 0, | 85 | REG_SET(reg->start_slope_cntl_g, 0, |
86 | field_region_linear_slope, params->arr_points[0].custom_float_slope); | 86 | field_region_linear_slope, params->corner_points[0].green.custom_float_slope); |
87 | REG_SET(reg->start_slope_cntl_r, 0, | 87 | REG_SET(reg->start_slope_cntl_r, 0, |
88 | field_region_linear_slope, params->arr_points[0].custom_float_slope); | 88 | field_region_linear_slope, params->corner_points[0].red.custom_float_slope); |
89 | 89 | ||
90 | REG_SET(reg->start_end_cntl1_b, 0, | 90 | REG_SET(reg->start_end_cntl1_b, 0, |
91 | field_region_end, params->arr_points[1].custom_float_x); | 91 | field_region_end, params->corner_points[1].blue.custom_float_x); |
92 | REG_SET_2(reg->start_end_cntl2_b, 0, | 92 | REG_SET_2(reg->start_end_cntl2_b, 0, |
93 | field_region_end_slope, params->arr_points[1].custom_float_slope, | 93 | field_region_end_slope, params->corner_points[1].blue.custom_float_slope, |
94 | field_region_end_base, params->arr_points[1].custom_float_y); | 94 | field_region_end_base, params->corner_points[1].blue.custom_float_y); |
95 | 95 | ||
96 | REG_SET(reg->start_end_cntl1_g, 0, | 96 | REG_SET(reg->start_end_cntl1_g, 0, |
97 | field_region_end, params->arr_points[1].custom_float_x); | 97 | field_region_end, params->corner_points[1].green.custom_float_x); |
98 | REG_SET_2(reg->start_end_cntl2_g, 0, | 98 | REG_SET_2(reg->start_end_cntl2_g, 0, |
99 | field_region_end_slope, params->arr_points[1].custom_float_slope, | 99 | field_region_end_slope, params->corner_points[1].green.custom_float_slope, |
100 | field_region_end_base, params->arr_points[1].custom_float_y); | 100 | field_region_end_base, params->corner_points[1].green.custom_float_y); |
101 | 101 | ||
102 | REG_SET(reg->start_end_cntl1_r, 0, | 102 | REG_SET(reg->start_end_cntl1_r, 0, |
103 | field_region_end, params->arr_points[1].custom_float_x); | 103 | field_region_end, params->corner_points[1].red.custom_float_x); |
104 | REG_SET_2(reg->start_end_cntl2_r, 0, | 104 | REG_SET_2(reg->start_end_cntl2_r, 0, |
105 | field_region_end_slope, params->arr_points[1].custom_float_slope, | 105 | field_region_end_slope, params->corner_points[1].red.custom_float_slope, |
106 | field_region_end_base, params->arr_points[1].custom_float_y); | 106 | field_region_end_base, params->corner_points[1].red.custom_float_y); |
107 | 107 | ||
108 | for (reg_region_cur = reg->region_start; | 108 | for (reg_region_cur = reg->region_start; |
109 | reg_region_cur <= reg->region_end; | 109 | reg_region_cur <= reg->region_end; |
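The arr_points → corner_points conversion running through this file replaces a single shared curve endpoint with independent per-channel endpoints; corner_points[0] and corner_points[1] are the start and end corners, each carrying red/green/blue curve_points. The underlying type is presumably a simple per-channel wrapper along these lines (sketch; the real definition lives in the shared transfer-function headers, not in this diff):

    struct curve_points3 {
            struct curve_points red;
            struct curve_points green;
            struct curve_points blue;
    };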
@@ -127,7 +127,7 @@ void cm_helper_program_xfer_func( | |||
127 | 127 | ||
128 | bool cm_helper_convert_to_custom_float( | 128 | bool cm_helper_convert_to_custom_float( |
129 | struct pwl_result_data *rgb_resulted, | 129 | struct pwl_result_data *rgb_resulted, |
130 | struct curve_points *arr_points, | 130 | struct curve_points3 *corner_points, |
131 | uint32_t hw_points_num, | 131 | uint32_t hw_points_num, |
132 | bool fixpoint) | 132 | bool fixpoint) |
133 | { | 133 | { |
@@ -141,20 +141,53 @@ bool cm_helper_convert_to_custom_float( | |||
141 | fmt.mantissa_bits = 12; | 141 | fmt.mantissa_bits = 12; |
142 | fmt.sign = false; | 142 | fmt.sign = false; |
143 | 143 | ||
144 | if (!convert_to_custom_float_format(arr_points[0].x, &fmt, | 144 | /* corner_points[0] - beginning base, slope offset for R,G,B |
145 | &arr_points[0].custom_float_x)) { | 145 | * corner_points[1] - end base, slope offset for R,G,B |
146 | */ | ||
147 | if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt, | ||
148 | &corner_points[0].red.custom_float_x)) { | ||
149 | BREAK_TO_DEBUGGER(); | ||
150 | return false; | ||
151 | } | ||
152 | if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt, | ||
153 | &corner_points[0].green.custom_float_x)) { | ||
154 | BREAK_TO_DEBUGGER(); | ||
155 | return false; | ||
156 | } | ||
157 | if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt, | ||
158 | &corner_points[0].blue.custom_float_x)) { | ||
146 | BREAK_TO_DEBUGGER(); | 159 | BREAK_TO_DEBUGGER(); |
147 | return false; | 160 | return false; |
148 | } | 161 | } |
149 | 162 | ||
150 | if (!convert_to_custom_float_format(arr_points[0].offset, &fmt, | 163 | if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt, |
151 | &arr_points[0].custom_float_offset)) { | 164 | &corner_points[0].red.custom_float_offset)) { |
165 | BREAK_TO_DEBUGGER(); | ||
166 | return false; | ||
167 | } | ||
168 | if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt, | ||
169 | &corner_points[0].green.custom_float_offset)) { | ||
170 | BREAK_TO_DEBUGGER(); | ||
171 | return false; | ||
172 | } | ||
173 | if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt, | ||
174 | &corner_points[0].blue.custom_float_offset)) { | ||
152 | BREAK_TO_DEBUGGER(); | 175 | BREAK_TO_DEBUGGER(); |
153 | return false; | 176 | return false; |
154 | } | 177 | } |
155 | 178 | ||
156 | if (!convert_to_custom_float_format(arr_points[0].slope, &fmt, | 179 | if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt, |
157 | &arr_points[0].custom_float_slope)) { | 180 | &corner_points[0].red.custom_float_slope)) { |
181 | BREAK_TO_DEBUGGER(); | ||
182 | return false; | ||
183 | } | ||
184 | if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt, | ||
185 | &corner_points[0].green.custom_float_slope)) { | ||
186 | BREAK_TO_DEBUGGER(); | ||
187 | return false; | ||
188 | } | ||
189 | if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt, | ||
190 | &corner_points[0].blue.custom_float_slope)) { | ||
158 | BREAK_TO_DEBUGGER(); | 191 | BREAK_TO_DEBUGGER(); |
159 | return false; | 192 | return false; |
160 | } | 193 | } |
@@ -162,22 +195,59 @@ bool cm_helper_convert_to_custom_float( | |||
162 | fmt.mantissa_bits = 10; | 195 | fmt.mantissa_bits = 10; |
163 | fmt.sign = false; | 196 | fmt.sign = false; |
164 | 197 | ||
165 | if (!convert_to_custom_float_format(arr_points[1].x, &fmt, | 198 | if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt, |
166 | &arr_points[1].custom_float_x)) { | 199 | &corner_points[1].red.custom_float_x)) { |
167 | BREAK_TO_DEBUGGER(); | 200 | BREAK_TO_DEBUGGER(); |
168 | return false; | 201 | return false; |
169 | } | 202 | } |
170 | 203 | if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt, | |
171 | if (fixpoint == true) | 204 | &corner_points[1].green.custom_float_x)) { |
172 | arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y); | 205 | BREAK_TO_DEBUGGER(); |
173 | else if (!convert_to_custom_float_format(arr_points[1].y, &fmt, | 206 | return false; |
174 | &arr_points[1].custom_float_y)) { | 207 | } |
208 | if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt, | ||
209 | &corner_points[1].blue.custom_float_x)) { | ||
175 | BREAK_TO_DEBUGGER(); | 210 | BREAK_TO_DEBUGGER(); |
176 | return false; | 211 | return false; |
177 | } | 212 | } |
178 | 213 | ||
179 | if (!convert_to_custom_float_format(arr_points[1].slope, &fmt, | 214 | if (fixpoint == true) { |
180 | &arr_points[1].custom_float_slope)) { | 215 | corner_points[1].red.custom_float_y = |
216 | dc_fixpt_clamp_u0d14(corner_points[1].red.y); | ||
217 | corner_points[1].green.custom_float_y = | ||
218 | dc_fixpt_clamp_u0d14(corner_points[1].green.y); | ||
219 | corner_points[1].blue.custom_float_y = | ||
220 | dc_fixpt_clamp_u0d14(corner_points[1].blue.y); | ||
221 | } else { | ||
222 | if (!convert_to_custom_float_format(corner_points[1].red.y, | ||
223 | &fmt, &corner_points[1].red.custom_float_y)) { | ||
224 | BREAK_TO_DEBUGGER(); | ||
225 | return false; | ||
226 | } | ||
227 | if (!convert_to_custom_float_format(corner_points[1].green.y, | ||
228 | &fmt, &corner_points[1].green.custom_float_y)) { | ||
229 | BREAK_TO_DEBUGGER(); | ||
230 | return false; | ||
231 | } | ||
232 | if (!convert_to_custom_float_format(corner_points[1].blue.y, | ||
233 | &fmt, &corner_points[1].blue.custom_float_y)) { | ||
234 | BREAK_TO_DEBUGGER(); | ||
235 | return false; | ||
236 | } | ||
237 | } | ||
238 | |||
239 | if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt, | ||
240 | &corner_points[1].red.custom_float_slope)) { | ||
241 | BREAK_TO_DEBUGGER(); | ||
242 | return false; | ||
243 | } | ||
244 | if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt, | ||
245 | &corner_points[1].green.custom_float_slope)) { | ||
246 | BREAK_TO_DEBUGGER(); | ||
247 | return false; | ||
248 | } | ||
249 | if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt, | ||
250 | &corner_points[1].blue.custom_float_slope)) { | ||
181 | BREAK_TO_DEBUGGER(); | 251 | BREAK_TO_DEBUGGER(); |
182 | return false; | 252 | return false; |
183 | } | 253 | } |
@@ -242,15 +312,10 @@ bool cm_helper_translate_curve_to_hw_format( | |||
242 | const struct dc_transfer_func *output_tf, | 312 | const struct dc_transfer_func *output_tf, |
243 | struct pwl_params *lut_params, bool fixpoint) | 313 | struct pwl_params *lut_params, bool fixpoint) |
244 | { | 314 | { |
245 | struct curve_points *arr_points; | 315 | struct curve_points3 *corner_points; |
246 | struct pwl_result_data *rgb_resulted; | 316 | struct pwl_result_data *rgb_resulted; |
247 | struct pwl_result_data *rgb; | 317 | struct pwl_result_data *rgb; |
248 | struct pwl_result_data *rgb_plus_1; | 318 | struct pwl_result_data *rgb_plus_1; |
249 | struct fixed31_32 y_r; | ||
250 | struct fixed31_32 y_g; | ||
251 | struct fixed31_32 y_b; | ||
252 | struct fixed31_32 y1_min; | ||
253 | struct fixed31_32 y3_max; | ||
254 | 319 | ||
255 | int32_t region_start, region_end; | 320 | int32_t region_start, region_end; |
256 | int32_t i; | 321 | int32_t i; |
@@ -261,14 +326,14 @@ bool cm_helper_translate_curve_to_hw_format( | |||
261 | 326 | ||
262 | PERF_TRACE(); | 327 | PERF_TRACE(); |
263 | 328 | ||
264 | arr_points = lut_params->arr_points; | 329 | corner_points = lut_params->corner_points; |
265 | rgb_resulted = lut_params->rgb_resulted; | 330 | rgb_resulted = lut_params->rgb_resulted; |
266 | hw_points = 0; | 331 | hw_points = 0; |
267 | 332 | ||
268 | memset(lut_params, 0, sizeof(struct pwl_params)); | 333 | memset(lut_params, 0, sizeof(struct pwl_params)); |
269 | memset(seg_distr, 0, sizeof(seg_distr)); | 334 | memset(seg_distr, 0, sizeof(seg_distr)); |
270 | 335 | ||
271 | if (output_tf->tf == TRANSFER_FUNCTION_PQ) { | 336 | if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22) { |
272 | /* 32 segments | 337 | /* 32 segments |
273 | * segments are from 2^-25 to 2^7 | 338 | * segments are from 2^-25 to 2^7 |
274 | */ | 339 | */ |
@@ -327,31 +392,37 @@ bool cm_helper_translate_curve_to_hw_format( | |||
327 | rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; | 392 | rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; |
328 | rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; | 393 | rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; |
329 | 394 | ||
330 | arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2), | 395 | // All 3 color channels have same x |
396 | corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), | ||
331 | dc_fixpt_from_int(region_start)); | 397 | dc_fixpt_from_int(region_start)); |
332 | arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2), | 398 | corner_points[0].green.x = corner_points[0].red.x; |
333 | dc_fixpt_from_int(region_end)); | 399 | corner_points[0].blue.x = corner_points[0].red.x; |
334 | 400 | ||
335 | y_r = rgb_resulted[0].red; | 401 | corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), |
336 | y_g = rgb_resulted[0].green; | 402 | dc_fixpt_from_int(region_end)); |
337 | y_b = rgb_resulted[0].blue; | 403 | corner_points[1].green.x = corner_points[1].red.x; |
404 | corner_points[1].blue.x = corner_points[1].red.x; | ||
338 | 405 | ||
339 | y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b)); | 406 | corner_points[0].red.y = rgb_resulted[0].red; |
407 | corner_points[0].green.y = rgb_resulted[0].green; | ||
408 | corner_points[0].blue.y = rgb_resulted[0].blue; | ||
340 | 409 | ||
341 | arr_points[0].y = y1_min; | 410 | corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y, |
342 | arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x); | 411 | corner_points[0].red.x); |
343 | y_r = rgb_resulted[hw_points - 1].red; | 412 | corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y, |
344 | y_g = rgb_resulted[hw_points - 1].green; | 413 | corner_points[0].green.x); |
345 | y_b = rgb_resulted[hw_points - 1].blue; | 414 | corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y, |
415 | corner_points[0].blue.x); | ||
346 | 416 | ||
347 | /* see comment above, m_arrPoints[1].y should be the Y value for the | 417 | /* see comment above, m_arrPoints[1].y should be the Y value for the |
348 | * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) | 418 | * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) |
349 | */ | 419 | */ |
350 | y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b)); | 420 | corner_points[1].red.y = rgb_resulted[hw_points - 1].red; |
351 | 421 | corner_points[1].green.y = rgb_resulted[hw_points - 1].green; | |
352 | arr_points[1].y = y3_max; | 422 | corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue; |
353 | 423 | corner_points[1].red.slope = dc_fixpt_zero; | |
354 | arr_points[1].slope = dc_fixpt_zero; | 424 | corner_points[1].green.slope = dc_fixpt_zero; |
425 | corner_points[1].blue.slope = dc_fixpt_zero; | ||
355 | 426 | ||
356 | if (output_tf->tf == TRANSFER_FUNCTION_PQ) { | 427 | if (output_tf->tf == TRANSFER_FUNCTION_PQ) { |
357 | /* for PQ, we want to have a straight line from last HW X point, | 428 | /* for PQ, we want to have a straight line from last HW X point, |
@@ -360,9 +431,15 @@ bool cm_helper_translate_curve_to_hw_format( | |||
360 | const struct fixed31_32 end_value = | 431 | const struct fixed31_32 end_value = |
361 | dc_fixpt_from_int(125); | 432 | dc_fixpt_from_int(125); |
362 | 433 | ||
363 | arr_points[1].slope = dc_fixpt_div( | 434 | corner_points[1].red.slope = dc_fixpt_div( |
364 | dc_fixpt_sub(dc_fixpt_one, arr_points[1].y), | 435 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y), |
365 | dc_fixpt_sub(end_value, arr_points[1].x)); | 436 | dc_fixpt_sub(end_value, corner_points[1].red.x)); |
437 | corner_points[1].green.slope = dc_fixpt_div( | ||
438 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y), | ||
439 | dc_fixpt_sub(end_value, corner_points[1].green.x)); | ||
440 | corner_points[1].blue.slope = dc_fixpt_div( | ||
441 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y), | ||
442 | dc_fixpt_sub(end_value, corner_points[1].blue.x)); | ||
366 | } | 443 | } |
367 | 444 | ||
368 | lut_params->hw_points_num = hw_points; | 445 | lut_params->hw_points_num = hw_points; |
@@ -411,7 +488,7 @@ bool cm_helper_translate_curve_to_hw_format( | |||
411 | ++i; | 488 | ++i; |
412 | } | 489 | } |
413 | cm_helper_convert_to_custom_float(rgb_resulted, | 490 | cm_helper_convert_to_custom_float(rgb_resulted, |
414 | lut_params->arr_points, | 491 | lut_params->corner_points, |
415 | hw_points, fixpoint); | 492 | hw_points, fixpoint); |
416 | 493 | ||
417 | return true; | 494 | return true; |
@@ -424,15 +501,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format( | |||
424 | const struct dc_transfer_func *output_tf, | 501 | const struct dc_transfer_func *output_tf, |
425 | struct pwl_params *lut_params) | 502 | struct pwl_params *lut_params) |
426 | { | 503 | { |
427 | struct curve_points *arr_points; | 504 | struct curve_points3 *corner_points; |
428 | struct pwl_result_data *rgb_resulted; | 505 | struct pwl_result_data *rgb_resulted; |
429 | struct pwl_result_data *rgb; | 506 | struct pwl_result_data *rgb; |
430 | struct pwl_result_data *rgb_plus_1; | 507 | struct pwl_result_data *rgb_plus_1; |
431 | struct fixed31_32 y_r; | ||
432 | struct fixed31_32 y_g; | ||
433 | struct fixed31_32 y_b; | ||
434 | struct fixed31_32 y1_min; | ||
435 | struct fixed31_32 y3_max; | ||
436 | 508 | ||
437 | int32_t region_start, region_end; | 509 | int32_t region_start, region_end; |
438 | int32_t i; | 510 | int32_t i; |
@@ -443,7 +515,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format( | |||
443 | 515 | ||
444 | PERF_TRACE(); | 516 | PERF_TRACE(); |
445 | 517 | ||
446 | arr_points = lut_params->arr_points; | 518 | corner_points = lut_params->corner_points; |
447 | rgb_resulted = lut_params->rgb_resulted; | 519 | rgb_resulted = lut_params->rgb_resulted; |
448 | hw_points = 0; | 520 | hw_points = 0; |
449 | 521 | ||
@@ -489,31 +561,28 @@ bool cm_helper_translate_curve_to_degamma_hw_format( | |||
489 | rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; | 561 | rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; |
490 | rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; | 562 | rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; |
491 | 563 | ||
492 | arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2), | 564 | corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), |
493 | dc_fixpt_from_int(region_start)); | 565 | dc_fixpt_from_int(region_start)); |
494 | arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2), | 566 | corner_points[0].green.x = corner_points[0].red.x; |
567 | corner_points[0].blue.x = corner_points[0].red.x; | ||
568 | corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), | ||
495 | dc_fixpt_from_int(region_end)); | 569 | dc_fixpt_from_int(region_end)); |
570 | corner_points[1].green.x = corner_points[1].red.x; | ||
571 | corner_points[1].blue.x = corner_points[1].red.x; | ||
496 | 572 | ||
497 | y_r = rgb_resulted[0].red; | 573 | corner_points[0].red.y = rgb_resulted[0].red; |
498 | y_g = rgb_resulted[0].green; | 574 | corner_points[0].green.y = rgb_resulted[0].green; |
499 | y_b = rgb_resulted[0].blue; | 575 | corner_points[0].blue.y = rgb_resulted[0].blue; |
500 | |||
501 | y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b)); | ||
502 | |||
503 | arr_points[0].y = y1_min; | ||
504 | arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x); | ||
505 | y_r = rgb_resulted[hw_points - 1].red; | ||
506 | y_g = rgb_resulted[hw_points - 1].green; | ||
507 | y_b = rgb_resulted[hw_points - 1].blue; | ||
508 | 576 | ||
509 | /* see comment above, m_arrPoints[1].y should be the Y value for the | 577 | /* see comment above, m_arrPoints[1].y should be the Y value for the |
510 | * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) | 578 | * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) |
511 | */ | 579 | */ |
512 | y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b)); | 580 | corner_points[1].red.y = rgb_resulted[hw_points - 1].red; |
513 | 581 | corner_points[1].green.y = rgb_resulted[hw_points - 1].green; | |
514 | arr_points[1].y = y3_max; | 582 | corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue; |
515 | 583 | corner_points[1].red.slope = dc_fixpt_zero; | |
516 | arr_points[1].slope = dc_fixpt_zero; | 584 | corner_points[1].green.slope = dc_fixpt_zero; |
585 | corner_points[1].blue.slope = dc_fixpt_zero; | ||
517 | 586 | ||
518 | if (output_tf->tf == TRANSFER_FUNCTION_PQ) { | 587 | if (output_tf->tf == TRANSFER_FUNCTION_PQ) { |
519 | /* for PQ, we want to have a straight line from last HW X point, | 588 | /* for PQ, we want to have a straight line from last HW X point, |
@@ -522,9 +591,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format( | |||
522 | const struct fixed31_32 end_value = | 591 | const struct fixed31_32 end_value = |
523 | dc_fixpt_from_int(125); | 592 | dc_fixpt_from_int(125); |
524 | 593 | ||
525 | arr_points[1].slope = dc_fixpt_div( | 594 | corner_points[1].red.slope = dc_fixpt_div( |
526 | dc_fixpt_sub(dc_fixpt_one, arr_points[1].y), | 595 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y), |
527 | dc_fixpt_sub(end_value, arr_points[1].x)); | 596 | dc_fixpt_sub(end_value, corner_points[1].red.x)); |
597 | corner_points[1].green.slope = dc_fixpt_div( | ||
598 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y), | ||
599 | dc_fixpt_sub(end_value, corner_points[1].green.x)); | ||
600 | corner_points[1].blue.slope = dc_fixpt_div( | ||
601 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y), | ||
602 | dc_fixpt_sub(end_value, corner_points[1].blue.x)); | ||
528 | } | 603 | } |
529 | 604 | ||
530 | lut_params->hw_points_num = hw_points; | 605 | lut_params->hw_points_num = hw_points; |
@@ -564,7 +639,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format( | |||
564 | ++i; | 639 | ++i; |
565 | } | 640 | } |
566 | cm_helper_convert_to_custom_float(rgb_resulted, | 641 | cm_helper_convert_to_custom_float(rgb_resulted, |
567 | lut_params->arr_points, | 642 | lut_params->corner_points, |
568 | hw_points, false); | 643 | hw_points, false); |
569 | 644 | ||
570 | return true; | 645 | return true; |
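
The hunks above replace the single shared curve_points array with a curve_points3 that carries separate red/green/blue corner points, each derived from the first and last resulting PWL points instead of a channel-wide min/max. A minimal userspace sketch of that per-channel setup follows; the real code uses struct fixed31_32 and the dc_fixpt_* helpers, so doubles and every name other than red/green/blue/x/y/slope are illustrative stand-ins, not the DC API.

	/*
	 * Minimal sketch of the per-channel corner-point setup from the diff.
	 * Doubles replace struct fixed31_32 to keep the example self-contained.
	 */
	#include <math.h>
	#include <stdio.h>

	struct example_point { double x, y, slope; };
	struct example_points3 { struct example_point red, green, blue; };

	static void fill_corner_points(struct example_points3 corner[2],
				       int region_start, int region_end,
				       const double first_rgb[3],
				       const double last_rgb[3])
	{
		/* All three channels share the same x at each corner. */
		corner[0].red.x = pow(2.0, region_start);
		corner[0].green.x = corner[0].red.x;
		corner[0].blue.x = corner[0].red.x;

		corner[1].red.x = pow(2.0, region_end);
		corner[1].green.x = corner[1].red.x;
		corner[1].blue.x = corner[1].red.x;

		/* Start corner: y from the first resulting PWL point, per channel. */
		corner[0].red.y = first_rgb[0];
		corner[0].green.y = first_rgb[1];
		corner[0].blue.y = first_rgb[2];
		corner[0].red.slope = corner[0].red.y / corner[0].red.x;
		corner[0].green.slope = corner[0].green.y / corner[0].green.x;
		corner[0].blue.slope = corner[0].blue.y / corner[0].blue.x;

		/* End corner: y from the last HW point, slope zero (non-PQ case). */
		corner[1].red.y = last_rgb[0];
		corner[1].green.y = last_rgb[1];
		corner[1].blue.y = last_rgb[2];
		corner[1].red.slope = 0.0;
		corner[1].green.slope = 0.0;
		corner[1].blue.slope = 0.0;
	}

	int main(void)
	{
		struct example_points3 corner[2];
		double first[3] = { 0.01, 0.012, 0.009 };
		double last[3] = { 0.98, 1.0, 0.97 };

		fill_corner_points(corner, -10, 1, first, last);
		printf("start slope (red) = %f\n", corner[0].red.slope);
		return 0;
	}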
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h index 7a531b02871f..5ae4d69391a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h | |||
@@ -98,7 +98,7 @@ void cm_helper_program_xfer_func( | |||
98 | 98 | ||
99 | bool cm_helper_convert_to_custom_float( | 99 | bool cm_helper_convert_to_custom_float( |
100 | struct pwl_result_data *rgb_resulted, | 100 | struct pwl_result_data *rgb_resulted, |
101 | struct curve_points *arr_points, | 101 | struct curve_points3 *corner_points, |
102 | uint32_t hw_points_num, | 102 | uint32_t hw_points_num, |
103 | bool fixpoint); | 103 | bool fixpoint); |
104 | 104 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 193184affefb..87495dea45ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include "dcn10_hubbub.h" | 45 | #include "dcn10_hubbub.h" |
46 | #include "dcn10_cm_common.h" | 46 | #include "dcn10_cm_common.h" |
47 | #include "dc_link_dp.h" | 47 | #include "dc_link_dp.h" |
48 | #include "dccg.h" | ||
48 | 49 | ||
49 | #define DC_LOGGER_INIT(logger) | 50 | #define DC_LOGGER_INIT(logger) |
50 | 51 | ||
@@ -786,7 +787,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) | |||
786 | &dc->current_state->res_ctx.pipe_ctx[i]; | 787 | &dc->current_state->res_ctx.pipe_ctx[i]; |
787 | if (pipe_ctx != NULL) { | 788 | if (pipe_ctx != NULL) { |
788 | hubp = pipe_ctx->plane_res.hubp; | 789 | hubp = pipe_ctx->plane_res.hubp; |
789 | if (hubp != NULL) { | 790 | if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) { |
790 | if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) { | 791 | if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) { |
791 | /* one pipe underflow, we will reset all the pipes*/ | 792 | /* one pipe underflow, we will reset all the pipes*/ |
792 | need_recover = true; | 793 | need_recover = true; |
@@ -812,7 +813,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) | |||
812 | if (pipe_ctx != NULL) { | 813 | if (pipe_ctx != NULL) { |
813 | hubp = pipe_ctx->plane_res.hubp; | 814 | hubp = pipe_ctx->plane_res.hubp; |
814 | /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/ | 815 | /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/ |
815 | if (hubp != NULL) | 816 | if (hubp != NULL && hubp->funcs->set_hubp_blank_en) |
816 | hubp->funcs->set_hubp_blank_en(hubp, true); | 817 | hubp->funcs->set_hubp_blank_en(hubp, true); |
817 | } | 818 | } |
818 | } | 819 | } |
@@ -825,7 +826,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) | |||
825 | if (pipe_ctx != NULL) { | 826 | if (pipe_ctx != NULL) { |
826 | hubp = pipe_ctx->plane_res.hubp; | 827 | hubp = pipe_ctx->plane_res.hubp; |
827 | /*DCHUBP_CNTL:HUBP_DISABLE=1*/ | 828 | /*DCHUBP_CNTL:HUBP_DISABLE=1*/ |
828 | if (hubp != NULL) | 829 | if (hubp != NULL && hubp->funcs->hubp_disable_control) |
829 | hubp->funcs->hubp_disable_control(hubp, true); | 830 | hubp->funcs->hubp_disable_control(hubp, true); |
830 | } | 831 | } |
831 | } | 832 | } |
@@ -835,7 +836,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) | |||
835 | if (pipe_ctx != NULL) { | 836 | if (pipe_ctx != NULL) { |
836 | hubp = pipe_ctx->plane_res.hubp; | 837 | hubp = pipe_ctx->plane_res.hubp; |
837 | /*DCHUBP_CNTL:HUBP_DISABLE=0*/ | 838 | /*DCHUBP_CNTL:HUBP_DISABLE=0*/ |
838 | if (hubp != NULL) | 839 | if (hubp != NULL && hubp->funcs->hubp_disable_control) |
839 | hubp->funcs->hubp_disable_control(hubp, true); | 840 | hubp->funcs->hubp_disable_control(hubp, true); |
840 | } | 841 | } |
841 | } | 842 | } |
@@ -847,7 +848,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) | |||
847 | if (pipe_ctx != NULL) { | 848 | if (pipe_ctx != NULL) { |
848 | hubp = pipe_ctx->plane_res.hubp; | 849 | hubp = pipe_ctx->plane_res.hubp; |
849 | /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/ | 850 | /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/ |
850 | if (hubp != NULL) | 851 | if (hubp != NULL && hubp->funcs->set_hubp_blank_en) |
851 | hubp->funcs->set_hubp_blank_en(hubp, true); | 852 | hubp->funcs->set_hubp_blank_en(hubp, true); |
852 | } | 853 | } |
853 | } | 854 | } |
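
The recovery workaround above now guards each hubp callback with a function-pointer check before calling it, so hardware variants that do not populate a hook are skipped rather than dereferencing NULL. A minimal sketch of the guard pattern, with placeholder types rather than the DC hubp interface:

	/*
	 * Optional hooks in a funcs table are only called when both the object
	 * and the hook are present; a missing hook is treated as "no-op".
	 */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct widget;

	struct widget_funcs {
		/* Optional: may be left NULL by implementations lacking the feature. */
		bool (*get_underflow_status)(struct widget *w);
	};

	struct widget {
		const struct widget_funcs *funcs;
	};

	static bool widget_underflowed(struct widget *w)
	{
		if (w != NULL && w->funcs->get_underflow_status)
			return w->funcs->get_underflow_status(w);
		return false;	/* hook not implemented: report no underflow */
	}

	int main(void)
	{
		struct widget_funcs no_hooks = { .get_underflow_status = NULL };
		struct widget w = { .funcs = &no_hooks };

		printf("underflow: %d\n", widget_underflowed(&w));
		return 0;
	}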
@@ -1126,7 +1127,7 @@ static void dcn10_init_hw(struct dc *dc) | |||
1126 | 1127 | ||
1127 | enable_power_gating_plane(dc->hwseq, true); | 1128 | enable_power_gating_plane(dc->hwseq, true); |
1128 | 1129 | ||
1129 | memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks)); | 1130 | memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks)); |
1130 | } | 1131 | } |
1131 | 1132 | ||
1132 | static void reset_hw_ctx_wrap( | 1133 | static void reset_hw_ctx_wrap( |
@@ -1603,7 +1604,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1, | |||
1603 | } | 1604 | } |
1604 | 1605 | ||
1605 | 1606 | ||
1606 | static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) | 1607 | void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) |
1607 | { | 1608 | { |
1608 | struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); | 1609 | struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); |
1609 | struct vm_system_aperture_param apt = { {{ 0 } } }; | 1610 | struct vm_system_aperture_param apt = { {{ 0 } } }; |
@@ -1703,33 +1704,22 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx) | |||
1703 | pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust); | 1704 | pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust); |
1704 | } | 1705 | } |
1705 | 1706 | ||
1706 | 1707 | static void dcn10_program_output_csc(struct dc *dc, | |
1707 | static void program_csc_matrix(struct pipe_ctx *pipe_ctx, | 1708 | struct pipe_ctx *pipe_ctx, |
1708 | enum dc_color_space colorspace, | 1709 | enum dc_color_space colorspace, |
1709 | uint16_t *matrix) | 1710 | uint16_t *matrix, |
1711 | int opp_id) | ||
1710 | { | 1712 | { |
1711 | if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { | 1713 | if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { |
1712 | if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) | 1714 | if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) |
1713 | pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix); | 1715 | pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix); |
1714 | } else { | 1716 | } else { |
1715 | if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL) | 1717 | if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL) |
1716 | pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace); | 1718 | pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace); |
1717 | } | 1719 | } |
1718 | } | 1720 | } |
1719 | 1721 | ||
1720 | static void dcn10_program_output_csc(struct dc *dc, | 1722 | bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) |
1721 | struct pipe_ctx *pipe_ctx, | ||
1722 | enum dc_color_space colorspace, | ||
1723 | uint16_t *matrix, | ||
1724 | int opp_id) | ||
1725 | { | ||
1726 | if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) | ||
1727 | program_csc_matrix(pipe_ctx, | ||
1728 | colorspace, | ||
1729 | matrix); | ||
1730 | } | ||
1731 | |||
1732 | static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) | ||
1733 | { | 1723 | { |
1734 | if (pipe_ctx->plane_state->visible) | 1724 | if (pipe_ctx->plane_state->visible) |
1735 | return true; | 1725 | return true; |
@@ -1738,7 +1728,7 @@ static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) | |||
1738 | return false; | 1728 | return false; |
1739 | } | 1729 | } |
1740 | 1730 | ||
1741 | static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) | 1731 | bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) |
1742 | { | 1732 | { |
1743 | if (pipe_ctx->plane_state->visible) | 1733 | if (pipe_ctx->plane_state->visible) |
1744 | return true; | 1734 | return true; |
@@ -1747,7 +1737,7 @@ static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) | |||
1747 | return false; | 1737 | return false; |
1748 | } | 1738 | } |
1749 | 1739 | ||
1750 | static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) | 1740 | bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) |
1751 | { | 1741 | { |
1752 | if (pipe_ctx->plane_state->visible) | 1742 | if (pipe_ctx->plane_state->visible) |
1753 | return true; | 1743 | return true; |
@@ -1943,10 +1933,6 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) | |||
1943 | struct mpc *mpc = dc->res_pool->mpc; | 1933 | struct mpc *mpc = dc->res_pool->mpc; |
1944 | struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); | 1934 | struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); |
1945 | 1935 | ||
1946 | |||
1947 | |||
1948 | /* TODO: proper fix once fpga works */ | ||
1949 | |||
1950 | if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { | 1936 | if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { |
1951 | dcn10_get_hdr_visual_confirm_color( | 1937 | dcn10_get_hdr_visual_confirm_color( |
1952 | pipe_ctx, &blnd_cfg.black_color); | 1938 | pipe_ctx, &blnd_cfg.black_color); |
@@ -2026,8 +2012,6 @@ static void update_scaler(struct pipe_ctx *pipe_ctx) | |||
2026 | bool per_pixel_alpha = | 2012 | bool per_pixel_alpha = |
2027 | pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; | 2013 | pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; |
2028 | 2014 | ||
2029 | /* TODO: proper fix once fpga works */ | ||
2030 | |||
2031 | pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha; | 2015 | pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha; |
2032 | pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; | 2016 | pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; |
2033 | /* scaler configuration */ | 2017 | /* scaler configuration */ |
@@ -2035,7 +2019,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx) | |||
2035 | pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); | 2019 | pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); |
2036 | } | 2020 | } |
2037 | 2021 | ||
2038 | static void update_dchubp_dpp( | 2022 | void update_dchubp_dpp( |
2039 | struct dc *dc, | 2023 | struct dc *dc, |
2040 | struct pipe_ctx *pipe_ctx, | 2024 | struct pipe_ctx *pipe_ctx, |
2041 | struct dc_state *context) | 2025 | struct dc_state *context) |
@@ -2052,16 +2036,22 @@ static void update_dchubp_dpp( | |||
2052 | */ | 2036 | */ |
2053 | if (plane_state->update_flags.bits.full_update) { | 2037 | if (plane_state->update_flags.bits.full_update) { |
2054 | bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <= | 2038 | bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <= |
2055 | dc->res_pool->dccg->clks.dispclk_khz / 2; | 2039 | dc->res_pool->clk_mgr->clks.dispclk_khz / 2; |
2056 | 2040 | ||
2057 | dpp->funcs->dpp_dppclk_control( | 2041 | dpp->funcs->dpp_dppclk_control( |
2058 | dpp, | 2042 | dpp, |
2059 | should_divided_by_2, | 2043 | should_divided_by_2, |
2060 | true); | 2044 | true); |
2061 | 2045 | ||
2062 | dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ? | 2046 | if (dc->res_pool->dccg) |
2063 | dc->res_pool->dccg->clks.dispclk_khz / 2 : | 2047 | dc->res_pool->dccg->funcs->update_dpp_dto( |
2064 | dc->res_pool->dccg->clks.dispclk_khz; | 2048 | dc->res_pool->dccg, |
2049 | dpp->inst, | ||
2050 | pipe_ctx->plane_res.bw.calc.dppclk_khz); | ||
2051 | else | ||
2052 | dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ? | ||
2053 | dc->res_pool->clk_mgr->clks.dispclk_khz / 2 : | ||
2054 | dc->res_pool->clk_mgr->clks.dispclk_khz; | ||
2065 | } | 2055 | } |
2066 | 2056 | ||
2067 | /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG | 2057 | /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG |
@@ -2182,7 +2172,7 @@ static void dcn10_blank_pixel_data( | |||
2182 | } | 2172 | } |
2183 | } | 2173 | } |
2184 | 2174 | ||
2185 | static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) | 2175 | void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) |
2186 | { | 2176 | { |
2187 | struct fixed31_32 multiplier = dc_fixpt_from_fraction( | 2177 | struct fixed31_32 multiplier = dc_fixpt_from_fraction( |
2188 | pipe_ctx->plane_state->sdr_white_level, 80); | 2178 | pipe_ctx->plane_state->sdr_white_level, 80); |
@@ -2257,47 +2247,7 @@ static void program_all_pipe_in_tree( | |||
2257 | } | 2247 | } |
2258 | } | 2248 | } |
2259 | 2249 | ||
2260 | static void dcn10_pplib_apply_display_requirements( | 2250 | struct pipe_ctx *find_top_pipe_for_stream( |
2261 | struct dc *dc, | ||
2262 | struct dc_state *context) | ||
2263 | { | ||
2264 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | ||
2265 | |||
2266 | pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz; | ||
2267 | pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz; | ||
2268 | pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz; | ||
2269 | pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz; | ||
2270 | pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz; | ||
2271 | pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz; | ||
2272 | dce110_fill_display_configs(context, pp_display_cfg); | ||
2273 | |||
2274 | if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof( | ||
2275 | struct dm_pp_display_configuration)) != 0) | ||
2276 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
2277 | |||
2278 | dc->prev_display_config = *pp_display_cfg; | ||
2279 | } | ||
2280 | |||
2281 | static void optimize_shared_resources(struct dc *dc) | ||
2282 | { | ||
2283 | if (dc->current_state->stream_count == 0) { | ||
2284 | /* S0i2 message */ | ||
2285 | dcn10_pplib_apply_display_requirements(dc, dc->current_state); | ||
2286 | } | ||
2287 | |||
2288 | if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) | ||
2289 | dcn_bw_notify_pplib_of_wm_ranges(dc); | ||
2290 | } | ||
2291 | |||
2292 | static void ready_shared_resources(struct dc *dc, struct dc_state *context) | ||
2293 | { | ||
2294 | /* S0i2 message */ | ||
2295 | if (dc->current_state->stream_count == 0 && | ||
2296 | context->stream_count != 0) | ||
2297 | dcn10_pplib_apply_display_requirements(dc, context); | ||
2298 | } | ||
2299 | |||
2300 | static struct pipe_ctx *find_top_pipe_for_stream( | ||
2301 | struct dc *dc, | 2251 | struct dc *dc, |
2302 | struct dc_state *context, | 2252 | struct dc_state *context, |
2303 | const struct dc_stream_state *stream) | 2253 | const struct dc_stream_state *stream) |
@@ -2398,10 +2348,9 @@ static void dcn10_apply_ctx_for_surface( | |||
2398 | hubbub1_wm_change_req_wa(dc->res_pool->hubbub); | 2348 | hubbub1_wm_change_req_wa(dc->res_pool->hubbub); |
2399 | } | 2349 | } |
2400 | 2350 | ||
2401 | static void dcn10_set_bandwidth( | 2351 | static void dcn10_prepare_bandwidth( |
2402 | struct dc *dc, | 2352 | struct dc *dc, |
2403 | struct dc_state *context, | 2353 | struct dc_state *context) |
2404 | bool safe_to_lower) | ||
2405 | { | 2354 | { |
2406 | if (dc->debug.sanity_checks) | 2355 | if (dc->debug.sanity_checks) |
2407 | dcn10_verify_allow_pstate_change_high(dc); | 2356 | dcn10_verify_allow_pstate_change_high(dc); |
@@ -2410,12 +2359,39 @@ static void dcn10_set_bandwidth( | |||
2410 | if (context->stream_count == 0) | 2359 | if (context->stream_count == 0) |
2411 | context->bw.dcn.clk.phyclk_khz = 0; | 2360 | context->bw.dcn.clk.phyclk_khz = 0; |
2412 | 2361 | ||
2413 | dc->res_pool->dccg->funcs->update_clocks( | 2362 | dc->res_pool->clk_mgr->funcs->update_clocks( |
2414 | dc->res_pool->dccg, | 2363 | dc->res_pool->clk_mgr, |
2415 | &context->bw.dcn.clk, | 2364 | context, |
2416 | safe_to_lower); | 2365 | false); |
2366 | } | ||
2417 | 2367 | ||
2418 | dcn10_pplib_apply_display_requirements(dc, context); | 2368 | hubbub1_program_watermarks(dc->res_pool->hubbub, |
2369 | &context->bw.dcn.watermarks, | ||
2370 | dc->res_pool->ref_clock_inKhz / 1000, | ||
2371 | true); | ||
2372 | |||
2373 | if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) | ||
2374 | dcn_bw_notify_pplib_of_wm_ranges(dc); | ||
2375 | |||
2376 | if (dc->debug.sanity_checks) | ||
2377 | dcn10_verify_allow_pstate_change_high(dc); | ||
2378 | } | ||
2379 | |||
2380 | static void dcn10_optimize_bandwidth( | ||
2381 | struct dc *dc, | ||
2382 | struct dc_state *context) | ||
2383 | { | ||
2384 | if (dc->debug.sanity_checks) | ||
2385 | dcn10_verify_allow_pstate_change_high(dc); | ||
2386 | |||
2387 | if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { | ||
2388 | if (context->stream_count == 0) | ||
2389 | context->bw.dcn.clk.phyclk_khz = 0; | ||
2390 | |||
2391 | dc->res_pool->clk_mgr->funcs->update_clocks( | ||
2392 | dc->res_pool->clk_mgr, | ||
2393 | context, | ||
2394 | true); | ||
2419 | } | 2395 | } |
2420 | 2396 | ||
2421 | hubbub1_program_watermarks(dc->res_pool->hubbub, | 2397 | hubbub1_program_watermarks(dc->res_pool->hubbub, |
@@ -2423,6 +2399,9 @@ static void dcn10_set_bandwidth( | |||
2423 | dc->res_pool->ref_clock_inKhz / 1000, | 2399 | dc->res_pool->ref_clock_inKhz / 1000, |
2424 | true); | 2400 | true); |
2425 | 2401 | ||
2402 | if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) | ||
2403 | dcn_bw_notify_pplib_of_wm_ranges(dc); | ||
2404 | |||
2426 | if (dc->debug.sanity_checks) | 2405 | if (dc->debug.sanity_checks) |
2427 | dcn10_verify_allow_pstate_change_high(dc); | 2406 | dcn10_verify_allow_pstate_change_high(dc); |
2428 | } | 2407 | } |
@@ -2694,7 +2673,6 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) | |||
2694 | 2673 | ||
2695 | static const struct hw_sequencer_funcs dcn10_funcs = { | 2674 | static const struct hw_sequencer_funcs dcn10_funcs = { |
2696 | .program_gamut_remap = program_gamut_remap, | 2675 | .program_gamut_remap = program_gamut_remap, |
2697 | .program_csc_matrix = program_csc_matrix, | ||
2698 | .init_hw = dcn10_init_hw, | 2676 | .init_hw = dcn10_init_hw, |
2699 | .apply_ctx_to_hw = dce110_apply_ctx_to_hw, | 2677 | .apply_ctx_to_hw = dce110_apply_ctx_to_hw, |
2700 | .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, | 2678 | .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, |
@@ -2721,7 +2699,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = { | |||
2721 | .disable_plane = dcn10_disable_plane, | 2699 | .disable_plane = dcn10_disable_plane, |
2722 | .blank_pixel_data = dcn10_blank_pixel_data, | 2700 | .blank_pixel_data = dcn10_blank_pixel_data, |
2723 | .pipe_control_lock = dcn10_pipe_control_lock, | 2701 | .pipe_control_lock = dcn10_pipe_control_lock, |
2724 | .set_bandwidth = dcn10_set_bandwidth, | 2702 | .prepare_bandwidth = dcn10_prepare_bandwidth, |
2703 | .optimize_bandwidth = dcn10_optimize_bandwidth, | ||
2725 | .reset_hw_ctx_wrap = reset_hw_ctx_wrap, | 2704 | .reset_hw_ctx_wrap = reset_hw_ctx_wrap, |
2726 | .enable_stream_timing = dcn10_enable_stream_timing, | 2705 | .enable_stream_timing = dcn10_enable_stream_timing, |
2727 | .set_drr = set_drr, | 2706 | .set_drr = set_drr, |
@@ -2732,10 +2711,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = { | |||
2732 | .log_hw_state = dcn10_log_hw_state, | 2711 | .log_hw_state = dcn10_log_hw_state, |
2733 | .get_hw_state = dcn10_get_hw_state, | 2712 | .get_hw_state = dcn10_get_hw_state, |
2734 | .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, | 2713 | .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, |
2735 | .ready_shared_resources = ready_shared_resources, | ||
2736 | .optimize_shared_resources = optimize_shared_resources, | ||
2737 | .pplib_apply_display_requirements = | ||
2738 | dcn10_pplib_apply_display_requirements, | ||
2739 | .edp_backlight_control = hwss_edp_backlight_control, | 2714 | .edp_backlight_control = hwss_edp_backlight_control, |
2740 | .edp_power_control = hwss_edp_power_control, | 2715 | .edp_power_control = hwss_edp_power_control, |
2741 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, | 2716 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, |
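
The hw sequencer change above splits the old set_bandwidth(dc, context, safe_to_lower) into prepare_bandwidth (called before mode programming, never allowed to lower clocks) and optimize_bandwidth (called afterwards, allowed to lower). The standalone sketch below only mirrors that calling order and the safe_to_lower flag; every type and function name in it is an illustrative stand-in, not the DC API.

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_clocks { int dispclk_khz; };

	static void update_clocks(struct fake_clocks *cur, int wanted_khz,
				  bool safe_to_lower)
	{
		/* Raising is always allowed; lowering only when safe_to_lower is set. */
		if (wanted_khz > cur->dispclk_khz || safe_to_lower)
			cur->dispclk_khz = wanted_khz;
	}

	static void prepare_bandwidth(struct fake_clocks *cur, int wanted_khz)
	{
		/* Before programming: clocks must already be high enough. */
		update_clocks(cur, wanted_khz, false);
	}

	static void optimize_bandwidth(struct fake_clocks *cur, int wanted_khz)
	{
		/* After programming settles: clocks may drop to the new target. */
		update_clocks(cur, wanted_khz, true);
	}

	int main(void)
	{
		struct fake_clocks clk = { .dispclk_khz = 600000 };

		prepare_bandwidth(&clk, 400000);	/* stays at 600000 */
		printf("after prepare: %d kHz\n", clk.dispclk_khz);
		optimize_bandwidth(&clk, 400000);	/* drops to 400000 */
		printf("after optimize: %d kHz\n", clk.dispclk_khz);
		return 0;
	}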
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 84d461e0ed3e..5e5610c9e600 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | |||
@@ -51,4 +51,24 @@ void dcn10_get_hw_state( | |||
51 | char *pBuf, unsigned int bufSize, | 51 | char *pBuf, unsigned int bufSize, |
52 | unsigned int mask); | 52 | unsigned int mask); |
53 | 53 | ||
54 | bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); | ||
55 | |||
56 | bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); | ||
57 | |||
58 | bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); | ||
59 | |||
60 | void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp); | ||
61 | |||
62 | void set_hdr_multiplier(struct pipe_ctx *pipe_ctx); | ||
63 | |||
64 | void update_dchubp_dpp( | ||
65 | struct dc *dc, | ||
66 | struct pipe_ctx *pipe_ctx, | ||
67 | struct dc_state *context); | ||
68 | |||
69 | struct pipe_ctx *find_top_pipe_for_stream( | ||
70 | struct dc *dc, | ||
71 | struct dc_state *context, | ||
72 | const struct dc_stream_state *stream); | ||
73 | |||
54 | #endif /* __DC_HWSS_DCN10_H__ */ | 74 | #endif /* __DC_HWSS_DCN10_H__ */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index ba6a8686062f..477ab9222216 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | |||
@@ -589,7 +589,7 @@ static bool dcn10_link_encoder_validate_hdmi_output( | |||
589 | return false; | 589 | return false; |
590 | 590 | ||
591 | /* DCE11 HW does not support 420 */ | 591 | /* DCE11 HW does not support 420 */ |
592 | if (!enc10->base.features.ycbcr420_supported && | 592 | if (!enc10->base.features.hdmi_ycbcr420_supported && |
593 | crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) | 593 | crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) |
594 | return false; | 594 | return false; |
595 | 595 | ||
@@ -606,8 +606,10 @@ bool dcn10_link_encoder_validate_dp_output( | |||
606 | const struct dcn10_link_encoder *enc10, | 606 | const struct dcn10_link_encoder *enc10, |
607 | const struct dc_crtc_timing *crtc_timing) | 607 | const struct dc_crtc_timing *crtc_timing) |
608 | { | 608 | { |
609 | if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) | 609 | if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { |
610 | return false; | 610 | if (!enc10->base.features.dp_ycbcr420_supported) |
611 | return false; | ||
612 | } | ||
611 | 613 | ||
612 | return true; | 614 | return true; |
613 | } | 615 | } |
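
The link encoder change above splits the single ycbcr420_supported capability into hdmi_ycbcr420_supported and dp_ycbcr420_supported, and the DP validation now consults the DP flag instead of rejecting 4:2:0 unconditionally. A small sketch of the resulting check, with simplified stand-in structs; only the two flag names and the accept/reject logic come from the diff:

	#include <stdbool.h>
	#include <stdio.h>

	enum pixel_encoding { ENC_RGB, ENC_YCBCR444, ENC_YCBCR420 };

	struct enc_features {
		bool hdmi_ycbcr420_supported;
		bool dp_ycbcr420_supported;
	};

	static bool validate_dp_420(const struct enc_features *f,
				    enum pixel_encoding enc)
	{
		/* 4:2:0 over DP is rejected only when the encoder lacks support. */
		if (enc == ENC_YCBCR420 && !f->dp_ycbcr420_supported)
			return false;
		return true;
	}

	int main(void)
	{
		struct enc_features dcn10 = {
			.hdmi_ycbcr420_supported = true,
			.dp_ycbcr420_supported = false,	/* matches the DCN10 resource table */
		};

		printf("DP 4:2:0 valid: %d\n", validate_dp_420(&dcn10, ENC_YCBCR420));
		return 0;
	}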
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 54626682bab2..7d1f66797cb3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | |||
@@ -87,9 +87,8 @@ static void optc1_disable_stereo(struct timing_generator *optc) | |||
87 | REG_SET(OTG_STEREO_CONTROL, 0, | 87 | REG_SET(OTG_STEREO_CONTROL, 0, |
88 | OTG_STEREO_EN, 0); | 88 | OTG_STEREO_EN, 0); |
89 | 89 | ||
90 | REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0, | 90 | REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0, |
91 | OTG_3D_STRUCTURE_EN, 0, | 91 | OTG_3D_STRUCTURE_EN, 0, |
92 | OTG_3D_STRUCTURE_V_UPDATE_MODE, 0, | ||
93 | OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0); | 92 | OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0); |
94 | } | 93 | } |
95 | 94 | ||
@@ -274,10 +273,12 @@ void optc1_program_timing( | |||
274 | * program the reg for interrupt postition. | 273 | * program the reg for interrupt postition. |
275 | */ | 274 | */ |
276 | vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1; | 275 | vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1; |
277 | if (vertical_line_start < 0) { | 276 | v_fp2 = 0; |
278 | ASSERT(0); | 277 | if (vertical_line_start < 0) |
278 | v_fp2 = -vertical_line_start; | ||
279 | if (vertical_line_start < 0) | ||
279 | vertical_line_start = 0; | 280 | vertical_line_start = 0; |
280 | } | 281 | |
281 | REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0, | 282 | REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0, |
282 | OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start); | 283 | OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start); |
283 | 284 | ||
@@ -296,9 +297,6 @@ void optc1_program_timing( | |||
296 | if (patched_crtc_timing.flags.INTERLACE == 1) | 297 | if (patched_crtc_timing.flags.INTERLACE == 1) |
297 | field_num = 1; | 298 | field_num = 1; |
298 | } | 299 | } |
299 | v_fp2 = 0; | ||
300 | if (optc->dlg_otg_param.vstartup_start > asic_blank_end) | ||
301 | v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end; | ||
302 | 300 | ||
303 | /* Interlace */ | 301 | /* Interlace */ |
304 | if (patched_crtc_timing.flags.INTERLACE == 1) { | 302 | if (patched_crtc_timing.flags.INTERLACE == 1) { |
@@ -1155,9 +1153,8 @@ static void optc1_enable_stereo(struct timing_generator *optc, | |||
1155 | OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1); | 1153 | OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1); |
1156 | 1154 | ||
1157 | if (flags->PROGRAM_STEREO) | 1155 | if (flags->PROGRAM_STEREO) |
1158 | REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL, | 1156 | REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL, |
1159 | OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED, | 1157 | OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED, |
1160 | OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED, | ||
1161 | OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED); | 1158 | OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED); |
1162 | 1159 | ||
1163 | } | 1160 | } |
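
In optc1_program_timing the old code asserted when the interrupt line came out negative and computed v_fp2 from a boolean comparison; the new code clamps the line to zero and reuses the clamped-off amount as v_fp2. The sketch below reproduces only that arithmetic, with plain ints and a printf standing in for the OTG register write:

	#include <stdio.h>

	static void program_vline(int asic_blank_end, int vstartup_start)
	{
		int vertical_line_start = asic_blank_end - vstartup_start + 1;
		int v_fp2 = 0;

		if (vertical_line_start < 0)
			v_fp2 = -vertical_line_start;
		if (vertical_line_start < 0)
			vertical_line_start = 0;

		printf("VERTICAL_INTERRUPT2_LINE_START = %d, v_fp2 = %d\n",
		       vertical_line_start, v_fp2);
	}

	int main(void)
	{
		program_vline(30, 26);	/* normal case: line 5, no shift needed */
		program_vline(30, 40);	/* would be -9: clamp to 0, v_fp2 = 9 */
		return 0;
	}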
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index a71453a15ae3..47dbe4bb294a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | |||
@@ -28,23 +28,23 @@ | |||
28 | 28 | ||
29 | #include "resource.h" | 29 | #include "resource.h" |
30 | #include "include/irq_service_interface.h" | 30 | #include "include/irq_service_interface.h" |
31 | #include "dcn10/dcn10_resource.h" | 31 | #include "dcn10_resource.h" |
32 | 32 | ||
33 | #include "dcn10/dcn10_ipp.h" | 33 | #include "dcn10_ipp.h" |
34 | #include "dcn10/dcn10_mpc.h" | 34 | #include "dcn10_mpc.h" |
35 | #include "irq/dcn10/irq_service_dcn10.h" | 35 | #include "irq/dcn10/irq_service_dcn10.h" |
36 | #include "dcn10/dcn10_dpp.h" | 36 | #include "dcn10_dpp.h" |
37 | #include "dcn10_optc.h" | 37 | #include "dcn10_optc.h" |
38 | #include "dcn10/dcn10_hw_sequencer.h" | 38 | #include "dcn10_hw_sequencer.h" |
39 | #include "dce110/dce110_hw_sequencer.h" | 39 | #include "dce110/dce110_hw_sequencer.h" |
40 | #include "dcn10/dcn10_opp.h" | 40 | #include "dcn10_opp.h" |
41 | #include "dcn10/dcn10_link_encoder.h" | 41 | #include "dcn10_link_encoder.h" |
42 | #include "dcn10/dcn10_stream_encoder.h" | 42 | #include "dcn10_stream_encoder.h" |
43 | #include "dce/dce_clocks.h" | 43 | #include "dcn10_clk_mgr.h" |
44 | #include "dce/dce_clock_source.h" | 44 | #include "dce/dce_clock_source.h" |
45 | #include "dce/dce_audio.h" | 45 | #include "dce/dce_audio.h" |
46 | #include "dce/dce_hwseq.h" | 46 | #include "dce/dce_hwseq.h" |
47 | #include "../virtual/virtual_stream_encoder.h" | 47 | #include "virtual/virtual_stream_encoder.h" |
48 | #include "dce110/dce110_resource.h" | 48 | #include "dce110/dce110_resource.h" |
49 | #include "dce112/dce112_resource.h" | 49 | #include "dce112/dce112_resource.h" |
50 | #include "dcn10_hubp.h" | 50 | #include "dcn10_hubp.h" |
@@ -438,6 +438,7 @@ static const struct dcn_optc_mask tg_mask = { | |||
438 | 438 | ||
439 | 439 | ||
440 | static const struct bios_registers bios_regs = { | 440 | static const struct bios_registers bios_regs = { |
441 | NBIO_SR(BIOS_SCRATCH_0), | ||
441 | NBIO_SR(BIOS_SCRATCH_3), | 442 | NBIO_SR(BIOS_SCRATCH_3), |
442 | NBIO_SR(BIOS_SCRATCH_6) | 443 | NBIO_SR(BIOS_SCRATCH_6) |
443 | }; | 444 | }; |
@@ -719,7 +720,8 @@ static struct timing_generator *dcn10_timing_generator_create( | |||
719 | static const struct encoder_feature_support link_enc_feature = { | 720 | static const struct encoder_feature_support link_enc_feature = { |
720 | .max_hdmi_deep_color = COLOR_DEPTH_121212, | 721 | .max_hdmi_deep_color = COLOR_DEPTH_121212, |
721 | .max_hdmi_pixel_clock = 600000, | 722 | .max_hdmi_pixel_clock = 600000, |
722 | .ycbcr420_supported = true, | 723 | .hdmi_ycbcr420_supported = true, |
724 | .dp_ycbcr420_supported = false, | ||
723 | .flags.bits.IS_HBR2_CAPABLE = true, | 725 | .flags.bits.IS_HBR2_CAPABLE = true, |
724 | .flags.bits.IS_HBR3_CAPABLE = true, | 726 | .flags.bits.IS_HBR3_CAPABLE = true, |
725 | .flags.bits.IS_TPS3_CAPABLE = true, | 727 | .flags.bits.IS_TPS3_CAPABLE = true, |
@@ -949,8 +951,8 @@ static void destruct(struct dcn10_resource_pool *pool) | |||
949 | if (pool->base.dmcu != NULL) | 951 | if (pool->base.dmcu != NULL) |
950 | dce_dmcu_destroy(&pool->base.dmcu); | 952 | dce_dmcu_destroy(&pool->base.dmcu); |
951 | 953 | ||
952 | if (pool->base.dccg != NULL) | 954 | if (pool->base.clk_mgr != NULL) |
953 | dce_dccg_destroy(&pool->base.dccg); | 955 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
954 | 956 | ||
955 | kfree(pool->base.pp_smu); | 957 | kfree(pool->base.pp_smu); |
956 | } | 958 | } |
@@ -1276,8 +1278,8 @@ static bool construct( | |||
1276 | } | 1278 | } |
1277 | } | 1279 | } |
1278 | 1280 | ||
1279 | pool->base.dccg = dcn1_dccg_create(ctx); | 1281 | pool->base.clk_mgr = dcn1_clk_mgr_create(ctx); |
1280 | if (pool->base.dccg == NULL) { | 1282 | if (pool->base.clk_mgr == NULL) { |
1281 | dm_error("DC: failed to create display clock!\n"); | 1283 | dm_error("DC: failed to create display clock!\n"); |
1282 | BREAK_TO_DEBUGGER(); | 1284 | BREAK_TO_DEBUGGER(); |
1283 | goto fail; | 1285 | goto fail; |
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index f2ea8452d48f..beb08fd12b1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | |||
@@ -55,10 +55,10 @@ struct pp_smu { | |||
55 | 55 | ||
56 | struct pp_smu_wm_set_range { | 56 | struct pp_smu_wm_set_range { |
57 | unsigned int wm_inst; | 57 | unsigned int wm_inst; |
58 | uint32_t min_fill_clk_khz; | 58 | uint32_t min_fill_clk_mhz; |
59 | uint32_t max_fill_clk_khz; | 59 | uint32_t max_fill_clk_mhz; |
60 | uint32_t min_drain_clk_khz; | 60 | uint32_t min_drain_clk_mhz; |
61 | uint32_t max_drain_clk_khz; | 61 | uint32_t max_drain_clk_mhz; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | #define MAX_WATERMARK_SETS 4 | 64 | #define MAX_WATERMARK_SETS 4 |
@@ -77,15 +77,15 @@ struct pp_smu_display_requirement_rv { | |||
77 | */ | 77 | */ |
78 | unsigned int display_count; | 78 | unsigned int display_count; |
79 | 79 | ||
80 | /* PPSMC_MSG_SetHardMinFclkByFreq: khz | 80 | /* PPSMC_MSG_SetHardMinFclkByFreq: mhz |
81 | * FCLK will vary with DPM, but never below requested hard min | 81 | * FCLK will vary with DPM, but never below requested hard min |
82 | */ | 82 | */ |
83 | unsigned int hard_min_fclk_khz; | 83 | unsigned int hard_min_fclk_mhz; |
84 | 84 | ||
85 | /* PPSMC_MSG_SetHardMinDcefclkByFreq: khz | 85 | /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz |
86 | * fixed clock at requested freq, either from FCH bypass or DFS | 86 | * fixed clock at requested freq, either from FCH bypass or DFS |
87 | */ | 87 | */ |
88 | unsigned int hard_min_dcefclk_khz; | 88 | unsigned int hard_min_dcefclk_mhz; |
89 | 89 | ||
90 | /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz | 90 | /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz |
91 | * when DF is in cstate, dcf clock is further divided down | 91 | * when DF is in cstate, dcf clock is further divided down |
@@ -103,13 +103,19 @@ struct pp_smu_funcs_rv { | |||
103 | void (*set_display_count)(struct pp_smu *pp, int count); | 103 | void (*set_display_count)(struct pp_smu *pp, int count); |
104 | 104 | ||
105 | /* which SMU message? are reader and writer WM separate SMU msg? */ | 105 | /* which SMU message? are reader and writer WM separate SMU msg? */ |
106 | /* | ||
107 | * PPSMC_MSG_SetDriverDramAddrHigh | ||
108 | * PPSMC_MSG_SetDriverDramAddrLow | ||
109 | * PPSMC_MSG_TransferTableDram2Smu | ||
110 | * | ||
111 | * */ | ||
106 | void (*set_wm_ranges)(struct pp_smu *pp, | 112 | void (*set_wm_ranges)(struct pp_smu *pp, |
107 | struct pp_smu_wm_range_sets *ranges); | 113 | struct pp_smu_wm_range_sets *ranges); |
108 | 114 | ||
109 | /* PPSMC_MSG_SetHardMinDcfclkByFreq | 115 | /* PPSMC_MSG_SetHardMinDcfclkByFreq |
110 | * fixed clock at requested freq, either from FCH bypass or DFS | 116 | * fixed clock at requested freq, either from FCH bypass or DFS |
111 | */ | 117 | */ |
112 | void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz); | 118 | void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int mhz); |
113 | 119 | ||
114 | /* PPSMC_MSG_SetMinDeepSleepDcfclk | 120 | /* PPSMC_MSG_SetMinDeepSleepDcfclk |
115 | * when DF is in cstate, dcf clock is further divided down | 121 | * when DF is in cstate, dcf clock is further divided down |
@@ -120,12 +126,12 @@ struct pp_smu_funcs_rv { | |||
120 | /* PPSMC_MSG_SetHardMinFclkByFreq | 126 | /* PPSMC_MSG_SetHardMinFclkByFreq |
121 | * FCLK will vary with DPM, but never below requested hard min | 127 | * FCLK will vary with DPM, but never below requested hard min |
122 | */ | 128 | */ |
123 | void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz); | 129 | void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz); |
124 | 130 | ||
125 | /* PPSMC_MSG_SetHardMinSocclkByFreq | 131 | /* PPSMC_MSG_SetHardMinSocclkByFreq |
126 | * Needed for DWB support | 132 | * Needed for DWB support |
127 | */ | 133 | */ |
128 | void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz); | 134 | void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int mhz); |
129 | 135 | ||
130 | /* PME w/a */ | 136 | /* PME w/a */ |
131 | void (*set_pme_wa_enable)(struct pp_smu *pp); | 137 | void (*set_pme_wa_enable)(struct pp_smu *pp); |
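
The pp_smu interface above now expresses the watermark fill/drain ranges and the hard-min setters in MHz rather than kHz, so callers that track clocks in kHz have to convert before invoking the hooks. The helper below illustrates one such conversion; the round-to-nearest policy is an assumption for the example only, not something specified by the header.

	#include <stdio.h>

	static int khz_to_mhz(int khz)
	{
		return (khz + 500) / 1000;	/* round to nearest MHz */
	}

	int main(void)
	{
		int dcfclk_khz = 1200500;

		/* Previously passed as kHz; now converted before the SMU call. */
		printf("set_hard_min_dcfclk_by_freq(pp, %d /* MHz */)\n",
		       khz_to_mhz(dcfclk_khz));
		return 0;
	}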
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index 2b83f922ac02..1af8c777b3ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h | |||
@@ -208,22 +208,20 @@ struct dm_bl_data_point { | |||
208 | /* Brightness level as effective value in range 0-255, | 208 | /* Brightness level as effective value in range 0-255, |
209 | * corresponding to above percentage | 209 | * corresponding to above percentage |
210 | */ | 210 | */ |
211 | uint8_t signalLevel; | 211 | uint8_t signal_level; |
212 | }; | 212 | }; |
213 | 213 | ||
214 | /* Total size of the structure should not exceed 256 bytes */ | 214 | /* Total size of the structure should not exceed 256 bytes */ |
215 | struct dm_acpi_atif_backlight_caps { | 215 | struct dm_acpi_atif_backlight_caps { |
216 | |||
217 | |||
218 | uint16_t size; /* Bytes 0-1 (2 bytes) */ | 216 | uint16_t size; /* Bytes 0-1 (2 bytes) */ |
219 | uint16_t flags; /* Bytes 2-3 (2 bytes) */ | 217 | uint16_t flags; /* Bytes 2-3 (2 bytes) */ |
220 | uint8_t errorCode; /* Byte 4 */ | 218 | uint8_t error_code; /* Byte 4 */ |
221 | uint8_t acLevelPercentage; /* Byte 5 */ | 219 | uint8_t ac_level_percentage; /* Byte 5 */ |
222 | uint8_t dcLevelPercentage; /* Byte 6 */ | 220 | uint8_t dc_level_percentage; /* Byte 6 */ |
223 | uint8_t minInputSignal; /* Byte 7 */ | 221 | uint8_t min_input_signal; /* Byte 7 */ |
224 | uint8_t maxInputSignal; /* Byte 8 */ | 222 | uint8_t max_input_signal; /* Byte 8 */ |
225 | uint8_t numOfDataPoints; /* Byte 9 */ | 223 | uint8_t num_data_points; /* Byte 9 */ |
226 | struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/ | 224 | struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/ |
227 | }; | 225 | }; |
228 | 226 | ||
229 | enum dm_acpi_display_type { | 227 | enum dm_acpi_display_type { |
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index cbafce649e33..5dd04520ceca 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | |||
@@ -113,7 +113,8 @@ struct _vcs_dpi_soc_bounding_box_st { | |||
113 | int use_urgent_burst_bw; | 113 | int use_urgent_burst_bw; |
114 | double max_hscl_ratio; | 114 | double max_hscl_ratio; |
115 | double max_vscl_ratio; | 115 | double max_vscl_ratio; |
116 | struct _vcs_dpi_voltage_scaling_st clock_limits[7]; | 116 | unsigned int num_states; |
117 | struct _vcs_dpi_voltage_scaling_st clock_limits[8]; | ||
117 | }; | 118 | }; |
118 | 119 | ||
119 | struct _vcs_dpi_ip_params_st { | 120 | struct _vcs_dpi_ip_params_st { |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h index 39ee8eba3c31..d1656c9d50df 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h +++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h | |||
@@ -126,7 +126,7 @@ static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw | |||
126 | static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2) | 126 | static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2) |
127 | { | 127 | { |
128 | struct bw_fixed res; | 128 | struct bw_fixed res; |
129 | div64_u64_rem(arg1.value, arg2.value, &res.value); | 129 | div64_u64_rem(arg1.value, arg2.value, (uint64_t *)&res.value); |
130 | return res; | 130 | return res; |
131 | } | 131 | } |
132 | 132 | ||
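
The bw_mod() change above exists because div64_u64_rem() writes its remainder through a u64 pointer while bw_fixed.value is an int64_t, hence the explicit cast. The userspace sketch below reproduces the same operation with plain C types; it assumes non-negative operands, which is what the unsigned kernel helper operates on anyway.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	struct bw_fixed_example { int64_t value; };

	static struct bw_fixed_example bw_mod_example(struct bw_fixed_example a,
						      struct bw_fixed_example b)
	{
		struct bw_fixed_example res;
		uint64_t rem;

		/* What div64_u64_rem() computes: unsigned 64-bit remainder. */
		rem = (uint64_t)a.value % (uint64_t)b.value;
		res.value = (int64_t)rem;
		return res;
	}

	int main(void)
	{
		struct bw_fixed_example a = { .value = 1000 }, b = { .value = 300 };

		printf("remainder = %" PRId64 "\n", bw_mod_example(a, b).value);
		return 0;
	}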
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index c1976c175b57..e3ee96afa60e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h | |||
@@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option); | |||
82 | 82 | ||
83 | void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); | 83 | void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); |
84 | /********** DAL Core*********************/ | 84 | /********** DAL Core*********************/ |
85 | #include "display_clock.h" | 85 | #include "hw/clk_mgr.h" |
86 | #include "transform.h" | 86 | #include "transform.h" |
87 | #include "dpp.h" | 87 | #include "dpp.h" |
88 | 88 | ||
@@ -169,6 +169,7 @@ struct resource_pool { | |||
169 | unsigned int audio_count; | 169 | unsigned int audio_count; |
170 | struct audio_support audio_support; | 170 | struct audio_support audio_support; |
171 | 171 | ||
172 | struct clk_mgr *clk_mgr; | ||
172 | struct dccg *dccg; | 173 | struct dccg *dccg; |
173 | struct irq_service *irqs; | 174 | struct irq_service *irqs; |
174 | 175 | ||
@@ -287,7 +288,7 @@ struct dc_state { | |||
287 | struct dcn_bw_internal_vars dcn_bw_vars; | 288 | struct dcn_bw_internal_vars dcn_bw_vars; |
288 | #endif | 289 | #endif |
289 | 290 | ||
290 | struct dccg *dis_clk; | 291 | struct clk_mgr *dccg; |
291 | 292 | ||
292 | struct kref refcount; | 293 | struct kref refcount; |
293 | }; | 294 | }; |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index e688eb9b975c..ece954a40a8e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | |||
@@ -31,8 +31,8 @@ | |||
31 | #define __DCN_CALCS_H__ | 31 | #define __DCN_CALCS_H__ |
32 | 32 | ||
33 | #include "bw_fixed.h" | 33 | #include "bw_fixed.h" |
34 | #include "display_clock.h" | ||
35 | #include "../dml/display_mode_lib.h" | 34 | #include "../dml/display_mode_lib.h" |
35 | #include "hw/clk_mgr.h" | ||
36 | 36 | ||
37 | struct dc; | 37 | struct dc; |
38 | struct dc_state; | 38 | struct dc_state; |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index a83a48494613..abc961c0906e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | |||
@@ -47,12 +47,18 @@ struct abm_funcs { | |||
47 | bool (*set_abm_level)(struct abm *abm, unsigned int abm_level); | 47 | bool (*set_abm_level)(struct abm *abm, unsigned int abm_level); |
48 | bool (*set_abm_immediate_disable)(struct abm *abm); | 48 | bool (*set_abm_immediate_disable)(struct abm *abm); |
49 | bool (*init_backlight)(struct abm *abm); | 49 | bool (*init_backlight)(struct abm *abm); |
50 | bool (*set_backlight_level)(struct abm *abm, | 50 | |
51 | unsigned int backlight_level, | 51 | /* backlight_pwm_u16_16 is unsigned 32 bit, |
52 | * 16 bit integer + 16 fractional, where 1.0 is max backlight value. | ||
53 | */ | ||
54 | bool (*set_backlight_level_pwm)(struct abm *abm, | ||
55 | unsigned int backlight_pwm_u16_16, | ||
52 | unsigned int frame_ramp, | 56 | unsigned int frame_ramp, |
53 | unsigned int controller_id, | 57 | unsigned int controller_id, |
54 | bool use_smooth_brightness); | 58 | bool use_smooth_brightness); |
55 | unsigned int (*get_current_backlight_8_bit)(struct abm *abm); | 59 | |
60 | unsigned int (*get_current_backlight)(struct abm *abm); | ||
61 | unsigned int (*get_target_backlight)(struct abm *abm); | ||
56 | }; | 62 | }; |
57 | 63 | ||
58 | #endif | 64 | #endif |
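The new set_backlight_level_pwm() hook takes the backlight as unsigned 16.16 fixed point, where 0x00010000 (1.0) is maximum brightness. A minimal conversion sketch, assuming a hypothetical caller that starts from an 8-bit user brightness (the helper name is illustrative, not the driver's actual code):

```c
#include <linux/types.h>

/* Illustrative: map a hypothetical 0-255 user brightness onto the unsigned
 * 16.16 fixed-point scale expected by set_backlight_level_pwm(), where
 * 0x00010000 represents full backlight. */
static u32 brightness_to_pwm_u16_16(u8 user_level)
{
	/* Scale with rounding so 255 maps exactly to 0x10000 (1.0). */
	return ((u32)user_level * 0x10000 + 127) / 255;
}
```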
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 689faa16c0ae..23a4b18e5fee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | |||
@@ -23,41 +23,25 @@ | |||
23 | * | 23 | * |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #ifndef __DISPLAY_CLOCK_H__ | 26 | #ifndef __DAL_CLK_MGR_H__ |
27 | #define __DISPLAY_CLOCK_H__ | 27 | #define __DAL_CLK_MGR_H__ |
28 | 28 | ||
29 | #include "dm_services_types.h" | 29 | #include "dm_services_types.h" |
30 | #include "dc.h" | 30 | #include "dc.h" |
31 | 31 | ||
32 | /* Structure containing all state-dependent clocks | 32 | struct clk_mgr { |
33 | * (dependent on "enum clocks_state") */ | ||
34 | struct state_dependent_clocks { | ||
35 | int display_clk_khz; | ||
36 | int pixel_clk_khz; | ||
37 | }; | ||
38 | |||
39 | struct dccg { | ||
40 | struct dc_context *ctx; | 33 | struct dc_context *ctx; |
41 | const struct display_clock_funcs *funcs; | 34 | const struct clk_mgr_funcs *funcs; |
42 | 35 | ||
43 | enum dm_pp_clocks_state max_clks_state; | ||
44 | enum dm_pp_clocks_state cur_min_clks_state; | ||
45 | struct dc_clocks clks; | 36 | struct dc_clocks clks; |
46 | }; | 37 | }; |
47 | 38 | ||
48 | struct display_clock_funcs { | 39 | struct clk_mgr_funcs { |
49 | void (*update_clocks)(struct dccg *dccg, | 40 | void (*update_clocks)(struct clk_mgr *clk_mgr, |
50 | struct dc_clocks *new_clocks, | 41 | struct dc_state *context, |
51 | bool safe_to_lower); | 42 | bool safe_to_lower); |
52 | int (*set_dispclk)(struct dccg *dccg, | ||
53 | int requested_clock_khz); | ||
54 | |||
55 | int (*get_dp_ref_clk_frequency)(struct dccg *dccg); | ||
56 | 43 | ||
57 | bool (*update_dfs_bypass)(struct dccg *dccg, | 44 | int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr); |
58 | struct dc *dc, | ||
59 | struct dc_state *context, | ||
60 | int requested_clock_khz); | ||
61 | }; | 45 | }; |
62 | 46 | ||
63 | #endif /* __DISPLAY_CLOCK_H__ */ | 47 | #endif /* __DAL_CLK_MGR_H__ */ |
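Compared with the old dccg/display_clock_funcs interface, clk_mgr_funcs is deliberately small: update_clocks() now receives the whole validated dc_state rather than a pre-digested dc_clocks, and the per-call set_dispclk/update_dfs_bypass hooks are gone. A sketch of how an ASIC-specific manager could sit on top of it; every name outside the header above is hypothetical:

```c
/* Hypothetical clk_mgr backend; assumes clk_mgr.h and the usual DC headers
 * are already included. */
struct my_clk_mgr {
	struct clk_mgr base;	/* must stay first for container_of() */
	int dprefclk_khz;	/* illustrative private state */
};

static void my_update_clocks(struct clk_mgr *clk_mgr,
		struct dc_state *context, bool safe_to_lower)
{
	struct dc_clocks new_clocks = clk_mgr->clks;

	/* Derive clock requests from the validated context (omitted) and
	 * program hardware; only drop a clock when safe_to_lower is set. */
	clk_mgr->clks = new_clocks;
}

static int my_get_dp_ref_clk_frequency(struct clk_mgr *clk_mgr)
{
	struct my_clk_mgr *mgr =
		container_of(clk_mgr, struct my_clk_mgr, base);

	return mgr->dprefclk_khz;
}

static const struct clk_mgr_funcs my_clk_mgr_funcs = {
	.update_clocks = my_update_clocks,
	.get_dp_ref_clk_frequency = my_get_dp_ref_clk_frequency,
};
```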
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h new file mode 100644 index 000000000000..95a56d012626 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * Copyright 2018 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #ifndef __DAL_DCCG_H__ | ||
27 | #define __DAL_DCCG_H__ | ||
28 | |||
29 | #include "dc_types.h" | ||
30 | |||
31 | struct dccg { | ||
32 | struct dc_context *ctx; | ||
33 | const struct dccg_funcs *funcs; | ||
34 | |||
35 | int ref_dppclk; | ||
36 | }; | ||
37 | |||
38 | struct dccg_funcs { | ||
39 | void (*update_dpp_dto)(struct dccg *dccg, | ||
40 | int dpp_inst, | ||
41 | int req_dppclk); | ||
42 | }; | ||
43 | |||
44 | #endif //__DAL_DCCG_H__ | ||
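With clock-state management moved into clk_mgr, the dccg object that remains here is only responsible for the per-DPP clock dividers (DTOs). A minimal sketch of a backend for it; the implementation details are placeholders:

```c
/* Hypothetical dccg backend: select a DPP divider for one pipe based on
 * the requested clock versus the reference DPP clock. */
static void my_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
	/* Register programming omitted; the phase/modulo pair would be
	 * derived from req_dppclk and dccg->ref_dppclk for pipe dpp_inst. */
}

static const struct dccg_funcs my_dccg_funcs = {
	.update_dpp_dto = my_update_dpp_dto,
};
```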
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index cf7433ebf91a..da85537a4488 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | |||
@@ -53,6 +53,12 @@ struct curve_points { | |||
53 | uint32_t custom_float_slope; | 53 | uint32_t custom_float_slope; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct curve_points3 { | ||
57 | struct curve_points red; | ||
58 | struct curve_points green; | ||
59 | struct curve_points blue; | ||
60 | }; | ||
61 | |||
56 | struct pwl_result_data { | 62 | struct pwl_result_data { |
57 | struct fixed31_32 red; | 63 | struct fixed31_32 red; |
58 | struct fixed31_32 green; | 64 | struct fixed31_32 green; |
@@ -71,9 +77,17 @@ struct pwl_result_data { | |||
71 | uint32_t delta_blue_reg; | 77 | uint32_t delta_blue_reg; |
72 | }; | 78 | }; |
73 | 79 | ||
80 | /* arr_curve_points - regamma regions/segments specification | ||
81 | * arr_points - beginning and end point specified separately (only one on DCE) | ||
82 | * corner_points - beginning and end point for all 3 colors (DCN) | ||
83 | * rgb_resulted - final curve | ||
84 | */ | ||
74 | struct pwl_params { | 85 | struct pwl_params { |
75 | struct gamma_curve arr_curve_points[34]; | 86 | struct gamma_curve arr_curve_points[34]; |
76 | struct curve_points arr_points[2]; | 87 | union { |
88 | struct curve_points arr_points[2]; | ||
89 | struct curve_points3 corner_points[2]; | ||
90 | }; | ||
77 | struct pwl_result_data rgb_resulted[256 + 3]; | 91 | struct pwl_result_data rgb_resulted[256 + 3]; |
78 | uint32_t hw_points_num; | 92 | uint32_t hw_points_num; |
79 | }; | 93 | }; |
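Because arr_points and corner_points share a union, struct pwl_params does not grow: DCE code keeps describing the curve's two endpoints with a single begin/end pair, while DCN code can carry separate begin/end points per color channel. A small access sketch under that reading (function name illustrative):

```c
/* Illustrative: the same two PWL endpoints, viewed per ASIC family. */
static void read_pwl_endpoints(const struct pwl_params *params, bool is_dcn)
{
	if (is_dcn) {
		/* DCN: begin/end specified independently for R, G and B. */
		const struct curve_points *red_end =
			&params->corner_points[1].red;
		(void)red_end;
	} else {
		/* DCE: one begin/end pair shared by all three channels. */
		const struct curve_points *end = &params->arr_points[1];
		(void)end;
	}
}
```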
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index e28e9770e0a3..c20fdcaac53b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | |||
@@ -65,7 +65,8 @@ struct encoder_feature_support { | |||
65 | 65 | ||
66 | enum dc_color_depth max_hdmi_deep_color; | 66 | enum dc_color_depth max_hdmi_deep_color; |
67 | unsigned int max_hdmi_pixel_clock; | 67 | unsigned int max_hdmi_pixel_clock; |
68 | bool ycbcr420_supported; | 68 | bool hdmi_ycbcr420_supported; |
69 | bool dp_ycbcr420_supported; | ||
69 | }; | 70 | }; |
70 | 71 | ||
71 | union dpcd_psr_configuration { | 72 | union dpcd_psr_configuration { |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index da89c2edb07c..06df02ddff6a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | |||
@@ -31,7 +31,7 @@ | |||
31 | #include "dml/display_mode_structs.h" | 31 | #include "dml/display_mode_structs.h" |
32 | 32 | ||
33 | struct dchub_init_data; | 33 | struct dchub_init_data; |
34 | struct cstate_pstate_watermarks_st { | 34 | struct cstate_pstate_watermarks_st1 { |
35 | uint32_t cstate_exit_ns; | 35 | uint32_t cstate_exit_ns; |
36 | uint32_t cstate_enter_plus_exit_ns; | 36 | uint32_t cstate_enter_plus_exit_ns; |
37 | uint32_t pstate_change_ns; | 37 | uint32_t pstate_change_ns; |
@@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st { | |||
40 | struct dcn_watermarks { | 40 | struct dcn_watermarks { |
41 | uint32_t pte_meta_urgent_ns; | 41 | uint32_t pte_meta_urgent_ns; |
42 | uint32_t urgent_ns; | 42 | uint32_t urgent_ns; |
43 | struct cstate_pstate_watermarks_st cstate_pstate; | 43 | struct cstate_pstate_watermarks_st1 cstate_pstate; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | struct dcn_watermark_set { | 46 | struct dcn_watermark_set { |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 26f29d5da3d8..e9b702ce02dd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | |||
@@ -32,8 +32,6 @@ | |||
32 | #include "inc/hw/link_encoder.h" | 32 | #include "inc/hw/link_encoder.h" |
33 | #include "core_status.h" | 33 | #include "core_status.h" |
34 | 34 | ||
35 | #define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF | ||
36 | |||
37 | enum pipe_gating_control { | 35 | enum pipe_gating_control { |
38 | PIPE_GATING_CONTROL_DISABLE = 0, | 36 | PIPE_GATING_CONTROL_DISABLE = 0, |
39 | PIPE_GATING_CONTROL_ENABLE, | 37 | PIPE_GATING_CONTROL_ENABLE, |
@@ -87,11 +85,6 @@ struct hw_sequencer_funcs { | |||
87 | void (*program_gamut_remap)( | 85 | void (*program_gamut_remap)( |
88 | struct pipe_ctx *pipe_ctx); | 86 | struct pipe_ctx *pipe_ctx); |
89 | 87 | ||
90 | void (*program_csc_matrix)( | ||
91 | struct pipe_ctx *pipe_ctx, | ||
92 | enum dc_color_space colorspace, | ||
93 | uint16_t *matrix); | ||
94 | |||
95 | void (*program_output_csc)(struct dc *dc, | 88 | void (*program_output_csc)(struct dc *dc, |
96 | struct pipe_ctx *pipe_ctx, | 89 | struct pipe_ctx *pipe_ctx, |
97 | enum dc_color_space colorspace, | 90 | enum dc_color_space colorspace, |
@@ -177,10 +170,12 @@ struct hw_sequencer_funcs { | |||
177 | struct pipe_ctx *pipe_ctx, | 170 | struct pipe_ctx *pipe_ctx, |
178 | bool blank); | 171 | bool blank); |
179 | 172 | ||
180 | void (*set_bandwidth)( | 173 | void (*prepare_bandwidth)( |
181 | struct dc *dc, | 174 | struct dc *dc, |
182 | struct dc_state *context, | 175 | struct dc_state *context); |
183 | bool safe_to_lower); | 176 | void (*optimize_bandwidth)( |
177 | struct dc *dc, | ||
178 | struct dc_state *context); | ||
184 | 179 | ||
185 | void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, | 180 | void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, |
186 | int vmin, int vmax); | 181 | int vmin, int vmax); |
@@ -210,11 +205,6 @@ struct hw_sequencer_funcs { | |||
210 | struct resource_pool *res_pool, | 205 | struct resource_pool *res_pool, |
211 | struct pipe_ctx *pipe_ctx); | 206 | struct pipe_ctx *pipe_ctx); |
212 | 207 | ||
213 | void (*ready_shared_resources)(struct dc *dc, struct dc_state *context); | ||
214 | void (*optimize_shared_resources)(struct dc *dc); | ||
215 | void (*pplib_apply_display_requirements)( | ||
216 | struct dc *dc, | ||
217 | struct dc_state *context); | ||
218 | void (*edp_power_control)( | 208 | void (*edp_power_control)( |
219 | struct dc_link *link, | 209 | struct dc_link *link, |
220 | bool enable); | 210 | bool enable); |
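The single set_bandwidth(dc, context, safe_to_lower) hook is split into a pair, which matches the usual raise-then-relax pattern around a state commit. A hedged sketch of how a caller might bracket programming with the two hooks, assuming the funcs table is reachable as dc->hwss as elsewhere in DC (the wrapper name is illustrative):

```c
/* Sketch only: bracket a state commit with the split bandwidth hooks. */
static void commit_state_sketch(struct dc *dc, struct dc_state *context)
{
	/* Raise clocks/watermarks before touching the pipes... */
	dc->hwss.prepare_bandwidth(dc, context);

	/* ... program streams/planes for the new state here ... */

	/* ... then let clocks settle down to the new minimum afterwards. */
	dc->hwss.optimize_bandwidth(dc, context);
}
```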
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 33b99e3ab10d..0086a2f1d21a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h | |||
@@ -30,9 +30,6 @@ | |||
30 | #include "dal_asic_id.h" | 30 | #include "dal_asic_id.h" |
31 | #include "dm_pp_smu.h" | 31 | #include "dm_pp_smu.h" |
32 | 32 | ||
33 | /* TODO unhardcode, 4 for CZ*/ | ||
34 | #define MEMORY_TYPE_MULTIPLIER 4 | ||
35 | |||
36 | enum dce_version resource_parse_asic_id( | 33 | enum dce_version resource_parse_asic_id( |
37 | struct hw_asic_id asic_id); | 34 | struct hw_asic_id asic_id); |
38 | 35 | ||
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index cdcefd087487..7480f072c375 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c | |||
@@ -306,6 +306,18 @@ static struct fixed31_32 translate_from_linear_space( | |||
306 | a1); | 306 | a1); |
307 | } | 307 | } |
308 | 308 | ||
309 | static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) | ||
310 | { | ||
311 | struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10); | ||
312 | |||
313 | return translate_from_linear_space(arg, | ||
314 | dc_fixpt_zero, | ||
315 | dc_fixpt_zero, | ||
316 | dc_fixpt_zero, | ||
317 | dc_fixpt_zero, | ||
318 | gamma); | ||
319 | } | ||
320 | |||
309 | static struct fixed31_32 translate_to_linear_space( | 321 | static struct fixed31_32 translate_to_linear_space( |
310 | struct fixed31_32 arg, | 322 | struct fixed31_32 arg, |
311 | struct fixed31_32 a0, | 323 | struct fixed31_32 a0, |
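calculate_gamma22() feeds translate_from_linear_space() with gamma = 22/10 and every other coefficient zero; assuming that helper implements the usual parametric regamma (linear toe below a0, scaled power above it), zeroing the coefficients leaves only the power term, i.e. a plain 2.2 encode:

```latex
\mathrm{calculate\_gamma22}(x) \;\approx\; x^{1/2.2}, \qquad 0 \le x \le 1
```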
@@ -709,6 +721,169 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma, | |||
709 | } | 721 | } |
710 | } | 722 | } |
711 | 723 | ||
724 | static void hermite_spline_eetf(struct fixed31_32 input_x, | ||
725 | struct fixed31_32 max_display, | ||
726 | struct fixed31_32 min_display, | ||
727 | struct fixed31_32 max_content, | ||
728 | struct fixed31_32 *out_x) | ||
729 | { | ||
730 | struct fixed31_32 min_lum_pq; | ||
731 | struct fixed31_32 max_lum_pq; | ||
732 | struct fixed31_32 max_content_pq; | ||
733 | struct fixed31_32 ks; | ||
734 | struct fixed31_32 E1; | ||
735 | struct fixed31_32 E2; | ||
736 | struct fixed31_32 E3; | ||
737 | struct fixed31_32 t; | ||
738 | struct fixed31_32 t2; | ||
739 | struct fixed31_32 t3; | ||
740 | struct fixed31_32 two; | ||
741 | struct fixed31_32 three; | ||
742 | struct fixed31_32 temp1; | ||
743 | struct fixed31_32 temp2; | ||
744 | struct fixed31_32 a = dc_fixpt_from_fraction(15, 10); | ||
745 | struct fixed31_32 b = dc_fixpt_from_fraction(5, 10); | ||
746 | struct fixed31_32 epsilon = dc_fixpt_from_fraction(1, 1000000); // dc_fixpt_epsilon is a bit too small | ||
747 | |||
748 | if (dc_fixpt_eq(max_content, dc_fixpt_zero)) { | ||
749 | *out_x = dc_fixpt_zero; | ||
750 | return; | ||
751 | } | ||
752 | |||
753 | compute_pq(input_x, &E1); | ||
754 | compute_pq(dc_fixpt_div(min_display, max_content), &min_lum_pq); | ||
755 | compute_pq(dc_fixpt_div(max_display, max_content), &max_lum_pq); | ||
756 | compute_pq(dc_fixpt_one, &max_content_pq); // always 1? DAL2 code is weird | ||
757 | a = dc_fixpt_div(dc_fixpt_add(dc_fixpt_one, b), max_content_pq); // (1+b)/maxContent | ||
758 | ks = dc_fixpt_sub(dc_fixpt_mul(a, max_lum_pq), b); // a * max_lum_pq - b | ||
759 | |||
760 | if (dc_fixpt_lt(E1, ks)) | ||
761 | E2 = E1; | ||
762 | else if (dc_fixpt_le(ks, E1) && dc_fixpt_le(E1, dc_fixpt_one)) { | ||
763 | if (dc_fixpt_lt(epsilon, dc_fixpt_sub(dc_fixpt_one, ks))) | ||
764 | // t = (E1 - ks) / (1 - ks) | ||
765 | t = dc_fixpt_div(dc_fixpt_sub(E1, ks), | ||
766 | dc_fixpt_sub(dc_fixpt_one, ks)); | ||
767 | else | ||
768 | t = dc_fixpt_zero; | ||
769 | |||
770 | two = dc_fixpt_from_int(2); | ||
771 | three = dc_fixpt_from_int(3); | ||
772 | |||
773 | t2 = dc_fixpt_mul(t, t); | ||
774 | t3 = dc_fixpt_mul(t2, t); | ||
775 | temp1 = dc_fixpt_mul(two, t3); | ||
776 | temp2 = dc_fixpt_mul(three, t2); | ||
777 | |||
778 | // (2t^3 - 3t^2 + 1) * ks | ||
779 | E2 = dc_fixpt_mul(ks, dc_fixpt_add(dc_fixpt_one, | ||
780 | dc_fixpt_sub(temp1, temp2))); | ||
781 | |||
782 | // (-2t^3 + 3t^2) * max_lum_pq | ||
783 | E2 = dc_fixpt_add(E2, dc_fixpt_mul(max_lum_pq, | ||
784 | dc_fixpt_sub(temp2, temp1))); | ||
785 | |||
786 | temp1 = dc_fixpt_mul(two, t2); | ||
787 | temp2 = dc_fixpt_sub(dc_fixpt_one, ks); | ||
788 | |||
789 | // (t^3 - 2t^2 + t) * (1-ks) | ||
790 | E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2, | ||
791 | dc_fixpt_add(t, dc_fixpt_sub(t3, temp1)))); | ||
792 | } else | ||
793 | E2 = dc_fixpt_one; | ||
794 | |||
795 | temp1 = dc_fixpt_sub(dc_fixpt_one, E2); | ||
796 | temp2 = dc_fixpt_mul(temp1, temp1); | ||
797 | temp2 = dc_fixpt_mul(temp2, temp2); | ||
798 | // temp2 = (1-E2)^4 | ||
799 | |||
800 | E3 = dc_fixpt_add(E2, dc_fixpt_mul(min_lum_pq, temp2)); | ||
801 | compute_de_pq(E3, out_x); | ||
802 | |||
803 | *out_x = dc_fixpt_div(*out_x, dc_fixpt_div(max_display, max_content)); | ||
804 | } | ||
805 | |||
806 | static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, | ||
807 | uint32_t hw_points_num, | ||
808 | const struct hw_x_point *coordinate_x, | ||
809 | const struct freesync_hdr_tf_params *fs_params) | ||
810 | { | ||
811 | uint32_t i; | ||
812 | struct pwl_float_data_ex *rgb = rgb_regamma; | ||
813 | const struct hw_x_point *coord_x = coordinate_x; | ||
814 | struct fixed31_32 scaledX = dc_fixpt_zero; | ||
815 | struct fixed31_32 scaledX1 = dc_fixpt_zero; | ||
816 | struct fixed31_32 max_display = dc_fixpt_from_int(fs_params->max_display); | ||
817 | struct fixed31_32 min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000); | ||
818 | struct fixed31_32 max_content = dc_fixpt_from_int(fs_params->max_content); | ||
819 | struct fixed31_32 min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000); | ||
820 | struct fixed31_32 clip = dc_fixpt_one; | ||
821 | struct fixed31_32 output; | ||
822 | bool use_eetf = false; | ||
823 | bool is_clipped = false; | ||
824 | struct fixed31_32 sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level); | ||
825 | |||
826 | if (fs_params == NULL || fs_params->max_content == 0 || | ||
827 | fs_params->max_display == 0) | ||
828 | return false; | ||
829 | |||
830 | if (fs_params->min_display > 1000) // cap at 0.1 at the bottom | ||
831 | min_display = dc_fixpt_from_fraction(1, 10); | ||
832 | if (fs_params->max_display < 100) // cap at 100 at the top | ||
833 | max_display = dc_fixpt_from_int(100); | ||
834 | |||
835 | if (fs_params->min_content < fs_params->min_display) | ||
836 | use_eetf = true; | ||
837 | else | ||
838 | min_content = min_display; | ||
839 | |||
840 | if (fs_params->max_content > fs_params->max_display) | ||
841 | use_eetf = true; | ||
842 | else | ||
843 | max_content = max_display; | ||
844 | |||
845 | rgb += 32; // first 32 points have problems with fixed point, too small | ||
846 | coord_x += 32; | ||
847 | for (i = 32; i <= hw_points_num; i++) { | ||
848 | if (!is_clipped) { | ||
849 | if (use_eetf) { | ||
850 | /*max content is equal 1 */ | ||
851 | scaledX1 = dc_fixpt_div(coord_x->x, | ||
852 | dc_fixpt_div(max_content, sdr_white_level)); | ||
853 | hermite_spline_eetf(scaledX1, max_display, min_display, | ||
854 | max_content, &scaledX); | ||
855 | } else | ||
856 | scaledX = dc_fixpt_div(coord_x->x, | ||
857 | dc_fixpt_div(max_display, sdr_white_level)); | ||
858 | |||
859 | if (dc_fixpt_lt(scaledX, clip)) { | ||
860 | if (dc_fixpt_lt(scaledX, dc_fixpt_zero)) | ||
861 | output = dc_fixpt_zero; | ||
862 | else | ||
863 | output = calculate_gamma22(scaledX); | ||
864 | |||
865 | rgb->r = output; | ||
866 | rgb->g = output; | ||
867 | rgb->b = output; | ||
868 | } else { | ||
869 | is_clipped = true; | ||
870 | rgb->r = clip; | ||
871 | rgb->g = clip; | ||
872 | rgb->b = clip; | ||
873 | } | ||
874 | } else { | ||
875 | rgb->r = clip; | ||
876 | rgb->g = clip; | ||
877 | rgb->b = clip; | ||
878 | } | ||
879 | |||
880 | ++coord_x; | ||
881 | ++rgb; | ||
882 | } | ||
883 | |||
884 | return true; | ||
885 | } | ||
886 | |||
712 | static void build_degamma(struct pwl_float_data_ex *curve, | 887 | static void build_degamma(struct pwl_float_data_ex *curve, |
713 | uint32_t hw_points_num, | 888 | uint32_t hw_points_num, |
714 | const struct hw_x_point *coordinate_x, bool is_2_4) | 889 | const struct hw_x_point *coordinate_x, bool is_2_4) |
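The comments scattered through hermite_spline_eetf() describe a BT.2390-style EETF evaluated in PQ space. Collected in one place, with E1 the PQ-encoded input, maxLum/minLum the display limits normalized to max_content and then PQ-encoded, and b = 0.5:

```latex
a = \frac{1 + b}{\mathrm{PQ}(1)}, \qquad
k_s = a\,\mathrm{maxLum} - b, \qquad
t = \frac{E_1 - k_s}{1 - k_s}

E_2 =
\begin{cases}
E_1, & E_1 < k_s \\
(2t^3 - 3t^2 + 1)\,k_s + (-2t^3 + 3t^2)\,\mathrm{maxLum}
  + (t^3 - 2t^2 + t)(1 - k_s), & k_s \le E_1 \le 1 \\
1, & \text{otherwise}
\end{cases}

E_3 = E_2 + \mathrm{minLum}\,(1 - E_2)^4
```

The result is then de-PQ'd and rescaled by max_content/max_display, matching the final dc_fixpt_div in the function; note the initial a = 1.5 is overwritten before it is ever used.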
@@ -1356,7 +1531,8 @@ static bool map_regamma_hw_to_x_user( | |||
1356 | #define _EXTRA_POINTS 3 | 1531 | #define _EXTRA_POINTS 3 |
1357 | 1532 | ||
1358 | bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, | 1533 | bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, |
1359 | const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed) | 1534 | const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, |
1535 | const struct freesync_hdr_tf_params *fs_params) | ||
1360 | { | 1536 | { |
1361 | struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; | 1537 | struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; |
1362 | struct dividers dividers; | 1538 | struct dividers dividers; |
@@ -1374,7 +1550,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, | |||
1374 | /* we can use hardcoded curve for plain SRGB TF */ | 1550 | /* we can use hardcoded curve for plain SRGB TF */ |
1375 | if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && | 1551 | if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && |
1376 | output_tf->tf == TRANSFER_FUNCTION_SRGB && | 1552 | output_tf->tf == TRANSFER_FUNCTION_SRGB && |
1377 | (!mapUserRamp && ramp->type == GAMMA_RGB_256)) | 1553 | (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256))) |
1378 | return true; | 1554 | return true; |
1379 | 1555 | ||
1380 | output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; | 1556 | output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; |
@@ -1424,6 +1600,12 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, | |||
1424 | MAX_HW_POINTS, | 1600 | MAX_HW_POINTS, |
1425 | coordinates_x, | 1601 | coordinates_x, |
1426 | output_tf->sdr_ref_white_level); | 1602 | output_tf->sdr_ref_white_level); |
1603 | } else if (tf == TRANSFER_FUNCTION_GAMMA22 && | ||
1604 | fs_params != NULL) { | ||
1605 | build_freesync_hdr(rgb_regamma, | ||
1606 | MAX_HW_POINTS, | ||
1607 | coordinates_x, | ||
1608 | fs_params); | ||
1427 | } else { | 1609 | } else { |
1428 | tf_pts->end_exponent = 0; | 1610 | tf_pts->end_exponent = 0; |
1429 | tf_pts->x_point_at_y1_red = 1; | 1611 | tf_pts->x_point_at_y1_red = 1; |
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index 63ccb9c91224..a6e164df090a 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h | |||
@@ -73,12 +73,21 @@ struct regamma_lut { | |||
73 | }; | 73 | }; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | struct freesync_hdr_tf_params { | ||
77 | unsigned int sdr_white_level; | ||
78 | unsigned int min_content; // luminance in 1/10000 nits | ||
79 | unsigned int max_content; // luminance in nits | ||
80 | unsigned int min_display; // luminance in 1/10000 nits | ||
81 | unsigned int max_display; // luminance in nits | ||
82 | }; | ||
83 | |||
76 | void setup_x_points_distribution(void); | 84 | void setup_x_points_distribution(void); |
77 | void precompute_pq(void); | 85 | void precompute_pq(void); |
78 | void precompute_de_pq(void); | 86 | void precompute_de_pq(void); |
79 | 87 | ||
80 | bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, | 88 | bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, |
81 | const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed); | 89 | const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, |
90 | const struct freesync_hdr_tf_params *fs_params); | ||
82 | 91 | ||
83 | bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, | 92 | bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, |
84 | const struct dc_gamma *ramp, bool mapUserRamp); | 93 | const struct dc_gamma *ramp, bool mapUserRamp); |
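Note the mixed units in freesync_hdr_tf_params: the minimums are in 1/10000 nit, the maximums in whole nits. A hedged usage sketch with illustrative numbers (1000-nit panel, 0.05-nit black level, 4000-nit content, 80-nit SDR white); the wrapper name is hypothetical, and the freesync path is only taken when output_tf->tf is TRANSFER_FUNCTION_GAMMA22:

```c
/* Illustrative caller; assumes color_gamma.h is included. */
static bool build_hdr_regamma_sketch(struct dc_transfer_func *output_tf,
		const struct dc_gamma *ramp)
{
	struct freesync_hdr_tf_params fs_params = {
		.sdr_white_level = 80,	/* nits */
		.min_content = 500,	/* 0.05 nit, in 1/10000 nit units */
		.max_content = 4000,	/* nits */
		.min_display = 500,	/* 0.05 nit, in 1/10000 nit units */
		.max_display = 1000,	/* nits */
	};

	return mod_color_calculate_regamma_params(output_tf, ramp,
			false /* mapUserRamp */, true /* canRomBeUsed */,
			&fs_params);
}
```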
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 4018c7180d00..620a171620ee 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c | |||
@@ -37,6 +37,8 @@ | |||
37 | #define RENDER_TIMES_MAX_COUNT 10 | 37 | #define RENDER_TIMES_MAX_COUNT 10 |
38 | /* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ | 38 | /* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ |
39 | #define BTR_EXIT_MARGIN 2000 | 39 | #define BTR_EXIT_MARGIN 2000 |
40 | /*Threshold to exit fixed refresh rate*/ | ||
41 | #define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4 | ||
40 | /* Number of consecutive frames to check before entering/exiting fixed refresh*/ | 42 | /* Number of consecutive frames to check before entering/exiting fixed refresh*/ |
41 | #define FIXED_REFRESH_ENTER_FRAME_COUNT 5 | 43 | #define FIXED_REFRESH_ENTER_FRAME_COUNT 5 |
42 | #define FIXED_REFRESH_EXIT_FRAME_COUNT 5 | 44 | #define FIXED_REFRESH_EXIT_FRAME_COUNT 5 |
@@ -257,40 +259,14 @@ static void apply_below_the_range(struct core_freesync *core_freesync, | |||
257 | if (in_out_vrr->btr.btr_active) { | 259 | if (in_out_vrr->btr.btr_active) { |
258 | in_out_vrr->btr.frame_counter = 0; | 260 | in_out_vrr->btr.frame_counter = 0; |
259 | in_out_vrr->btr.btr_active = false; | 261 | in_out_vrr->btr.btr_active = false; |
260 | |||
261 | /* Exit Fixed Refresh mode */ | ||
262 | } else if (in_out_vrr->fixed.fixed_active) { | ||
263 | |||
264 | in_out_vrr->fixed.frame_counter++; | ||
265 | |||
266 | if (in_out_vrr->fixed.frame_counter > | ||
267 | FIXED_REFRESH_EXIT_FRAME_COUNT) { | ||
268 | in_out_vrr->fixed.frame_counter = 0; | ||
269 | in_out_vrr->fixed.fixed_active = false; | ||
270 | } | ||
271 | } | 262 | } |
272 | } else if (last_render_time_in_us > max_render_time_in_us) { | 263 | } else if (last_render_time_in_us > max_render_time_in_us) { |
273 | /* Enter Below the Range */ | 264 | /* Enter Below the Range */ |
274 | if (!in_out_vrr->btr.btr_active && | 265 | in_out_vrr->btr.btr_active = true; |
275 | in_out_vrr->btr.btr_enabled) { | ||
276 | in_out_vrr->btr.btr_active = true; | ||
277 | |||
278 | /* Enter Fixed Refresh mode */ | ||
279 | } else if (!in_out_vrr->fixed.fixed_active && | ||
280 | !in_out_vrr->btr.btr_enabled) { | ||
281 | in_out_vrr->fixed.frame_counter++; | ||
282 | |||
283 | if (in_out_vrr->fixed.frame_counter > | ||
284 | FIXED_REFRESH_ENTER_FRAME_COUNT) { | ||
285 | in_out_vrr->fixed.frame_counter = 0; | ||
286 | in_out_vrr->fixed.fixed_active = true; | ||
287 | } | ||
288 | } | ||
289 | } | 266 | } |
290 | 267 | ||
291 | /* BTR set to "not active" so disengage */ | 268 | /* BTR set to "not active" so disengage */ |
292 | if (!in_out_vrr->btr.btr_active) { | 269 | if (!in_out_vrr->btr.btr_active) { |
293 | in_out_vrr->btr.btr_active = false; | ||
294 | in_out_vrr->btr.inserted_duration_in_us = 0; | 270 | in_out_vrr->btr.inserted_duration_in_us = 0; |
295 | in_out_vrr->btr.frames_to_insert = 0; | 271 | in_out_vrr->btr.frames_to_insert = 0; |
296 | in_out_vrr->btr.frame_counter = 0; | 272 | in_out_vrr->btr.frame_counter = 0; |
@@ -375,7 +351,12 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync, | |||
375 | bool update = false; | 351 | bool update = false; |
376 | unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; | 352 | unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; |
377 | 353 | ||
378 | if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { | 354 | //Compute the exit refresh rate and exit frame duration |
355 | unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us) | ||
356 | + (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ)); | ||
357 | unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz; | ||
358 | |||
359 | if (last_render_time_in_us < exit_frame_duration_in_us) { | ||
379 | /* Exit Fixed Refresh mode */ | 360 | /* Exit Fixed Refresh mode */ |
380 | if (in_out_vrr->fixed.fixed_active) { | 361 | if (in_out_vrr->fixed.fixed_active) { |
381 | in_out_vrr->fixed.frame_counter++; | 362 | in_out_vrr->fixed.frame_counter++; |
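Worked numbers for the new exit condition, assuming a hypothetical panel whose variable range bottoms out at 40 Hz (max_render_time_in_us = 25000) and the 4 Hz margin defined above:

```latex
\mathrm{exit\_refresh} = \frac{10^9}{25000} + 1000 \cdot 4 = 44000\ \mathrm{mHz}, \qquad
\mathrm{exit\_frame\_duration} = \frac{10^9}{44000} \approx 22727\ \mu\mathrm{s}
```

Fixed refresh is therefore only exited once frames render faster than roughly 44 Hz, comfortably inside the variable range, rather than at the old flat 2000 µs BTR margin right at the 40 Hz boundary.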