diff options
| author | Dave Airlie <airlied@redhat.com> | 2018-12-05 22:28:19 -0500 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2018-12-05 22:29:09 -0500 |
| commit | 513126ae00ba897cac1ab07f61edf062093d4dcb (patch) | |
| tree | a6c8bf7e52609a4d3d42954eb4492f01113eaafc | |
| parent | 467e8a516dcf922d1ea343cebb0e751f81f0dca3 (diff) | |
| parent | 2c486cc4c2774df684d8a43ca7a20670c67ccd76 (diff) | |
Merge branch 'drm-next-4.21' of git://people.freedesktop.org/~agd5f/linux into drm-next
amdgpu and amdkfd:
- Freesync support
- ABM support in DC
- KFD support for vega12 and polaris12
- Add sdma paging queue support for vega
- Use ACPI to query backlight range on supported platforms
- Clean up doorbell handling
- KFD fix for PASID handling under non-HWS mode
- Misc cleanups and fixes
scheduler:
- Revert "fix timeout handling v2"
radeon:
- Fix possible overflow on 32 bit
ttm:
- Fix for LRU handling for ghost objects
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181130192505.2946-1-alexander.deucher@amd.com
103 files changed, 2895 insertions, 1461 deletions
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst index 4b1501b4835b..8da2a178cf85 100644 --- a/Documentation/gpu/drm-kms.rst +++ b/Documentation/gpu/drm-kms.rst | |||
| @@ -575,6 +575,13 @@ Explicit Fencing Properties | |||
| 575 | .. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c | 575 | .. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c |
| 576 | :doc: explicit fencing properties | 576 | :doc: explicit fencing properties |
| 577 | 577 | ||
| 578 | |||
| 579 | Variable Refresh Properties | ||
| 580 | --------------------------- | ||
| 581 | |||
| 582 | .. kernel-doc:: drivers/gpu/drm/drm_connector.c | ||
| 583 | :doc: Variable refresh properties | ||
| 584 | |||
| 578 | Existing KMS Properties | 585 | Existing KMS Properties |
| 579 | ----------------------- | 586 | ----------------------- |
| 580 | 587 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 42f882c633ee..c8ad6bf6618a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
| @@ -81,6 +81,7 @@ | |||
| 81 | #include "amdgpu_job.h" | 81 | #include "amdgpu_job.h" |
| 82 | #include "amdgpu_bo_list.h" | 82 | #include "amdgpu_bo_list.h" |
| 83 | #include "amdgpu_gem.h" | 83 | #include "amdgpu_gem.h" |
| 84 | #include "amdgpu_doorbell.h" | ||
| 84 | 85 | ||
| 85 | #define MAX_GPU_INSTANCE 16 | 86 | #define MAX_GPU_INSTANCE 16 |
| 86 | 87 | ||
| @@ -361,123 +362,6 @@ int amdgpu_fence_slab_init(void); | |||
| 361 | void amdgpu_fence_slab_fini(void); | 362 | void amdgpu_fence_slab_fini(void); |
| 362 | 363 | ||
| 363 | /* | 364 | /* |
| 364 | * GPU doorbell structures, functions & helpers | ||
| 365 | */ | ||
| 366 | typedef enum _AMDGPU_DOORBELL_ASSIGNMENT | ||
| 367 | { | ||
| 368 | AMDGPU_DOORBELL_KIQ = 0x000, | ||
| 369 | AMDGPU_DOORBELL_HIQ = 0x001, | ||
| 370 | AMDGPU_DOORBELL_DIQ = 0x002, | ||
| 371 | AMDGPU_DOORBELL_MEC_RING0 = 0x010, | ||
| 372 | AMDGPU_DOORBELL_MEC_RING1 = 0x011, | ||
| 373 | AMDGPU_DOORBELL_MEC_RING2 = 0x012, | ||
| 374 | AMDGPU_DOORBELL_MEC_RING3 = 0x013, | ||
| 375 | AMDGPU_DOORBELL_MEC_RING4 = 0x014, | ||
| 376 | AMDGPU_DOORBELL_MEC_RING5 = 0x015, | ||
| 377 | AMDGPU_DOORBELL_MEC_RING6 = 0x016, | ||
| 378 | AMDGPU_DOORBELL_MEC_RING7 = 0x017, | ||
| 379 | AMDGPU_DOORBELL_GFX_RING0 = 0x020, | ||
| 380 | AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0, | ||
| 381 | AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1, | ||
| 382 | AMDGPU_DOORBELL_IH = 0x1E8, | ||
| 383 | AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF, | ||
| 384 | AMDGPU_DOORBELL_INVALID = 0xFFFF | ||
| 385 | } AMDGPU_DOORBELL_ASSIGNMENT; | ||
| 386 | |||
| 387 | struct amdgpu_doorbell { | ||
| 388 | /* doorbell mmio */ | ||
| 389 | resource_size_t base; | ||
| 390 | resource_size_t size; | ||
| 391 | u32 __iomem *ptr; | ||
| 392 | u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */ | ||
| 393 | }; | ||
| 394 | |||
| 395 | /* | ||
| 396 | * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space | ||
| 397 | */ | ||
| 398 | typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT | ||
| 399 | { | ||
| 400 | /* | ||
| 401 | * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in | ||
| 402 | * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range. | ||
| 403 | * Compute related doorbells are allocated from 0x00 to 0x8a | ||
| 404 | */ | ||
| 405 | |||
| 406 | |||
| 407 | /* kernel scheduling */ | ||
| 408 | AMDGPU_DOORBELL64_KIQ = 0x00, | ||
| 409 | |||
| 410 | /* HSA interface queue and debug queue */ | ||
| 411 | AMDGPU_DOORBELL64_HIQ = 0x01, | ||
| 412 | AMDGPU_DOORBELL64_DIQ = 0x02, | ||
| 413 | |||
| 414 | /* Compute engines */ | ||
| 415 | AMDGPU_DOORBELL64_MEC_RING0 = 0x03, | ||
| 416 | AMDGPU_DOORBELL64_MEC_RING1 = 0x04, | ||
| 417 | AMDGPU_DOORBELL64_MEC_RING2 = 0x05, | ||
| 418 | AMDGPU_DOORBELL64_MEC_RING3 = 0x06, | ||
| 419 | AMDGPU_DOORBELL64_MEC_RING4 = 0x07, | ||
| 420 | AMDGPU_DOORBELL64_MEC_RING5 = 0x08, | ||
| 421 | AMDGPU_DOORBELL64_MEC_RING6 = 0x09, | ||
| 422 | AMDGPU_DOORBELL64_MEC_RING7 = 0x0a, | ||
| 423 | |||
| 424 | /* User queue doorbell range (128 doorbells) */ | ||
| 425 | AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b, | ||
| 426 | AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a, | ||
| 427 | |||
| 428 | /* Graphics engine */ | ||
| 429 | AMDGPU_DOORBELL64_GFX_RING0 = 0x8b, | ||
| 430 | |||
| 431 | /* | ||
| 432 | * Other graphics doorbells can be allocated here: from 0x8c to 0xdf | ||
| 433 | * Graphics voltage island aperture 1 | ||
| 434 | * default non-graphics QWORD index is 0xe0 - 0xFF inclusive | ||
| 435 | */ | ||
| 436 | |||
| 437 | /* sDMA engines reserved from 0xe0 -0xef */ | ||
| 438 | AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0, | ||
| 439 | AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1, | ||
| 440 | AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8, | ||
| 441 | AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9, | ||
| 442 | |||
| 443 | /* For vega10 sriov, the sdma doorbell must be fixed as follow | ||
| 444 | * to keep the same setting with host driver, or it will | ||
| 445 | * happen conflicts | ||
| 446 | */ | ||
| 447 | AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0, | ||
| 448 | AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1, | ||
| 449 | AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2, | ||
| 450 | AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3, | ||
| 451 | |||
| 452 | /* Interrupt handler */ | ||
| 453 | AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */ | ||
| 454 | AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */ | ||
| 455 | AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */ | ||
| 456 | |||
| 457 | /* VCN engine use 32 bits doorbell */ | ||
| 458 | AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */ | ||
| 459 | AMDGPU_DOORBELL64_VCN2_3 = 0xF9, | ||
| 460 | AMDGPU_DOORBELL64_VCN4_5 = 0xFA, | ||
| 461 | AMDGPU_DOORBELL64_VCN6_7 = 0xFB, | ||
| 462 | |||
| 463 | /* overlap the doorbell assignment with VCN as they are mutually exclusive | ||
| 464 | * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD | ||
| 465 | */ | ||
| 466 | AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8, | ||
| 467 | AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9, | ||
| 468 | AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA, | ||
| 469 | AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB, | ||
| 470 | |||
| 471 | AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC, | ||
| 472 | AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD, | ||
| 473 | AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE, | ||
| 474 | AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF, | ||
| 475 | |||
| 476 | AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, | ||
| 477 | AMDGPU_DOORBELL64_INVALID = 0xFFFF | ||
| 478 | } AMDGPU_DOORBELL64_ASSIGNMENT; | ||
| 479 | |||
| 480 | /* | ||
| 481 | * IRQS. | 365 | * IRQS. |
| 482 | */ | 366 | */ |
| 483 | 367 | ||
| @@ -654,6 +538,8 @@ struct amdgpu_asic_funcs { | |||
| 654 | struct amdgpu_ring *ring); | 538 | struct amdgpu_ring *ring); |
| 655 | /* check if the asic needs a full reset of if soft reset will work */ | 539 | /* check if the asic needs a full reset of if soft reset will work */ |
| 656 | bool (*need_full_reset)(struct amdgpu_device *adev); | 540 | bool (*need_full_reset)(struct amdgpu_device *adev); |
| 541 | /* initialize doorbell layout for specific asic*/ | ||
| 542 | void (*init_doorbell_index)(struct amdgpu_device *adev); | ||
| 657 | }; | 543 | }; |
| 658 | 544 | ||
| 659 | /* | 545 | /* |
| @@ -1023,6 +909,8 @@ struct amdgpu_device { | |||
| 1023 | unsigned long last_mm_index; | 909 | unsigned long last_mm_index; |
| 1024 | bool in_gpu_reset; | 910 | bool in_gpu_reset; |
| 1025 | struct mutex lock_reset; | 911 | struct mutex lock_reset; |
| 912 | struct amdgpu_doorbell_index doorbell_index; | ||
| 913 | int asic_reset_res; | ||
| 1026 | }; | 914 | }; |
| 1027 | 915 | ||
| 1028 | static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) | 916 | static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) |
| @@ -1047,11 +935,6 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); | |||
| 1047 | u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg); | 935 | u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg); |
| 1048 | void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v); | 936 | void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v); |
| 1049 | 937 | ||
| 1050 | u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); | ||
| 1051 | void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); | ||
| 1052 | u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index); | ||
| 1053 | void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); | ||
| 1054 | |||
| 1055 | bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type); | 938 | bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type); |
| 1056 | bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); | 939 | bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); |
| 1057 | 940 | ||
| @@ -1113,11 +996,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); | |||
| 1113 | #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg)) | 996 | #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg)) |
| 1114 | #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v)) | 997 | #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v)) |
| 1115 | 998 | ||
| 1116 | #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index)) | ||
| 1117 | #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v)) | ||
| 1118 | #define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index)) | ||
| 1119 | #define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v)) | ||
| 1120 | |||
| 1121 | #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT | 999 | #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT |
| 1122 | #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK | 1000 | #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK |
| 1123 | 1001 | ||
| @@ -1159,6 +1037,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev); | |||
| 1159 | #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r)) | 1037 | #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r)) |
| 1160 | #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r)) | 1038 | #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r)) |
| 1161 | #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) | 1039 | #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) |
| 1040 | #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) | ||
| 1162 | 1041 | ||
| 1163 | /* Common functions */ | 1042 | /* Common functions */ |
| 1164 | bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); | 1043 | bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); |
| @@ -1219,12 +1098,6 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); | |||
| 1219 | long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, | 1098 | long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, |
| 1220 | unsigned long arg); | 1099 | unsigned long arg); |
| 1221 | 1100 | ||
| 1222 | |||
| 1223 | /* | ||
| 1224 | * functions used by amdgpu_xgmi.c | ||
| 1225 | */ | ||
| 1226 | int amdgpu_xgmi_add_device(struct amdgpu_device *adev); | ||
| 1227 | |||
| 1228 | /* | 1101 | /* |
| 1229 | * functions used by amdgpu_encoder.c | 1102 | * functions used by amdgpu_encoder.c |
| 1230 | */ | 1103 | */ |
| @@ -1252,6 +1125,9 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade | |||
| 1252 | int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, | 1125 | int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, |
| 1253 | u8 perf_req, bool advertise); | 1126 | u8 perf_req, bool advertise); |
| 1254 | int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); | 1127 | int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); |
| 1128 | |||
| 1129 | void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev, | ||
| 1130 | struct amdgpu_dm_backlight_caps *caps); | ||
| 1255 | #else | 1131 | #else |
| 1256 | static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } | 1132 | static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } |
| 1257 | static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } | 1133 | static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 7f0afc526419..47db65926d71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | |||
| @@ -41,28 +41,21 @@ struct amdgpu_atif_notification_cfg { | |||
| 41 | }; | 41 | }; |
| 42 | 42 | ||
| 43 | struct amdgpu_atif_notifications { | 43 | struct amdgpu_atif_notifications { |
| 44 | bool display_switch; | ||
| 45 | bool expansion_mode_change; | ||
| 46 | bool thermal_state; | 44 | bool thermal_state; |
| 47 | bool forced_power_state; | 45 | bool forced_power_state; |
| 48 | bool system_power_state; | 46 | bool system_power_state; |
| 49 | bool display_conf_change; | ||
| 50 | bool px_gfx_switch; | ||
| 51 | bool brightness_change; | 47 | bool brightness_change; |
| 52 | bool dgpu_display_event; | 48 | bool dgpu_display_event; |
| 49 | bool gpu_package_power_limit; | ||
| 53 | }; | 50 | }; |
| 54 | 51 | ||
| 55 | struct amdgpu_atif_functions { | 52 | struct amdgpu_atif_functions { |
| 56 | bool system_params; | 53 | bool system_params; |
| 57 | bool sbios_requests; | 54 | bool sbios_requests; |
| 58 | bool select_active_disp; | ||
| 59 | bool lid_state; | ||
| 60 | bool get_tv_standard; | ||
| 61 | bool set_tv_standard; | ||
| 62 | bool get_panel_expansion_mode; | ||
| 63 | bool set_panel_expansion_mode; | ||
| 64 | bool temperature_change; | 55 | bool temperature_change; |
| 65 | bool graphics_device_types; | 56 | bool query_backlight_transfer_characteristics; |
| 57 | bool ready_to_undock; | ||
| 58 | bool external_gpu_information; | ||
| 66 | }; | 59 | }; |
| 67 | 60 | ||
| 68 | struct amdgpu_atif { | 61 | struct amdgpu_atif { |
| @@ -72,6 +65,7 @@ struct amdgpu_atif { | |||
| 72 | struct amdgpu_atif_functions functions; | 65 | struct amdgpu_atif_functions functions; |
| 73 | struct amdgpu_atif_notification_cfg notification_cfg; | 66 | struct amdgpu_atif_notification_cfg notification_cfg; |
| 74 | struct amdgpu_encoder *encoder_for_bl; | 67 | struct amdgpu_encoder *encoder_for_bl; |
| 68 | struct amdgpu_dm_backlight_caps backlight_caps; | ||
| 75 | }; | 69 | }; |
| 76 | 70 | ||
| 77 | /* Call the ATIF method | 71 | /* Call the ATIF method |
| @@ -137,15 +131,12 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, | |||
| 137 | */ | 131 | */ |
| 138 | static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask) | 132 | static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask) |
| 139 | { | 133 | { |
| 140 | n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED; | ||
| 141 | n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED; | ||
| 142 | n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED; | 134 | n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED; |
| 143 | n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED; | 135 | n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED; |
| 144 | n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED; | 136 | n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED; |
| 145 | n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED; | ||
| 146 | n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED; | ||
| 147 | n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED; | 137 | n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED; |
| 148 | n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED; | 138 | n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED; |
| 139 | n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED; | ||
| 149 | } | 140 | } |
| 150 | 141 | ||
| 151 | /** | 142 | /** |
| @@ -162,14 +153,11 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas | |||
| 162 | { | 153 | { |
| 163 | f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED; | 154 | f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED; |
| 164 | f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED; | 155 | f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED; |
| 165 | f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED; | ||
| 166 | f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED; | ||
| 167 | f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED; | ||
| 168 | f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED; | ||
| 169 | f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED; | ||
| 170 | f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED; | ||
| 171 | f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED; | 156 | f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED; |
| 172 | f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED; | 157 | f->query_backlight_transfer_characteristics = |
| 158 | mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED; | ||
| 159 | f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED; | ||
| 160 | f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED; | ||
| 173 | } | 161 | } |
| 174 | 162 | ||
| 175 | /** | 163 | /** |
| @@ -311,6 +299,65 @@ out: | |||
| 311 | } | 299 | } |
| 312 | 300 | ||
| 313 | /** | 301 | /** |
| 302 | * amdgpu_atif_query_backlight_caps - get min and max backlight input signal | ||
| 303 | * | ||
| 304 | * @handle: acpi handle | ||
| 305 | * | ||
| 306 | * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function | ||
| 307 | * to determine the acceptable range of backlight values | ||
| 308 | * | ||
| 309 | * Backlight_caps.caps_valid will be set to true if the query is successful | ||
| 310 | * | ||
| 311 | * The input signals are in range 0-255 | ||
| 312 | * | ||
| 313 | * This function assumes the display with backlight is the first LCD | ||
| 314 | * | ||
| 315 | * Returns 0 on success, error on failure. | ||
| 316 | */ | ||
| 317 | static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif) | ||
| 318 | { | ||
| 319 | union acpi_object *info; | ||
| 320 | struct atif_qbtc_output characteristics; | ||
| 321 | struct atif_qbtc_arguments arguments; | ||
| 322 | struct acpi_buffer params; | ||
| 323 | size_t size; | ||
| 324 | int err = 0; | ||
| 325 | |||
| 326 | arguments.size = sizeof(arguments); | ||
| 327 | arguments.requested_display = ATIF_QBTC_REQUEST_LCD1; | ||
| 328 | |||
| 329 | params.length = sizeof(arguments); | ||
| 330 | params.pointer = (void *)&arguments; | ||
| 331 | |||
| 332 | info = amdgpu_atif_call(atif, | ||
| 333 | ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS, | ||
| 334 | ¶ms); | ||
| 335 | if (!info) { | ||
| 336 | err = -EIO; | ||
| 337 | goto out; | ||
| 338 | } | ||
| 339 | |||
| 340 | size = *(u16 *) info->buffer.pointer; | ||
| 341 | if (size < 10) { | ||
| 342 | err = -EINVAL; | ||
| 343 | goto out; | ||
| 344 | } | ||
| 345 | |||
| 346 | memset(&characteristics, 0, sizeof(characteristics)); | ||
| 347 | size = min(sizeof(characteristics), size); | ||
| 348 | memcpy(&characteristics, info->buffer.pointer, size); | ||
| 349 | |||
| 350 | atif->backlight_caps.caps_valid = true; | ||
| 351 | atif->backlight_caps.min_input_signal = | ||
| 352 | characteristics.min_input_signal; | ||
| 353 | atif->backlight_caps.max_input_signal = | ||
| 354 | characteristics.max_input_signal; | ||
| 355 | out: | ||
| 356 | kfree(info); | ||
| 357 | return err; | ||
| 358 | } | ||
| 359 | |||
| 360 | /** | ||
| 314 | * amdgpu_atif_get_sbios_requests - get requested sbios event | 361 | * amdgpu_atif_get_sbios_requests - get requested sbios event |
| 315 | * | 362 | * |
| 316 | * @handle: acpi handle | 363 | * @handle: acpi handle |
| @@ -799,6 +846,17 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) | |||
| 799 | } | 846 | } |
| 800 | } | 847 | } |
| 801 | 848 | ||
| 849 | if (atif->functions.query_backlight_transfer_characteristics) { | ||
| 850 | ret = amdgpu_atif_query_backlight_caps(atif); | ||
| 851 | if (ret) { | ||
| 852 | DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n", | ||
| 853 | ret); | ||
| 854 | atif->backlight_caps.caps_valid = false; | ||
| 855 | } | ||
| 856 | } else { | ||
| 857 | atif->backlight_caps.caps_valid = false; | ||
| 858 | } | ||
| 859 | |||
| 802 | out: | 860 | out: |
| 803 | adev->acpi_nb.notifier_call = amdgpu_acpi_event; | 861 | adev->acpi_nb.notifier_call = amdgpu_acpi_event; |
| 804 | register_acpi_notifier(&adev->acpi_nb); | 862 | register_acpi_notifier(&adev->acpi_nb); |
| @@ -806,6 +864,18 @@ out: | |||
| 806 | return ret; | 864 | return ret; |
| 807 | } | 865 | } |
| 808 | 866 | ||
| 867 | void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev, | ||
| 868 | struct amdgpu_dm_backlight_caps *caps) | ||
| 869 | { | ||
| 870 | if (!adev->atif) { | ||
| 871 | caps->caps_valid = false; | ||
| 872 | return; | ||
| 873 | } | ||
| 874 | caps->caps_valid = adev->atif->backlight_caps.caps_valid; | ||
| 875 | caps->min_input_signal = adev->atif->backlight_caps.min_input_signal; | ||
| 876 | caps->max_input_signal = adev->atif->backlight_caps.max_input_signal; | ||
| 877 | } | ||
| 878 | |||
| 809 | /** | 879 | /** |
| 810 | * amdgpu_acpi_fini - tear down driver acpi support | 880 | * amdgpu_acpi_fini - tear down driver acpi support |
| 811 | * | 881 | * |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bcf1666fb31d..d693b8047653 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | |||
| @@ -73,9 +73,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) | |||
| 73 | case CHIP_FIJI: | 73 | case CHIP_FIJI: |
| 74 | case CHIP_POLARIS10: | 74 | case CHIP_POLARIS10: |
| 75 | case CHIP_POLARIS11: | 75 | case CHIP_POLARIS11: |
| 76 | case CHIP_POLARIS12: | ||
| 76 | kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); | 77 | kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); |
| 77 | break; | 78 | break; |
| 78 | case CHIP_VEGA10: | 79 | case CHIP_VEGA10: |
| 80 | case CHIP_VEGA12: | ||
| 79 | case CHIP_VEGA20: | 81 | case CHIP_VEGA20: |
| 80 | case CHIP_RAVEN: | 82 | case CHIP_RAVEN: |
| 81 | kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); | 83 | kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); |
| @@ -179,25 +181,14 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) | |||
| 179 | * process in case of 64-bit doorbells so we | 181 | * process in case of 64-bit doorbells so we |
| 180 | * can use each doorbell assignment twice. | 182 | * can use each doorbell assignment twice. |
| 181 | */ | 183 | */ |
| 182 | if (adev->asic_type == CHIP_VEGA10) { | 184 | gpu_resources.sdma_doorbell[0][i] = |
| 183 | gpu_resources.sdma_doorbell[0][i] = | 185 | adev->doorbell_index.sdma_engine0 + (i >> 1); |
| 184 | AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1); | 186 | gpu_resources.sdma_doorbell[0][i+1] = |
| 185 | gpu_resources.sdma_doorbell[0][i+1] = | 187 | adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1); |
| 186 | AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1); | 188 | gpu_resources.sdma_doorbell[1][i] = |
| 187 | gpu_resources.sdma_doorbell[1][i] = | 189 | adev->doorbell_index.sdma_engine1 + (i >> 1); |
| 188 | AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1); | 190 | gpu_resources.sdma_doorbell[1][i+1] = |
| 189 | gpu_resources.sdma_doorbell[1][i+1] = | 191 | adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1); |
| 190 | AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1); | ||
| 191 | } else { | ||
| 192 | gpu_resources.sdma_doorbell[0][i] = | ||
| 193 | AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1); | ||
| 194 | gpu_resources.sdma_doorbell[0][i+1] = | ||
| 195 | AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1); | ||
| 196 | gpu_resources.sdma_doorbell[1][i] = | ||
| 197 | AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1); | ||
| 198 | gpu_resources.sdma_doorbell[1][i+1] = | ||
| 199 | AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1); | ||
| 200 | } | ||
| 201 | } | 192 | } |
| 202 | /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for | 193 | /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for |
| 203 | * SDMA, IH and VCN. So don't use them for the CP. | 194 | * SDMA, IH and VCN. So don't use them for the CP. |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index df0a059565f9..f3129b912714 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | |||
| @@ -46,9 +46,9 @@ | |||
| 46 | /* Impose limit on how much memory KFD can use */ | 46 | /* Impose limit on how much memory KFD can use */ |
| 47 | static struct { | 47 | static struct { |
| 48 | uint64_t max_system_mem_limit; | 48 | uint64_t max_system_mem_limit; |
| 49 | uint64_t max_userptr_mem_limit; | 49 | uint64_t max_ttm_mem_limit; |
| 50 | int64_t system_mem_used; | 50 | int64_t system_mem_used; |
| 51 | int64_t userptr_mem_used; | 51 | int64_t ttm_mem_used; |
| 52 | spinlock_t mem_limit_lock; | 52 | spinlock_t mem_limit_lock; |
| 53 | } kfd_mem_limit; | 53 | } kfd_mem_limit; |
| 54 | 54 | ||
| @@ -90,8 +90,8 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm, | |||
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | /* Set memory usage limits. Current, limits are | 92 | /* Set memory usage limits. Current, limits are |
| 93 | * System (kernel) memory - 3/8th System RAM | 93 | * System (TTM + userptr) memory - 3/4th System RAM |
| 94 | * Userptr memory - 3/4th System RAM | 94 | * TTM memory - 3/8th System RAM |
| 95 | */ | 95 | */ |
| 96 | void amdgpu_amdkfd_gpuvm_init_mem_limits(void) | 96 | void amdgpu_amdkfd_gpuvm_init_mem_limits(void) |
| 97 | { | 97 | { |
| @@ -103,48 +103,54 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) | |||
| 103 | mem *= si.mem_unit; | 103 | mem *= si.mem_unit; |
| 104 | 104 | ||
| 105 | spin_lock_init(&kfd_mem_limit.mem_limit_lock); | 105 | spin_lock_init(&kfd_mem_limit.mem_limit_lock); |
| 106 | kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3); | 106 | kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2); |
| 107 | kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2); | 107 | kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3); |
| 108 | pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n", | 108 | pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n", |
| 109 | (kfd_mem_limit.max_system_mem_limit >> 20), | 109 | (kfd_mem_limit.max_system_mem_limit >> 20), |
| 110 | (kfd_mem_limit.max_userptr_mem_limit >> 20)); | 110 | (kfd_mem_limit.max_ttm_mem_limit >> 20)); |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev, | 113 | static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev, |
| 114 | uint64_t size, u32 domain) | 114 | uint64_t size, u32 domain, bool sg) |
| 115 | { | 115 | { |
| 116 | size_t acc_size; | 116 | size_t acc_size, system_mem_needed, ttm_mem_needed; |
| 117 | int ret = 0; | 117 | int ret = 0; |
| 118 | 118 | ||
| 119 | acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, | 119 | acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, |
| 120 | sizeof(struct amdgpu_bo)); | 120 | sizeof(struct amdgpu_bo)); |
| 121 | 121 | ||
| 122 | spin_lock(&kfd_mem_limit.mem_limit_lock); | 122 | spin_lock(&kfd_mem_limit.mem_limit_lock); |
| 123 | |||
| 123 | if (domain == AMDGPU_GEM_DOMAIN_GTT) { | 124 | if (domain == AMDGPU_GEM_DOMAIN_GTT) { |
| 124 | if (kfd_mem_limit.system_mem_used + (acc_size + size) > | 125 | /* TTM GTT memory */ |
| 125 | kfd_mem_limit.max_system_mem_limit) { | 126 | system_mem_needed = acc_size + size; |
| 126 | ret = -ENOMEM; | 127 | ttm_mem_needed = acc_size + size; |
| 127 | goto err_no_mem; | 128 | } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { |
| 128 | } | 129 | /* Userptr */ |
| 129 | kfd_mem_limit.system_mem_used += (acc_size + size); | 130 | system_mem_needed = acc_size + size; |
| 130 | } else if (domain == AMDGPU_GEM_DOMAIN_CPU) { | 131 | ttm_mem_needed = acc_size; |
| 131 | if ((kfd_mem_limit.system_mem_used + acc_size > | 132 | } else { |
| 132 | kfd_mem_limit.max_system_mem_limit) || | 133 | /* VRAM and SG */ |
| 133 | (kfd_mem_limit.userptr_mem_used + (size + acc_size) > | 134 | system_mem_needed = acc_size; |
| 134 | kfd_mem_limit.max_userptr_mem_limit)) { | 135 | ttm_mem_needed = acc_size; |
| 135 | ret = -ENOMEM; | ||
| 136 | goto err_no_mem; | ||
| 137 | } | ||
| 138 | kfd_mem_limit.system_mem_used += acc_size; | ||
| 139 | kfd_mem_limit.userptr_mem_used += size; | ||
| 140 | } | 136 | } |
| 141 | err_no_mem: | 137 | |
| 138 | if ((kfd_mem_limit.system_mem_used + system_mem_needed > | ||
| 139 | kfd_mem_limit.max_system_mem_limit) || | ||
| 140 | (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > | ||
| 141 | kfd_mem_limit.max_ttm_mem_limit)) | ||
| 142 | ret = -ENOMEM; | ||
| 143 | else { | ||
| 144 | kfd_mem_limit.system_mem_used += system_mem_needed; | ||
| 145 | kfd_mem_limit.ttm_mem_used += ttm_mem_needed; | ||
| 146 | } | ||
| 147 | |||
| 142 | spin_unlock(&kfd_mem_limit.mem_limit_lock); | 148 | spin_unlock(&kfd_mem_limit.mem_limit_lock); |
| 143 | return ret; | 149 | return ret; |
| 144 | } | 150 | } |
| 145 | 151 | ||
| 146 | static void unreserve_system_mem_limit(struct amdgpu_device *adev, | 152 | static void unreserve_system_mem_limit(struct amdgpu_device *adev, |
| 147 | uint64_t size, u32 domain) | 153 | uint64_t size, u32 domain, bool sg) |
| 148 | { | 154 | { |
| 149 | size_t acc_size; | 155 | size_t acc_size; |
| 150 | 156 | ||
| @@ -154,14 +160,18 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev, | |||
| 154 | spin_lock(&kfd_mem_limit.mem_limit_lock); | 160 | spin_lock(&kfd_mem_limit.mem_limit_lock); |
| 155 | if (domain == AMDGPU_GEM_DOMAIN_GTT) { | 161 | if (domain == AMDGPU_GEM_DOMAIN_GTT) { |
| 156 | kfd_mem_limit.system_mem_used -= (acc_size + size); | 162 | kfd_mem_limit.system_mem_used -= (acc_size + size); |
| 157 | } else if (domain == AMDGPU_GEM_DOMAIN_CPU) { | 163 | kfd_mem_limit.ttm_mem_used -= (acc_size + size); |
| 164 | } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { | ||
| 165 | kfd_mem_limit.system_mem_used -= (acc_size + size); | ||
| 166 | kfd_mem_limit.ttm_mem_used -= acc_size; | ||
| 167 | } else { | ||
| 158 | kfd_mem_limit.system_mem_used -= acc_size; | 168 | kfd_mem_limit.system_mem_used -= acc_size; |
| 159 | kfd_mem_limit.userptr_mem_used -= size; | 169 | kfd_mem_limit.ttm_mem_used -= acc_size; |
| 160 | } | 170 | } |
| 161 | WARN_ONCE(kfd_mem_limit.system_mem_used < 0, | 171 | WARN_ONCE(kfd_mem_limit.system_mem_used < 0, |
| 162 | "kfd system memory accounting unbalanced"); | 172 | "kfd system memory accounting unbalanced"); |
| 163 | WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0, | 173 | WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, |
| 164 | "kfd userptr memory accounting unbalanced"); | 174 | "kfd TTM memory accounting unbalanced"); |
| 165 | 175 | ||
| 166 | spin_unlock(&kfd_mem_limit.mem_limit_lock); | 176 | spin_unlock(&kfd_mem_limit.mem_limit_lock); |
| 167 | } | 177 | } |
| @@ -171,16 +181,22 @@ void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo) | |||
| 171 | spin_lock(&kfd_mem_limit.mem_limit_lock); | 181 | spin_lock(&kfd_mem_limit.mem_limit_lock); |
| 172 | 182 | ||
| 173 | if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) { | 183 | if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) { |
| 174 | kfd_mem_limit.system_mem_used -= bo->tbo.acc_size; | 184 | kfd_mem_limit.system_mem_used -= |
| 175 | kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo); | 185 | (bo->tbo.acc_size + amdgpu_bo_size(bo)); |
| 186 | kfd_mem_limit.ttm_mem_used -= bo->tbo.acc_size; | ||
| 176 | } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) { | 187 | } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) { |
| 177 | kfd_mem_limit.system_mem_used -= | 188 | kfd_mem_limit.system_mem_used -= |
| 178 | (bo->tbo.acc_size + amdgpu_bo_size(bo)); | 189 | (bo->tbo.acc_size + amdgpu_bo_size(bo)); |
| 190 | kfd_mem_limit.ttm_mem_used -= | ||
| 191 | (bo->tbo.acc_size + amdgpu_bo_size(bo)); | ||
| 192 | } else { | ||
| 193 | kfd_mem_limit.system_mem_used -= bo->tbo.acc_size; | ||
| 194 | kfd_mem_limit.ttm_mem_used -= bo->tbo.acc_size; | ||
| 179 | } | 195 | } |
| 180 | WARN_ONCE(kfd_mem_limit.system_mem_used < 0, | 196 | WARN_ONCE(kfd_mem_limit.system_mem_used < 0, |
| 181 | "kfd system memory accounting unbalanced"); | 197 | "kfd system memory accounting unbalanced"); |
| 182 | WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0, | 198 | WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, |
| 183 | "kfd userptr memory accounting unbalanced"); | 199 | "kfd TTM memory accounting unbalanced"); |
| 184 | 200 | ||
| 185 | spin_unlock(&kfd_mem_limit.mem_limit_lock); | 201 | spin_unlock(&kfd_mem_limit.mem_limit_lock); |
| 186 | } | 202 | } |
| @@ -395,23 +411,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) | |||
| 395 | return 0; | 411 | return 0; |
| 396 | } | 412 | } |
| 397 | 413 | ||
| 398 | static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | ||
| 399 | struct dma_fence *f) | ||
| 400 | { | ||
| 401 | int ret = amdgpu_sync_fence(adev, sync, f, false); | ||
| 402 | |||
| 403 | /* Sync objects can't handle multiple GPUs (contexts) updating | ||
| 404 | * sync->last_vm_update. Fortunately we don't need it for | ||
| 405 | * KFD's purposes, so we can just drop that fence. | ||
| 406 | */ | ||
| 407 | if (sync->last_vm_update) { | ||
| 408 | dma_fence_put(sync->last_vm_update); | ||
| 409 | sync->last_vm_update = NULL; | ||
| 410 | } | ||
| 411 | |||
| 412 | return ret; | ||
| 413 | } | ||
| 414 | |||
| 415 | static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) | 414 | static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) |
| 416 | { | 415 | { |
| 417 | struct amdgpu_bo *pd = vm->root.base.bo; | 416 | struct amdgpu_bo *pd = vm->root.base.bo; |
| @@ -422,7 +421,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) | |||
| 422 | if (ret) | 421 | if (ret) |
| 423 | return ret; | 422 | return ret; |
| 424 | 423 | ||
| 425 | return sync_vm_fence(adev, sync, vm->last_update); | 424 | return amdgpu_sync_fence(NULL, sync, vm->last_update, false); |
| 426 | } | 425 | } |
| 427 | 426 | ||
| 428 | /* add_bo_to_vm - Add a BO to a VM | 427 | /* add_bo_to_vm - Add a BO to a VM |
| @@ -826,7 +825,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, | |||
| 826 | /* Add the eviction fence back */ | 825 | /* Add the eviction fence back */ |
| 827 | amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true); | 826 | amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true); |
| 828 | 827 | ||
| 829 | sync_vm_fence(adev, sync, bo_va->last_pt_update); | 828 | amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); |
| 830 | 829 | ||
| 831 | return 0; | 830 | return 0; |
| 832 | } | 831 | } |
| @@ -851,7 +850,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev, | |||
| 851 | return ret; | 850 | return ret; |
| 852 | } | 851 | } |
| 853 | 852 | ||
| 854 | return sync_vm_fence(adev, sync, bo_va->last_pt_update); | 853 | return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); |
| 855 | } | 854 | } |
| 856 | 855 | ||
| 857 | static int map_bo_to_gpuvm(struct amdgpu_device *adev, | 856 | static int map_bo_to_gpuvm(struct amdgpu_device *adev, |
| @@ -901,6 +900,26 @@ static int process_validate_vms(struct amdkfd_process_info *process_info) | |||
| 901 | return 0; | 900 | return 0; |
| 902 | } | 901 | } |
| 903 | 902 | ||
| 903 | static int process_sync_pds_resv(struct amdkfd_process_info *process_info, | ||
| 904 | struct amdgpu_sync *sync) | ||
| 905 | { | ||
| 906 | struct amdgpu_vm *peer_vm; | ||
| 907 | int ret; | ||
| 908 | |||
| 909 | list_for_each_entry(peer_vm, &process_info->vm_list_head, | ||
| 910 | vm_list_node) { | ||
| 911 | struct amdgpu_bo *pd = peer_vm->root.base.bo; | ||
| 912 | |||
| 913 | ret = amdgpu_sync_resv(NULL, | ||
| 914 | sync, pd->tbo.resv, | ||
| 915 | AMDGPU_FENCE_OWNER_UNDEFINED, false); | ||
| 916 | if (ret) | ||
| 917 | return ret; | ||
| 918 | } | ||
| 919 | |||
| 920 | return 0; | ||
| 921 | } | ||
| 922 | |||
| 904 | static int process_update_pds(struct amdkfd_process_info *process_info, | 923 | static int process_update_pds(struct amdkfd_process_info *process_info, |
| 905 | struct amdgpu_sync *sync) | 924 | struct amdgpu_sync *sync) |
| 906 | { | 925 | { |
| @@ -1199,7 +1218,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( | |||
| 1199 | byte_align = (adev->family == AMDGPU_FAMILY_VI && | 1218 | byte_align = (adev->family == AMDGPU_FAMILY_VI && |
| 1200 | adev->asic_type != CHIP_FIJI && | 1219 | adev->asic_type != CHIP_FIJI && |
| 1201 | adev->asic_type != CHIP_POLARIS10 && | 1220 | adev->asic_type != CHIP_POLARIS10 && |
| 1202 | adev->asic_type != CHIP_POLARIS11) ? | 1221 | adev->asic_type != CHIP_POLARIS11 && |
| 1222 | adev->asic_type != CHIP_POLARIS12) ? | ||
| 1203 | VI_BO_SIZE_ALIGN : 1; | 1223 | VI_BO_SIZE_ALIGN : 1; |
| 1204 | 1224 | ||
| 1205 | mapping_flags = AMDGPU_VM_PAGE_READABLE; | 1225 | mapping_flags = AMDGPU_VM_PAGE_READABLE; |
| @@ -1215,10 +1235,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( | |||
| 1215 | 1235 | ||
| 1216 | amdgpu_sync_create(&(*mem)->sync); | 1236 | amdgpu_sync_create(&(*mem)->sync); |
| 1217 | 1237 | ||
| 1218 | ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain); | 1238 | ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, |
| 1239 | alloc_domain, false); | ||
| 1219 | if (ret) { | 1240 | if (ret) { |
| 1220 | pr_debug("Insufficient system memory\n"); | 1241 | pr_debug("Insufficient system memory\n"); |
| 1221 | goto err_reserve_system_mem; | 1242 | goto err_reserve_limit; |
| 1222 | } | 1243 | } |
| 1223 | 1244 | ||
| 1224 | pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", | 1245 | pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", |
| @@ -1266,10 +1287,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( | |||
| 1266 | allocate_init_user_pages_failed: | 1287 | allocate_init_user_pages_failed: |
| 1267 | amdgpu_bo_unref(&bo); | 1288 | amdgpu_bo_unref(&bo); |
| 1268 | /* Don't unreserve system mem limit twice */ | 1289 | /* Don't unreserve system mem limit twice */ |
| 1269 | goto err_reserve_system_mem; | 1290 | goto err_reserve_limit; |
| 1270 | err_bo_create: | 1291 | err_bo_create: |
| 1271 | unreserve_system_mem_limit(adev, size, alloc_domain); | 1292 | unreserve_system_mem_limit(adev, size, alloc_domain, false); |
| 1272 | err_reserve_system_mem: | 1293 | err_reserve_limit: |
| 1273 | mutex_destroy(&(*mem)->lock); | 1294 | mutex_destroy(&(*mem)->lock); |
| 1274 | kfree(*mem); | 1295 | kfree(*mem); |
| 1275 | return ret; | 1296 | return ret; |
| @@ -1405,7 +1426,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( | |||
| 1405 | * the queues are still stopped and we can leave mapping for | 1426 | * the queues are still stopped and we can leave mapping for |
| 1406 | * the next restore worker | 1427 | * the next restore worker |
| 1407 | */ | 1428 | */ |
| 1408 | if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM) | 1429 | if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && |
| 1430 | bo->tbo.mem.mem_type == TTM_PL_SYSTEM) | ||
| 1409 | is_invalid_userptr = true; | 1431 | is_invalid_userptr = true; |
| 1410 | 1432 | ||
| 1411 | if (check_if_add_bo_to_vm(avm, mem)) { | 1433 | if (check_if_add_bo_to_vm(avm, mem)) { |
| @@ -2044,13 +2066,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) | |||
| 2044 | if (ret) | 2066 | if (ret) |
| 2045 | goto validate_map_fail; | 2067 | goto validate_map_fail; |
| 2046 | 2068 | ||
| 2047 | /* Wait for PD/PTs validate to finish */ | 2069 | ret = process_sync_pds_resv(process_info, &sync_obj); |
| 2048 | /* FIXME: I think this isn't needed */ | 2070 | if (ret) { |
| 2049 | list_for_each_entry(peer_vm, &process_info->vm_list_head, | 2071 | pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n"); |
| 2050 | vm_list_node) { | 2072 | goto validate_map_fail; |
| 2051 | struct amdgpu_bo *bo = peer_vm->root.base.bo; | ||
| 2052 | |||
| 2053 | ttm_bo_wait(&bo->tbo, false, false); | ||
| 2054 | } | 2073 | } |
| 2055 | 2074 | ||
| 2056 | /* Validate BOs and map them to GPUVM (update VM page tables). */ | 2075 | /* Validate BOs and map them to GPUVM (update VM page tables). */ |
| @@ -2066,7 +2085,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) | |||
| 2066 | pr_debug("Memory eviction: Validate BOs failed. Try again\n"); | 2085 | pr_debug("Memory eviction: Validate BOs failed. Try again\n"); |
| 2067 | goto validate_map_fail; | 2086 | goto validate_map_fail; |
| 2068 | } | 2087 | } |
| 2069 | 2088 | ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false); | |
| 2089 | if (ret) { | ||
| 2090 | pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); | ||
| 2091 | goto validate_map_fail; | ||
| 2092 | } | ||
| 2070 | list_for_each_entry(bo_va_entry, &mem->bo_va_list, | 2093 | list_for_each_entry(bo_va_entry, &mem->bo_va_list, |
| 2071 | bo_list) { | 2094 | bo_list) { |
| 2072 | ret = update_gpuvm_pte((struct amdgpu_device *) | 2095 | ret = update_gpuvm_pte((struct amdgpu_device *) |
| @@ -2087,6 +2110,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) | |||
| 2087 | goto validate_map_fail; | 2110 | goto validate_map_fail; |
| 2088 | } | 2111 | } |
| 2089 | 2112 | ||
| 2113 | /* Wait for validate and PT updates to finish */ | ||
| 2090 | amdgpu_sync_wait(&sync_obj, false); | 2114 | amdgpu_sync_wait(&sync_obj, false); |
| 2091 | 2115 | ||
| 2092 | /* Release old eviction fence and create new one, because fence only | 2116 | /* Release old eviction fence and create new one, because fence only |
| @@ -2105,10 +2129,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) | |||
| 2105 | process_info->eviction_fence = new_fence; | 2129 | process_info->eviction_fence = new_fence; |
| 2106 | *ef = dma_fence_get(&new_fence->base); | 2130 | *ef = dma_fence_get(&new_fence->base); |
| 2107 | 2131 | ||
| 2108 | /* Wait for validate to finish and attach new eviction fence */ | 2132 | /* Attach new eviction fence to all BOs */ |
| 2109 | list_for_each_entry(mem, &process_info->kfd_bo_list, | ||
| 2110 | validate_list.head) | ||
| 2111 | ttm_bo_wait(&mem->bo->tbo, false, false); | ||
| 2112 | list_for_each_entry(mem, &process_info->kfd_bo_list, | 2133 | list_for_each_entry(mem, &process_info->kfd_bo_list, |
| 2113 | validate_list.head) | 2134 | validate_list.head) |
| 2114 | amdgpu_bo_fence(mem->bo, | 2135 | amdgpu_bo_fence(mem->bo, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 0c590ddf250a..5b550706ee76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | |||
| @@ -43,7 +43,7 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo | |||
| 43 | r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, | 43 | r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, |
| 44 | domain, bo, | 44 | domain, bo, |
| 45 | NULL, &ptr); | 45 | NULL, &ptr); |
| 46 | if (!bo) | 46 | if (!*bo) |
| 47 | return -ENOMEM; | 47 | return -ENOMEM; |
| 48 | 48 | ||
| 49 | memset(ptr, 0, size); | 49 | memset(ptr, 0, size); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index f9b54236102d..95f4c4139fc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
| @@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { | |||
| 39 | [AMDGPU_HW_IP_UVD_ENC] = 1, | 39 | [AMDGPU_HW_IP_UVD_ENC] = 1, |
| 40 | [AMDGPU_HW_IP_VCN_DEC] = 1, | 40 | [AMDGPU_HW_IP_VCN_DEC] = 1, |
| 41 | [AMDGPU_HW_IP_VCN_ENC] = 1, | 41 | [AMDGPU_HW_IP_VCN_ENC] = 1, |
| 42 | [AMDGPU_HW_IP_VCN_JPEG] = 1, | ||
| 42 | }; | 43 | }; |
| 43 | 44 | ||
| 44 | static int amdgput_ctx_total_num_entities(void) | 45 | static int amdgput_ctx_total_num_entities(void) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 590588a82471..c75badfa5c4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -59,6 +59,8 @@ | |||
| 59 | #include "amdgpu_amdkfd.h" | 59 | #include "amdgpu_amdkfd.h" |
| 60 | #include "amdgpu_pm.h" | 60 | #include "amdgpu_pm.h" |
| 61 | 61 | ||
| 62 | #include "amdgpu_xgmi.h" | ||
| 63 | |||
| 62 | MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); | 64 | MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); |
| 63 | MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); | 65 | MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); |
| 64 | MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); | 66 | MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); |
| @@ -513,6 +515,8 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev) | |||
| 513 | */ | 515 | */ |
| 514 | static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) | 516 | static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) |
| 515 | { | 517 | { |
| 518 | amdgpu_asic_init_doorbell_index(adev); | ||
| 519 | |||
| 516 | /* No doorbell on SI hardware generation */ | 520 | /* No doorbell on SI hardware generation */ |
| 517 | if (adev->asic_type < CHIP_BONAIRE) { | 521 | if (adev->asic_type < CHIP_BONAIRE) { |
| 518 | adev->doorbell.base = 0; | 522 | adev->doorbell.base = 0; |
| @@ -530,10 +534,19 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) | |||
| 530 | adev->doorbell.size = pci_resource_len(adev->pdev, 2); | 534 | adev->doorbell.size = pci_resource_len(adev->pdev, 2); |
| 531 | 535 | ||
| 532 | adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), | 536 | adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), |
| 533 | AMDGPU_DOORBELL_MAX_ASSIGNMENT+1); | 537 | adev->doorbell_index.max_assignment+1); |
| 534 | if (adev->doorbell.num_doorbells == 0) | 538 | if (adev->doorbell.num_doorbells == 0) |
| 535 | return -EINVAL; | 539 | return -EINVAL; |
| 536 | 540 | ||
| 541 | /* For Vega, reserve and map two pages on doorbell BAR since SDMA | ||
| 542 | * paging queue doorbell use the second page. The | ||
| 543 | * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the | ||
| 544 | * doorbells are in the first page. So with paging queue enabled, | ||
| 545 | * the max num_doorbells should + 1 page (0x400 in dword) | ||
| 546 | */ | ||
| 547 | if (adev->asic_type >= CHIP_VEGA10) | ||
| 548 | adev->doorbell.num_doorbells += 0x400; | ||
| 549 | |||
| 537 | adev->doorbell.ptr = ioremap(adev->doorbell.base, | 550 | adev->doorbell.ptr = ioremap(adev->doorbell.base, |
| 538 | adev->doorbell.num_doorbells * | 551 | adev->doorbell.num_doorbells * |
| 539 | sizeof(u32)); | 552 | sizeof(u32)); |
| @@ -2458,9 +2471,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
| 2458 | DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); | 2471 | DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); |
| 2459 | DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); | 2472 | DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); |
| 2460 | 2473 | ||
| 2461 | /* doorbell bar mapping */ | ||
| 2462 | amdgpu_device_doorbell_init(adev); | ||
| 2463 | |||
| 2464 | /* io port mapping */ | 2474 | /* io port mapping */ |
| 2465 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 2475 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
| 2466 | if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { | 2476 | if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { |
| @@ -2479,6 +2489,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
| 2479 | if (r) | 2489 | if (r) |
| 2480 | return r; | 2490 | return r; |
| 2481 | 2491 | ||
| 2492 | /* doorbell bar mapping and doorbell index init*/ | ||
| 2493 | amdgpu_device_doorbell_init(adev); | ||
| 2494 | |||
| 2482 | /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ | 2495 | /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ |
| 2483 | /* this will fail for cards that aren't VGA class devices, just | 2496 | /* this will fail for cards that aren't VGA class devices, just |
| 2484 | * ignore it */ | 2497 | * ignore it */ |
| @@ -3151,86 +3164,6 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) | |||
| 3151 | return 0; | 3164 | return 0; |
| 3152 | } | 3165 | } |
| 3153 | 3166 | ||
| 3154 | /** | ||
| 3155 | * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough | ||
| 3156 | * | ||
| 3157 | * @adev: amdgpu device pointer | ||
| 3158 | * | ||
| 3159 | * attempt to do soft-reset or full-reset and reinitialize Asic | ||
| 3160 | * return 0 means succeeded otherwise failed | ||
| 3161 | */ | ||
| 3162 | static int amdgpu_device_reset(struct amdgpu_device *adev) | ||
| 3163 | { | ||
| 3164 | bool need_full_reset, vram_lost = 0; | ||
| 3165 | int r; | ||
| 3166 | |||
| 3167 | need_full_reset = amdgpu_device_ip_need_full_reset(adev); | ||
| 3168 | |||
| 3169 | if (!need_full_reset) { | ||
| 3170 | amdgpu_device_ip_pre_soft_reset(adev); | ||
| 3171 | r = amdgpu_device_ip_soft_reset(adev); | ||
| 3172 | amdgpu_device_ip_post_soft_reset(adev); | ||
| 3173 | if (r || amdgpu_device_ip_check_soft_reset(adev)) { | ||
| 3174 | DRM_INFO("soft reset failed, will fallback to full reset!\n"); | ||
| 3175 | need_full_reset = true; | ||
| 3176 | } | ||
| 3177 | } | ||
| 3178 | |||
| 3179 | if (need_full_reset) { | ||
| 3180 | r = amdgpu_device_ip_suspend(adev); | ||
| 3181 | |||
| 3182 | retry: | ||
| 3183 | r = amdgpu_asic_reset(adev); | ||
| 3184 | /* post card */ | ||
| 3185 | amdgpu_atom_asic_init(adev->mode_info.atom_context); | ||
| 3186 | |||
| 3187 | if (!r) { | ||
| 3188 | dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); | ||
| 3189 | r = amdgpu_device_ip_resume_phase1(adev); | ||
| 3190 | if (r) | ||
| 3191 | goto out; | ||
| 3192 | |||
| 3193 | vram_lost = amdgpu_device_check_vram_lost(adev); | ||
| 3194 | if (vram_lost) { | ||
| 3195 | DRM_ERROR("VRAM is lost!\n"); | ||
| 3196 | atomic_inc(&adev->vram_lost_counter); | ||
| 3197 | } | ||
| 3198 | |||
| 3199 | r = amdgpu_gtt_mgr_recover( | ||
| 3200 | &adev->mman.bdev.man[TTM_PL_TT]); | ||
| 3201 | if (r) | ||
| 3202 | goto out; | ||
| 3203 | |||
| 3204 | r = amdgpu_device_fw_loading(adev); | ||
| 3205 | if (r) | ||
| 3206 | return r; | ||
| 3207 | |||
| 3208 | r = amdgpu_device_ip_resume_phase2(adev); | ||
| 3209 | if (r) | ||
| 3210 | goto out; | ||
| 3211 | |||
| 3212 | if (vram_lost) | ||
| 3213 | amdgpu_device_fill_reset_magic(adev); | ||
| 3214 | } | ||
| 3215 | } | ||
| 3216 | |||
| 3217 | out: | ||
| 3218 | if (!r) { | ||
| 3219 | amdgpu_irq_gpu_reset_resume_helper(adev); | ||
| 3220 | r = amdgpu_ib_ring_tests(adev); | ||
| 3221 | if (r) { | ||
| 3222 | dev_err(adev->dev, "ib ring test failed (%d).\n", r); | ||
| 3223 | r = amdgpu_device_ip_suspend(adev); | ||
| 3224 | need_full_reset = true; | ||
| 3225 | goto retry; | ||
| 3226 | } | ||
| 3227 | } | ||
| 3228 | |||
| 3229 | if (!r) | ||
| 3230 | r = amdgpu_device_recover_vram(adev); | ||
| 3231 | |||
| 3232 | return r; | ||
| 3233 | } | ||
| 3234 | 3167 | ||
| 3235 | /** | 3168 | /** |
| 3236 | * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf | 3169 | * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf |
| @@ -3329,31 +3262,13 @@ disabled: | |||
| 3329 | return false; | 3262 | return false; |
| 3330 | } | 3263 | } |
| 3331 | 3264 | ||
| 3332 | /** | ||
| 3333 | * amdgpu_device_gpu_recover - reset the asic and recover scheduler | ||
| 3334 | * | ||
| 3335 | * @adev: amdgpu device pointer | ||
| 3336 | * @job: which job trigger hang | ||
| 3337 | * | ||
| 3338 | * Attempt to reset the GPU if it has hung (all asics). | ||
| 3339 | * Returns 0 for success or an error on failure. | ||
| 3340 | */ | ||
| 3341 | int amdgpu_device_gpu_recover(struct amdgpu_device *adev, | ||
| 3342 | struct amdgpu_job *job) | ||
| 3343 | { | ||
| 3344 | int i, r, resched; | ||
| 3345 | 3265 | ||
| 3346 | dev_info(adev->dev, "GPU reset begin!\n"); | 3266 | static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, |
| 3347 | 3267 | struct amdgpu_job *job, | |
| 3348 | mutex_lock(&adev->lock_reset); | 3268 | bool *need_full_reset_arg) |
| 3349 | atomic_inc(&adev->gpu_reset_counter); | 3269 | { |
| 3350 | adev->in_gpu_reset = 1; | 3270 | int i, r = 0; |
| 3351 | 3271 | bool need_full_reset = *need_full_reset_arg; | |
| 3352 | /* Block kfd */ | ||
| 3353 | amdgpu_amdkfd_pre_reset(adev); | ||
| 3354 | |||
| 3355 | /* block TTM */ | ||
| 3356 | resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); | ||
| 3357 | 3272 | ||
| 3358 | /* block all schedulers and reset given job's ring */ | 3273 | /* block all schedulers and reset given job's ring */ |
| 3359 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 3274 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
| @@ -3373,10 +3288,123 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, | |||
| 3373 | amdgpu_fence_driver_force_completion(ring); | 3288 | amdgpu_fence_driver_force_completion(ring); |
| 3374 | } | 3289 | } |
| 3375 | 3290 | ||
| 3376 | if (amdgpu_sriov_vf(adev)) | 3291 | |
| 3377 | r = amdgpu_device_reset_sriov(adev, job ? false : true); | 3292 | |
| 3378 | else | 3293 | if (!amdgpu_sriov_vf(adev)) { |
| 3379 | r = amdgpu_device_reset(adev); | 3294 | |
| 3295 | if (!need_full_reset) | ||
| 3296 | need_full_reset = amdgpu_device_ip_need_full_reset(adev); | ||
| 3297 | |||
| 3298 | if (!need_full_reset) { | ||
| 3299 | amdgpu_device_ip_pre_soft_reset(adev); | ||
| 3300 | r = amdgpu_device_ip_soft_reset(adev); | ||
| 3301 | amdgpu_device_ip_post_soft_reset(adev); | ||
| 3302 | if (r || amdgpu_device_ip_check_soft_reset(adev)) { | ||
| 3303 | DRM_INFO("soft reset failed, will fallback to full reset!\n"); | ||
| 3304 | need_full_reset = true; | ||
| 3305 | } | ||
| 3306 | } | ||
| 3307 | |||
| 3308 | if (need_full_reset) | ||
| 3309 | r = amdgpu_device_ip_suspend(adev); | ||
| 3310 | |||
| 3311 | *need_full_reset_arg = need_full_reset; | ||
| 3312 | } | ||
| 3313 | |||
| 3314 | return r; | ||
| 3315 | } | ||
| 3316 | |||
| 3317 | static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, | ||
| 3318 | struct list_head *device_list_handle, | ||
| 3319 | bool *need_full_reset_arg) | ||
| 3320 | { | ||
| 3321 | struct amdgpu_device *tmp_adev = NULL; | ||
| 3322 | bool need_full_reset = *need_full_reset_arg, vram_lost = false; | ||
| 3323 | int r = 0; | ||
| 3324 | |||
| 3325 | /* | ||
| 3326 | * ASIC reset has to be done on all HGMI hive nodes ASAP | ||
| 3327 | * to allow proper links negotiation in FW (within 1 sec) | ||
| 3328 | */ | ||
| 3329 | if (need_full_reset) { | ||
| 3330 | list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { | ||
| 3331 | r = amdgpu_asic_reset(tmp_adev); | ||
| 3332 | if (r) | ||
| 3333 | DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s", | ||
| 3334 | r, tmp_adev->ddev->unique); | ||
| 3335 | } | ||
| 3336 | } | ||
| 3337 | |||
| 3338 | |||
| 3339 | list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { | ||
| 3340 | if (need_full_reset) { | ||
| 3341 | /* post card */ | ||
| 3342 | if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context)) | ||
| 3343 | DRM_WARN("asic atom init failed!"); | ||
| 3344 | |||
| 3345 | if (!r) { | ||
| 3346 | dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); | ||
| 3347 | r = amdgpu_device_ip_resume_phase1(tmp_adev); | ||
| 3348 | if (r) | ||
| 3349 | goto out; | ||
| 3350 | |||
| 3351 | vram_lost = amdgpu_device_check_vram_lost(tmp_adev); | ||
| 3352 | if (vram_lost) { | ||
| 3353 | DRM_ERROR("VRAM is lost!\n"); | ||
| 3354 | atomic_inc(&tmp_adev->vram_lost_counter); | ||
| 3355 | } | ||
| 3356 | |||
| 3357 | r = amdgpu_gtt_mgr_recover( | ||
| 3358 | &tmp_adev->mman.bdev.man[TTM_PL_TT]); | ||
| 3359 | if (r) | ||
| 3360 | goto out; | ||
| 3361 | |||
| 3362 | r = amdgpu_device_fw_loading(tmp_adev); | ||
| 3363 | if (r) | ||
| 3364 | return r; | ||
| 3365 | |||
| 3366 | r = amdgpu_device_ip_resume_phase2(tmp_adev); | ||
| 3367 | if (r) | ||
| 3368 | goto out; | ||
| 3369 | |||
| 3370 | if (vram_lost) | ||
| 3371 | amdgpu_device_fill_reset_magic(tmp_adev); | ||
| 3372 | |||
| 3373 | /* Update PSP FW topology after reset */ | ||
| 3374 | if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1) | ||
| 3375 | r = amdgpu_xgmi_update_topology(hive, tmp_adev); | ||
| 3376 | } | ||
| 3377 | } | ||
| 3378 | |||
| 3379 | |||
| 3380 | out: | ||
| 3381 | if (!r) { | ||
| 3382 | amdgpu_irq_gpu_reset_resume_helper(tmp_adev); | ||
| 3383 | r = amdgpu_ib_ring_tests(tmp_adev); | ||
| 3384 | if (r) { | ||
| 3385 | dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); | ||
| 3386 | r = amdgpu_device_ip_suspend(tmp_adev); | ||
| 3387 | need_full_reset = true; | ||
| 3388 | r = -EAGAIN; | ||
| 3389 | goto end; | ||
| 3390 | } | ||
| 3391 | } | ||
| 3392 | |||
| 3393 | if (!r) | ||
| 3394 | r = amdgpu_device_recover_vram(tmp_adev); | ||
| 3395 | else | ||
| 3396 | tmp_adev->asic_reset_res = r; | ||
| 3397 | } | ||
| 3398 | |||
| 3399 | end: | ||
| 3400 | *need_full_reset_arg = need_full_reset; | ||
| 3401 | return r; | ||
| 3402 | } | ||
| 3403 | |||
| 3404 | static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev, | ||
| 3405 | struct amdgpu_job *job) | ||
| 3406 | { | ||
| 3407 | int i; | ||
| 3380 | 3408 | ||
| 3381 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 3409 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
| 3382 | struct amdgpu_ring *ring = adev->rings[i]; | 3410 | struct amdgpu_ring *ring = adev->rings[i]; |
| @@ -3388,7 +3416,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, | |||
| 3388 | * or all rings (in the case @job is NULL) | 3416 | * or all rings (in the case @job is NULL) |
| 3389 | * after above amdgpu_reset accomplished | 3417 | * after above amdgpu_reset accomplished |
| 3390 | */ | 3418 | */ |
| 3391 | if ((!job || job->base.sched == &ring->sched) && !r) | 3419 | if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res) |
| 3392 | drm_sched_job_recovery(&ring->sched); | 3420 | drm_sched_job_recovery(&ring->sched); |
| 3393 | 3421 | ||
| 3394 | kthread_unpark(ring->sched.thread); | 3422 | kthread_unpark(ring->sched.thread); |
| @@ -3398,21 +3426,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, | |||
| 3398 | drm_helper_resume_force_mode(adev->ddev); | 3426 | drm_helper_resume_force_mode(adev->ddev); |
| 3399 | } | 3427 | } |
| 3400 | 3428 | ||
| 3401 | ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); | 3429 | adev->asic_reset_res = 0; |
| 3430 | } | ||
| 3402 | 3431 | ||
| 3403 | if (r) { | 3432 | static void amdgpu_device_lock_adev(struct amdgpu_device *adev) |
| 3404 | /* bad news, how to tell it to userspace ? */ | 3433 | { |
| 3405 | dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter)); | 3434 | mutex_lock(&adev->lock_reset); |
| 3406 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); | 3435 | atomic_inc(&adev->gpu_reset_counter); |
| 3407 | } else { | 3436 | adev->in_gpu_reset = 1; |
| 3408 | dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter)); | 3437 | /* Block kfd */ |
| 3409 | } | 3438 | amdgpu_amdkfd_pre_reset(adev); |
| 3439 | } | ||
| 3410 | 3440 | ||
| 3441 | static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) | ||
| 3442 | { | ||
| 3411 | /*unlock kfd */ | 3443 | /*unlock kfd */ |
| 3412 | amdgpu_amdkfd_post_reset(adev); | 3444 | amdgpu_amdkfd_post_reset(adev); |
| 3413 | amdgpu_vf_error_trans_all(adev); | 3445 | amdgpu_vf_error_trans_all(adev); |
| 3414 | adev->in_gpu_reset = 0; | 3446 | adev->in_gpu_reset = 0; |
| 3415 | mutex_unlock(&adev->lock_reset); | 3447 | mutex_unlock(&adev->lock_reset); |
| 3448 | } | ||
| 3449 | |||
| 3450 | |||
| 3451 | /** | ||
| 3452 | * amdgpu_device_gpu_recover - reset the asic and recover scheduler | ||
| 3453 | * | ||
| 3454 | * @adev: amdgpu device pointer | ||
| 3455 | * @job: which job trigger hang | ||
| 3456 | * | ||
| 3457 | * Attempt to reset the GPU if it has hung (all asics). | ||
| 3458 | * Attempt to do soft-reset or full-reset and reinitialize Asic | ||
| 3459 | * Returns 0 for success or an error on failure. | ||
| 3460 | */ | ||
| 3461 | |||
| 3462 | int amdgpu_device_gpu_recover(struct amdgpu_device *adev, | ||
| 3463 | struct amdgpu_job *job) | ||
| 3464 | { | ||
| 3465 | int r; | ||
| 3466 | struct amdgpu_hive_info *hive = NULL; | ||
| 3467 | bool need_full_reset = false; | ||
| 3468 | struct amdgpu_device *tmp_adev = NULL; | ||
| 3469 | struct list_head device_list, *device_list_handle = NULL; | ||
| 3470 | |||
| 3471 | INIT_LIST_HEAD(&device_list); | ||
| 3472 | |||
| 3473 | dev_info(adev->dev, "GPU reset begin!\n"); | ||
| 3474 | |||
| 3475 | /* | ||
| 3476 | * In case of XGMI hive disallow concurrent resets to be triggered | ||
| 3477 | * by different nodes. No point also since the one node already executing | ||
| 3478 | * reset will also reset all the other nodes in the hive. | ||
| 3479 | */ | ||
| 3480 | hive = amdgpu_get_xgmi_hive(adev); | ||
| 3481 | if (hive && adev->gmc.xgmi.num_physical_nodes > 1 && | ||
| 3482 | !mutex_trylock(&hive->hive_lock)) | ||
| 3483 | return 0; | ||
| 3484 | |||
| 3485 | /* Start with adev pre asic reset first for soft reset check.*/ | ||
| 3486 | amdgpu_device_lock_adev(adev); | ||
| 3487 | r = amdgpu_device_pre_asic_reset(adev, | ||
| 3488 | job, | ||
| 3489 | &need_full_reset); | ||
| 3490 | if (r) { | ||
| 3491 | /*TODO Should we stop ?*/ | ||
| 3492 | DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", | ||
| 3493 | r, adev->ddev->unique); | ||
| 3494 | adev->asic_reset_res = r; | ||
| 3495 | } | ||
| 3496 | |||
| 3497 | /* Build list of devices to reset */ | ||
| 3498 | if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) { | ||
| 3499 | if (!hive) { | ||
| 3500 | amdgpu_device_unlock_adev(adev); | ||
| 3501 | return -ENODEV; | ||
| 3502 | } | ||
| 3503 | |||
| 3504 | /* | ||
| 3505 | * In case we are in XGMI hive mode device reset is done for all the | ||
| 3506 | * nodes in the hive to retrain all XGMI links and hence the reset | ||
| 3507 | * sequence is executed in loop on all nodes. | ||
| 3508 | */ | ||
| 3509 | device_list_handle = &hive->device_list; | ||
| 3510 | } else { | ||
| 3511 | list_add_tail(&adev->gmc.xgmi.head, &device_list); | ||
| 3512 | device_list_handle = &device_list; | ||
| 3513 | } | ||
| 3514 | |||
| 3515 | retry: /* Rest of adevs pre asic reset from XGMI hive. */ | ||
| 3516 | list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { | ||
| 3517 | |||
| 3518 | if (tmp_adev == adev) | ||
| 3519 | continue; | ||
| 3520 | |||
| 3521 | dev_info(tmp_adev->dev, "GPU reset begin for drm dev %s!\n", adev->ddev->unique); | ||
| 3522 | |||
| 3523 | amdgpu_device_lock_adev(tmp_adev); | ||
| 3524 | r = amdgpu_device_pre_asic_reset(tmp_adev, | ||
| 3525 | NULL, | ||
| 3526 | &need_full_reset); | ||
| 3527 | /*TODO Should we stop ?*/ | ||
| 3528 | if (r) { | ||
| 3529 | DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", | ||
| 3530 | r, tmp_adev->ddev->unique); | ||
| 3531 | tmp_adev->asic_reset_res = r; | ||
| 3532 | } | ||
| 3533 | } | ||
| 3534 | |||
| 3535 | /* Actual ASIC resets if needed.*/ | ||
| 3536 | /* TODO Implement XGMI hive reset logic for SRIOV */ | ||
| 3537 | if (amdgpu_sriov_vf(adev)) { | ||
| 3538 | r = amdgpu_device_reset_sriov(adev, job ? false : true); | ||
| 3539 | if (r) | ||
| 3540 | adev->asic_reset_res = r; | ||
| 3541 | } else { | ||
| 3542 | r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset); | ||
| 3543 | if (r && r == -EAGAIN) | ||
| 3544 | goto retry; | ||
| 3545 | } | ||
| 3546 | |||
| 3547 | /* Post ASIC reset for all devs .*/ | ||
| 3548 | list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { | ||
| 3549 | amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL); | ||
| 3550 | |||
| 3551 | if (r) { | ||
| 3552 | /* bad news, how to tell it to userspace ? */ | ||
| 3553 | dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter)); | ||
| 3554 | amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); | ||
| 3555 | } else { | ||
| 3556 | dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter)); | ||
| 3557 | } | ||
| 3558 | |||
| 3559 | amdgpu_device_unlock_adev(tmp_adev); | ||
| 3560 | } | ||
| 3561 | |||
| 3562 | if (hive && adev->gmc.xgmi.num_physical_nodes > 1) | ||
| 3563 | mutex_unlock(&hive->hive_lock); | ||
| 3564 | |||
| 3565 | if (r) | ||
| 3566 | dev_info(adev->dev, "GPU reset end with ret = %d\n", r); | ||
| 3416 | return r; | 3567 | return r; |
| 3417 | } | 3568 | } |
| 3418 | 3569 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 686a26de50f9..15ce7e681d67 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
| @@ -631,6 +631,11 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) | |||
| 631 | drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16); | 631 | drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16); |
| 632 | if (!adev->mode_info.max_bpc_property) | 632 | if (!adev->mode_info.max_bpc_property) |
| 633 | return -ENOMEM; | 633 | return -ENOMEM; |
| 634 | adev->mode_info.abm_level_property = | ||
| 635 | drm_property_create_range(adev->ddev, 0, | ||
| 636 | "abm level", 0, 4); | ||
| 637 | if (!adev->mode_info.abm_level_property) | ||
| 638 | return -ENOMEM; | ||
| 634 | } | 639 | } |
| 635 | 640 | ||
| 636 | return 0; | 641 | return 0; |
| @@ -857,7 +862,12 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, | |||
| 857 | /* Inside "upper part" of vblank area? Apply corrective offset if so: */ | 862 | /* Inside "upper part" of vblank area? Apply corrective offset if so: */ |
| 858 | if (in_vbl && (*vpos >= vbl_start)) { | 863 | if (in_vbl && (*vpos >= vbl_start)) { |
| 859 | vtotal = mode->crtc_vtotal; | 864 | vtotal = mode->crtc_vtotal; |
| 860 | *vpos = *vpos - vtotal; | 865 | |
| 866 | /* With variable refresh rate displays the vpos can exceed | ||
| 867 | * the vtotal value. Clamp to 0 to return -vbl_end instead | ||
| 868 | * of guessing the remaining number of lines until scanout. | ||
| 869 | */ | ||
| 870 | *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0; | ||
| 861 | } | 871 | } |
| 862 | 872 | ||
| 863 | /* Correct for shifted end of vbl at vbl_end. */ | 873 | /* Correct for shifted end of vbl at vbl_end. */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h new file mode 100644 index 000000000000..be620b29f4aa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | |||
| @@ -0,0 +1,243 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2018 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | /* | ||
| 25 | * GPU doorbell structures, functions & helpers | ||
| 26 | */ | ||
| 27 | struct amdgpu_doorbell { | ||
| 28 | /* doorbell mmio */ | ||
| 29 | resource_size_t base; | ||
| 30 | resource_size_t size; | ||
| 31 | u32 __iomem *ptr; | ||
| 32 | u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */ | ||
| 33 | }; | ||
| 34 | |||
| 35 | /* Reserved doorbells for amdgpu (including multimedia). | ||
| 36 | * KFD can use all the rest in the 2M doorbell bar. | ||
| 37 | * For asic before vega10, doorbell is 32-bit, so the | ||
| 38 | * index/offset is in dword. For vega10 and after, doorbell | ||
| 39 | * can be 64-bit, so the index defined is in qword. | ||
| 40 | */ | ||
| 41 | struct amdgpu_doorbell_index { | ||
| 42 | uint32_t kiq; | ||
| 43 | uint32_t mec_ring0; | ||
| 44 | uint32_t mec_ring1; | ||
| 45 | uint32_t mec_ring2; | ||
| 46 | uint32_t mec_ring3; | ||
| 47 | uint32_t mec_ring4; | ||
| 48 | uint32_t mec_ring5; | ||
| 49 | uint32_t mec_ring6; | ||
| 50 | uint32_t mec_ring7; | ||
| 51 | uint32_t userqueue_start; | ||
| 52 | uint32_t userqueue_end; | ||
| 53 | uint32_t gfx_ring0; | ||
| 54 | uint32_t sdma_engine0; | ||
| 55 | uint32_t sdma_engine1; | ||
| 56 | uint32_t sdma_engine2; | ||
| 57 | uint32_t sdma_engine3; | ||
| 58 | uint32_t sdma_engine4; | ||
| 59 | uint32_t sdma_engine5; | ||
| 60 | uint32_t sdma_engine6; | ||
| 61 | uint32_t sdma_engine7; | ||
| 62 | uint32_t ih; | ||
| 63 | union { | ||
| 64 | struct { | ||
| 65 | uint32_t vcn_ring0_1; | ||
| 66 | uint32_t vcn_ring2_3; | ||
| 67 | uint32_t vcn_ring4_5; | ||
| 68 | uint32_t vcn_ring6_7; | ||
| 69 | } vcn; | ||
| 70 | struct { | ||
| 71 | uint32_t uvd_ring0_1; | ||
| 72 | uint32_t uvd_ring2_3; | ||
| 73 | uint32_t uvd_ring4_5; | ||
| 74 | uint32_t uvd_ring6_7; | ||
| 75 | uint32_t vce_ring0_1; | ||
| 76 | uint32_t vce_ring2_3; | ||
| 77 | uint32_t vce_ring4_5; | ||
| 78 | uint32_t vce_ring6_7; | ||
| 79 | } uvd_vce; | ||
| 80 | }; | ||
| 81 | uint32_t max_assignment; | ||
| 82 | }; | ||
| 83 | |||
| 84 | typedef enum _AMDGPU_DOORBELL_ASSIGNMENT | ||
| 85 | { | ||
| 86 | AMDGPU_DOORBELL_KIQ = 0x000, | ||
| 87 | AMDGPU_DOORBELL_HIQ = 0x001, | ||
| 88 | AMDGPU_DOORBELL_DIQ = 0x002, | ||
| 89 | AMDGPU_DOORBELL_MEC_RING0 = 0x010, | ||
| 90 | AMDGPU_DOORBELL_MEC_RING1 = 0x011, | ||
| 91 | AMDGPU_DOORBELL_MEC_RING2 = 0x012, | ||
| 92 | AMDGPU_DOORBELL_MEC_RING3 = 0x013, | ||
| 93 | AMDGPU_DOORBELL_MEC_RING4 = 0x014, | ||
| 94 | AMDGPU_DOORBELL_MEC_RING5 = 0x015, | ||
| 95 | AMDGPU_DOORBELL_MEC_RING6 = 0x016, | ||
| 96 | AMDGPU_DOORBELL_MEC_RING7 = 0x017, | ||
| 97 | AMDGPU_DOORBELL_GFX_RING0 = 0x020, | ||
| 98 | AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0, | ||
| 99 | AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1, | ||
| 100 | AMDGPU_DOORBELL_IH = 0x1E8, | ||
| 101 | AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF, | ||
| 102 | AMDGPU_DOORBELL_INVALID = 0xFFFF | ||
| 103 | } AMDGPU_DOORBELL_ASSIGNMENT; | ||
| 104 | |||
| 105 | typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT | ||
| 106 | { | ||
| 107 | /* Compute + GFX: 0~255 */ | ||
| 108 | AMDGPU_VEGA20_DOORBELL_KIQ = 0x000, | ||
| 109 | AMDGPU_VEGA20_DOORBELL_HIQ = 0x001, | ||
| 110 | AMDGPU_VEGA20_DOORBELL_DIQ = 0x002, | ||
| 111 | AMDGPU_VEGA20_DOORBELL_MEC_RING0 = 0x003, | ||
| 112 | AMDGPU_VEGA20_DOORBELL_MEC_RING1 = 0x004, | ||
| 113 | AMDGPU_VEGA20_DOORBELL_MEC_RING2 = 0x005, | ||
| 114 | AMDGPU_VEGA20_DOORBELL_MEC_RING3 = 0x006, | ||
| 115 | AMDGPU_VEGA20_DOORBELL_MEC_RING4 = 0x007, | ||
| 116 | AMDGPU_VEGA20_DOORBELL_MEC_RING5 = 0x008, | ||
| 117 | AMDGPU_VEGA20_DOORBELL_MEC_RING6 = 0x009, | ||
| 118 | AMDGPU_VEGA20_DOORBELL_MEC_RING7 = 0x00A, | ||
| 119 | AMDGPU_VEGA20_DOORBELL_USERQUEUE_START = 0x00B, | ||
| 120 | AMDGPU_VEGA20_DOORBELL_USERQUEUE_END = 0x08A, | ||
| 121 | AMDGPU_VEGA20_DOORBELL_GFX_RING0 = 0x08B, | ||
| 122 | /* SDMA:256~335*/ | ||
| 123 | AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0 = 0x100, | ||
| 124 | AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1 = 0x10A, | ||
| 125 | AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2 = 0x114, | ||
| 126 | AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3 = 0x11E, | ||
| 127 | AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4 = 0x128, | ||
| 128 | AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5 = 0x132, | ||
| 129 | AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6 = 0x13C, | ||
| 130 | AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7 = 0x146, | ||
| 131 | /* IH: 376~391 */ | ||
| 132 | AMDGPU_VEGA20_DOORBELL_IH = 0x178, | ||
| 133 | /* MMSCH: 392~407 | ||
| 134 | * overlap the doorbell assignment with VCN as they are mutually exclusive | ||
| 135 | * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD | ||
| 136 | */ | ||
| 137 | AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */ | ||
| 138 | AMDGPU_VEGA20_DOORBELL64_VCN2_3 = 0x189, | ||
| 139 | AMDGPU_VEGA20_DOORBELL64_VCN4_5 = 0x18A, | ||
| 140 | AMDGPU_VEGA20_DOORBELL64_VCN6_7 = 0x18B, | ||
| 141 | |||
| 142 | AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1 = 0x188, | ||
| 143 | AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3 = 0x189, | ||
| 144 | AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5 = 0x18A, | ||
| 145 | AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7 = 0x18B, | ||
| 146 | |||
| 147 | AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1 = 0x18C, | ||
| 148 | AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D, | ||
| 149 | AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E, | ||
| 150 | AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F, | ||
| 151 | AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F, | ||
| 152 | AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF | ||
| 153 | } AMDGPU_VEGA20_DOORBELL_ASSIGNMENT; | ||
| 154 | |||
| 155 | /* | ||
| 156 | * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space | ||
| 157 | */ | ||
| 158 | typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT | ||
| 159 | { | ||
| 160 | /* | ||
| 161 | * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in | ||
| 162 | * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range. | ||
| 163 | * Compute related doorbells are allocated from 0x00 to 0x8a | ||
| 164 | */ | ||
| 165 | |||
| 166 | |||
| 167 | /* kernel scheduling */ | ||
| 168 | AMDGPU_DOORBELL64_KIQ = 0x00, | ||
| 169 | |||
| 170 | /* HSA interface queue and debug queue */ | ||
| 171 | AMDGPU_DOORBELL64_HIQ = 0x01, | ||
| 172 | AMDGPU_DOORBELL64_DIQ = 0x02, | ||
| 173 | |||
| 174 | /* Compute engines */ | ||
| 175 | AMDGPU_DOORBELL64_MEC_RING0 = 0x03, | ||
| 176 | AMDGPU_DOORBELL64_MEC_RING1 = 0x04, | ||
| 177 | AMDGPU_DOORBELL64_MEC_RING2 = 0x05, | ||
| 178 | AMDGPU_DOORBELL64_MEC_RING3 = 0x06, | ||
| 179 | AMDGPU_DOORBELL64_MEC_RING4 = 0x07, | ||
| 180 | AMDGPU_DOORBELL64_MEC_RING5 = 0x08, | ||
| 181 | AMDGPU_DOORBELL64_MEC_RING6 = 0x09, | ||
| 182 | AMDGPU_DOORBELL64_MEC_RING7 = 0x0a, | ||
| 183 | |||
| 184 | /* User queue doorbell range (128 doorbells) */ | ||
| 185 | AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b, | ||
| 186 | AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a, | ||
| 187 | |||
| 188 | /* Graphics engine */ | ||
| 189 | AMDGPU_DOORBELL64_GFX_RING0 = 0x8b, | ||
| 190 | |||
| 191 | /* | ||
| 192 | * Other graphics doorbells can be allocated here: from 0x8c to 0xdf | ||
| 193 | * Graphics voltage island aperture 1 | ||
| 194 | * default non-graphics QWORD index is 0xe0 - 0xFF inclusive | ||
| 195 | */ | ||
| 196 | |||
| 197 | /* For vega10 sriov, the sdma doorbell must be fixed as follow | ||
| 198 | * to keep the same setting with host driver, or it will | ||
| 199 | * happen conflicts | ||
| 200 | */ | ||
| 201 | AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0, | ||
| 202 | AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1, | ||
| 203 | AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2, | ||
| 204 | AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3, | ||
| 205 | |||
| 206 | /* Interrupt handler */ | ||
| 207 | AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */ | ||
| 208 | AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */ | ||
| 209 | AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */ | ||
| 210 | |||
| 211 | /* VCN engine use 32 bits doorbell */ | ||
| 212 | AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */ | ||
| 213 | AMDGPU_DOORBELL64_VCN2_3 = 0xF9, | ||
| 214 | AMDGPU_DOORBELL64_VCN4_5 = 0xFA, | ||
| 215 | AMDGPU_DOORBELL64_VCN6_7 = 0xFB, | ||
| 216 | |||
| 217 | /* overlap the doorbell assignment with VCN as they are mutually exclusive | ||
| 218 | * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD | ||
| 219 | */ | ||
| 220 | AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8, | ||
| 221 | AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9, | ||
| 222 | AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA, | ||
| 223 | AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB, | ||
| 224 | |||
| 225 | AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC, | ||
| 226 | AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD, | ||
| 227 | AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE, | ||
| 228 | AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF, | ||
| 229 | |||
| 230 | AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, | ||
| 231 | AMDGPU_DOORBELL64_INVALID = 0xFFFF | ||
| 232 | } AMDGPU_DOORBELL64_ASSIGNMENT; | ||
| 233 | |||
| 234 | u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); | ||
| 235 | void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); | ||
| 236 | u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index); | ||
| 237 | void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); | ||
| 238 | |||
| 239 | #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index)) | ||
| 240 | #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v)) | ||
| 241 | #define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index)) | ||
| 242 | #define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v)) | ||
| 243 | |||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8de55f7f1a3a..90f474f98b6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
| @@ -454,9 +454,10 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444); | |||
| 454 | 454 | ||
| 455 | /** | 455 | /** |
| 456 | * DOC: param_buf_per_se (int) | 456 | * DOC: param_buf_per_se (int) |
| 457 | * Override the size of Off-Chip Pramater Cache per Shader Engine in Byte. The default is 0 (depending on gfx). | 457 | * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte. |
| 458 | * The default is 0 (depending on gfx). | ||
| 458 | */ | 459 | */ |
| 459 | MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)"); | 460 | MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)"); |
| 460 | module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444); | 461 | module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444); |
| 461 | 462 | ||
| 462 | /** | 463 | /** |
| @@ -1220,9 +1221,6 @@ static struct drm_driver kms_driver = { | |||
| 1220 | .patchlevel = KMS_DRIVER_PATCHLEVEL, | 1221 | .patchlevel = KMS_DRIVER_PATCHLEVEL, |
| 1221 | }; | 1222 | }; |
| 1222 | 1223 | ||
| 1223 | static struct drm_driver *driver; | ||
| 1224 | static struct pci_driver *pdriver; | ||
| 1225 | |||
| 1226 | static struct pci_driver amdgpu_kms_pci_driver = { | 1224 | static struct pci_driver amdgpu_kms_pci_driver = { |
| 1227 | .name = DRIVER_NAME, | 1225 | .name = DRIVER_NAME, |
| 1228 | .id_table = pciidlist, | 1226 | .id_table = pciidlist, |
| @@ -1252,16 +1250,14 @@ static int __init amdgpu_init(void) | |||
| 1252 | goto error_fence; | 1250 | goto error_fence; |
| 1253 | 1251 | ||
| 1254 | DRM_INFO("amdgpu kernel modesetting enabled.\n"); | 1252 | DRM_INFO("amdgpu kernel modesetting enabled.\n"); |
| 1255 | driver = &kms_driver; | 1253 | kms_driver.num_ioctls = amdgpu_max_kms_ioctl; |
| 1256 | pdriver = &amdgpu_kms_pci_driver; | ||
| 1257 | driver->num_ioctls = amdgpu_max_kms_ioctl; | ||
| 1258 | amdgpu_register_atpx_handler(); | 1254 | amdgpu_register_atpx_handler(); |
| 1259 | 1255 | ||
| 1260 | /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ | 1256 | /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ |
| 1261 | amdgpu_amdkfd_init(); | 1257 | amdgpu_amdkfd_init(); |
| 1262 | 1258 | ||
| 1263 | /* let modprobe override vga console setting */ | 1259 | /* let modprobe override vga console setting */ |
| 1264 | return pci_register_driver(pdriver); | 1260 | return pci_register_driver(&amdgpu_kms_pci_driver); |
| 1265 | 1261 | ||
| 1266 | error_fence: | 1262 | error_fence: |
| 1267 | amdgpu_sync_fini(); | 1263 | amdgpu_sync_fini(); |
| @@ -1273,7 +1269,7 @@ error_sync: | |||
| 1273 | static void __exit amdgpu_exit(void) | 1269 | static void __exit amdgpu_exit(void) |
| 1274 | { | 1270 | { |
| 1275 | amdgpu_amdkfd_fini(); | 1271 | amdgpu_amdkfd_fini(); |
| 1276 | pci_unregister_driver(pdriver); | 1272 | pci_unregister_driver(&amdgpu_kms_pci_driver); |
| 1277 | amdgpu_unregister_atpx_handler(); | 1273 | amdgpu_unregister_atpx_handler(); |
| 1278 | amdgpu_sync_fini(); | 1274 | amdgpu_sync_fini(); |
| 1279 | amdgpu_fence_slab_fini(); | 1275 | amdgpu_fence_slab_fini(); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 6a70c0b7105f..97a60da62004 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | |||
| @@ -250,7 +250,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, | |||
| 250 | ring->adev = NULL; | 250 | ring->adev = NULL; |
| 251 | ring->ring_obj = NULL; | 251 | ring->ring_obj = NULL; |
| 252 | ring->use_doorbell = true; | 252 | ring->use_doorbell = true; |
| 253 | ring->doorbell_index = AMDGPU_DOORBELL_KIQ; | 253 | ring->doorbell_index = adev->doorbell_index.kiq; |
| 254 | 254 | ||
| 255 | r = amdgpu_gfx_kiq_acquire(adev, ring); | 255 | r = amdgpu_gfx_kiq_acquire(adev, ring); |
| 256 | if (r) | 256 | if (r) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 9b3164c0f861..bc62bf41b7e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
| @@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
| 467 | if (!info->return_size || !info->return_pointer) | 467 | if (!info->return_size || !info->return_pointer) |
| 468 | return -EINVAL; | 468 | return -EINVAL; |
| 469 | 469 | ||
| 470 | /* Ensure IB tests are run on ring */ | ||
| 471 | flush_delayed_work(&adev->late_init_work); | ||
| 472 | |||
| 473 | switch (info->query) { | 470 | switch (info->query) { |
| 474 | case AMDGPU_INFO_ACCEL_WORKING: | 471 | case AMDGPU_INFO_ACCEL_WORKING: |
| 475 | ui32 = adev->accel_working; | 472 | ui32 = adev->accel_working; |
| @@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) | |||
| 950 | struct amdgpu_fpriv *fpriv; | 947 | struct amdgpu_fpriv *fpriv; |
| 951 | int r, pasid; | 948 | int r, pasid; |
| 952 | 949 | ||
| 950 | /* Ensure IB tests are run on ring */ | ||
| 951 | flush_delayed_work(&adev->late_init_work); | ||
| 952 | |||
| 953 | file_priv->driver_priv = NULL; | 953 | file_priv->driver_priv = NULL; |
| 954 | 954 | ||
| 955 | r = pm_runtime_get_sync(dev->dev); | 955 | r = pm_runtime_get_sync(dev->dev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 0dc2c5c57015..aadd0fa42e43 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | |||
| @@ -38,7 +38,6 @@ | |||
| 38 | #include <drm/drm_crtc_helper.h> | 38 | #include <drm/drm_crtc_helper.h> |
| 39 | #include <drm/drm_fb_helper.h> | 39 | #include <drm/drm_fb_helper.h> |
| 40 | #include <drm/drm_plane_helper.h> | 40 | #include <drm/drm_plane_helper.h> |
| 41 | #include <drm/drm_fb_helper.h> | ||
| 42 | #include <linux/i2c.h> | 41 | #include <linux/i2c.h> |
| 43 | #include <linux/i2c-algo-bit.h> | 42 | #include <linux/i2c-algo-bit.h> |
| 44 | #include <linux/hrtimer.h> | 43 | #include <linux/hrtimer.h> |
| @@ -294,13 +293,6 @@ struct amdgpu_display_funcs { | |||
| 294 | uint16_t connector_object_id, | 293 | uint16_t connector_object_id, |
| 295 | struct amdgpu_hpd *hpd, | 294 | struct amdgpu_hpd *hpd, |
| 296 | struct amdgpu_router *router); | 295 | struct amdgpu_router *router); |
| 297 | /* it is used to enter or exit into free sync mode */ | ||
| 298 | int (*notify_freesync)(struct drm_device *dev, void *data, | ||
| 299 | struct drm_file *filp); | ||
| 300 | /* it is used to allow enablement of freesync mode */ | ||
| 301 | int (*set_freesync_property)(struct drm_connector *connector, | ||
| 302 | struct drm_property *property, | ||
| 303 | uint64_t val); | ||
| 304 | 296 | ||
| 305 | 297 | ||
| 306 | }; | 298 | }; |
| @@ -340,6 +332,8 @@ struct amdgpu_mode_info { | |||
| 340 | struct drm_property *dither_property; | 332 | struct drm_property *dither_property; |
| 341 | /* maximum number of bits per channel for monitor color */ | 333 | /* maximum number of bits per channel for monitor color */ |
| 342 | struct drm_property *max_bpc_property; | 334 | struct drm_property *max_bpc_property; |
| 335 | /* Adaptive Backlight Modulation (power feature) */ | ||
| 336 | struct drm_property *abm_level_property; | ||
| 343 | /* hardcoded DFP edid from BIOS */ | 337 | /* hardcoded DFP edid from BIOS */ |
| 344 | struct edid *bios_hardcoded_edid; | 338 | struct edid *bios_hardcoded_edid; |
| 345 | int bios_hardcoded_edid_size; | 339 | int bios_hardcoded_edid_size; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 7235cd0b0fa9..1f61ed95727c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
| @@ -33,6 +33,8 @@ | |||
| 33 | #include <linux/hwmon.h> | 33 | #include <linux/hwmon.h> |
| 34 | #include <linux/hwmon-sysfs.h> | 34 | #include <linux/hwmon-sysfs.h> |
| 35 | #include <linux/nospec.h> | 35 | #include <linux/nospec.h> |
| 36 | #include "hwmgr.h" | ||
| 37 | #define WIDTH_4K 3840 | ||
| 36 | 38 | ||
| 37 | static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); | 39 | static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); |
| 38 | 40 | ||
| @@ -1642,6 +1644,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, | |||
| 1642 | attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) | 1644 | attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) |
| 1643 | return 0; | 1645 | return 0; |
| 1644 | 1646 | ||
| 1647 | /* Skip fan attributes on APU */ | ||
| 1648 | if ((adev->flags & AMD_IS_APU) && | ||
| 1649 | (attr == &sensor_dev_attr_pwm1.dev_attr.attr || | ||
| 1650 | attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || | ||
| 1651 | attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || | ||
| 1652 | attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || | ||
| 1653 | attr == &sensor_dev_attr_fan1_input.dev_attr.attr || | ||
| 1654 | attr == &sensor_dev_attr_fan1_min.dev_attr.attr || | ||
| 1655 | attr == &sensor_dev_attr_fan1_max.dev_attr.attr || | ||
| 1656 | attr == &sensor_dev_attr_fan1_target.dev_attr.attr || | ||
| 1657 | attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) | ||
| 1658 | return 0; | ||
| 1659 | |||
| 1645 | /* Skip limit attributes if DPM is not enabled */ | 1660 | /* Skip limit attributes if DPM is not enabled */ |
| 1646 | if (!adev->pm.dpm_enabled && | 1661 | if (!adev->pm.dpm_enabled && |
| 1647 | (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || | 1662 | (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || |
| @@ -1956,6 +1971,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) | |||
| 1956 | amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); | 1971 | amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); |
| 1957 | mutex_unlock(&adev->pm.mutex); | 1972 | mutex_unlock(&adev->pm.mutex); |
| 1958 | } | 1973 | } |
| 1974 | /* enable/disable Low Memory PState for UVD (4k videos) */ | ||
| 1975 | if (adev->asic_type == CHIP_STONEY && | ||
| 1976 | adev->uvd.decode_image_width >= WIDTH_4K) { | ||
| 1977 | struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; | ||
| 1978 | |||
| 1979 | if (hwmgr && hwmgr->hwmgr_func && | ||
| 1980 | hwmgr->hwmgr_func->update_nbdpm_pstate) | ||
| 1981 | hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, | ||
| 1982 | !enable, | ||
| 1983 | true); | ||
| 1984 | } | ||
| 1959 | } | 1985 | } |
| 1960 | 1986 | ||
| 1961 | void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) | 1987 | void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 69896f451e8a..4e5d13e41f6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
| @@ -692,6 +692,8 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, | |||
| 692 | buf_sizes[0x1] = dpb_size; | 692 | buf_sizes[0x1] = dpb_size; |
| 693 | buf_sizes[0x2] = image_size; | 693 | buf_sizes[0x2] = image_size; |
| 694 | buf_sizes[0x4] = min_ctx_size; | 694 | buf_sizes[0x4] = min_ctx_size; |
| 695 | /* store image width to adjust nb memory pstate */ | ||
| 696 | adev->uvd.decode_image_width = width; | ||
| 695 | return 0; | 697 | return 0; |
| 696 | } | 698 | } |
| 697 | 699 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index a3ab1a41060f..5eb63288d157 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | |||
| @@ -65,6 +65,8 @@ struct amdgpu_uvd { | |||
| 65 | struct drm_sched_entity entity; | 65 | struct drm_sched_entity entity; |
| 66 | struct delayed_work idle_work; | 66 | struct delayed_work idle_work; |
| 67 | unsigned harvest_config; | 67 | unsigned harvest_config; |
| 68 | /* store image width to adjust nb memory state */ | ||
| 69 | unsigned decode_image_width; | ||
| 68 | }; | 70 | }; |
| 69 | 71 | ||
| 70 | int amdgpu_uvd_sw_init(struct amdgpu_device *adev); | 72 | int amdgpu_uvd_sw_init(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index cfee74732edb..462a04e0f5e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | |||
| @@ -334,7 +334,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) | |||
| 334 | 334 | ||
| 335 | if (adev->fw_vram_usage.va != NULL) { | 335 | if (adev->fw_vram_usage.va != NULL) { |
| 336 | adev->virt.fw_reserve.p_pf2vf = | 336 | adev->virt.fw_reserve.p_pf2vf = |
| 337 | (struct amdgim_pf2vf_info_header *)( | 337 | (struct amd_sriov_msg_pf2vf_info_header *)( |
| 338 | adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET); | 338 | adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET); |
| 339 | AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size); | 339 | AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size); |
| 340 | AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum); | 340 | AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 0728fbc9a692..722deefc0a7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | |||
| @@ -63,8 +63,8 @@ struct amdgpu_virt_ops { | |||
| 63 | * Firmware Reserve Frame buffer | 63 | * Firmware Reserve Frame buffer |
| 64 | */ | 64 | */ |
| 65 | struct amdgpu_virt_fw_reserve { | 65 | struct amdgpu_virt_fw_reserve { |
| 66 | struct amdgim_pf2vf_info_header *p_pf2vf; | 66 | struct amd_sriov_msg_pf2vf_info_header *p_pf2vf; |
| 67 | struct amdgim_vf2pf_info_header *p_vf2pf; | 67 | struct amd_sriov_msg_vf2pf_info_header *p_vf2pf; |
| 68 | unsigned int checksum_key; | 68 | unsigned int checksum_key; |
| 69 | }; | 69 | }; |
| 70 | /* | 70 | /* |
| @@ -85,15 +85,17 @@ enum AMDGIM_FEATURE_FLAG { | |||
| 85 | AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, | 85 | AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, |
| 86 | }; | 86 | }; |
| 87 | 87 | ||
| 88 | struct amdgim_pf2vf_info_header { | 88 | struct amd_sriov_msg_pf2vf_info_header { |
| 89 | /* the total structure size in byte. */ | 89 | /* the total structure size in byte. */ |
| 90 | uint32_t size; | 90 | uint32_t size; |
| 91 | /* version of this structure, written by the GIM */ | 91 | /* version of this structure, written by the GIM */ |
| 92 | uint32_t version; | 92 | uint32_t version; |
| 93 | /* reserved */ | ||
| 94 | uint32_t reserved[2]; | ||
| 93 | } __aligned(4); | 95 | } __aligned(4); |
| 94 | struct amdgim_pf2vf_info_v1 { | 96 | struct amdgim_pf2vf_info_v1 { |
| 95 | /* header contains size and version */ | 97 | /* header contains size and version */ |
| 96 | struct amdgim_pf2vf_info_header header; | 98 | struct amd_sriov_msg_pf2vf_info_header header; |
| 97 | /* max_width * max_height */ | 99 | /* max_width * max_height */ |
| 98 | unsigned int uvd_enc_max_pixels_count; | 100 | unsigned int uvd_enc_max_pixels_count; |
| 99 | /* 16x16 pixels/sec, codec independent */ | 101 | /* 16x16 pixels/sec, codec independent */ |
| @@ -112,7 +114,7 @@ struct amdgim_pf2vf_info_v1 { | |||
| 112 | 114 | ||
| 113 | struct amdgim_pf2vf_info_v2 { | 115 | struct amdgim_pf2vf_info_v2 { |
| 114 | /* header contains size and version */ | 116 | /* header contains size and version */ |
| 115 | struct amdgim_pf2vf_info_header header; | 117 | struct amd_sriov_msg_pf2vf_info_header header; |
| 116 | /* use private key from mailbox 2 to create chueksum */ | 118 | /* use private key from mailbox 2 to create chueksum */ |
| 117 | uint32_t checksum; | 119 | uint32_t checksum; |
| 118 | /* The features flags of the GIM driver supports. */ | 120 | /* The features flags of the GIM driver supports. */ |
| @@ -137,20 +139,22 @@ struct amdgim_pf2vf_info_v2 { | |||
| 137 | uint64_t vcefw_kboffset; | 139 | uint64_t vcefw_kboffset; |
| 138 | /* VCE FW size in KB */ | 140 | /* VCE FW size in KB */ |
| 139 | uint32_t vcefw_ksize; | 141 | uint32_t vcefw_ksize; |
| 140 | uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)]; | 142 | uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)]; |
| 141 | } __aligned(4); | 143 | } __aligned(4); |
| 142 | 144 | ||
| 143 | 145 | ||
| 144 | struct amdgim_vf2pf_info_header { | 146 | struct amd_sriov_msg_vf2pf_info_header { |
| 145 | /* the total structure size in byte. */ | 147 | /* the total structure size in byte. */ |
| 146 | uint32_t size; | 148 | uint32_t size; |
| 147 | /*version of this structure, written by the guest */ | 149 | /*version of this structure, written by the guest */ |
| 148 | uint32_t version; | 150 | uint32_t version; |
| 151 | /* reserved */ | ||
| 152 | uint32_t reserved[2]; | ||
| 149 | } __aligned(4); | 153 | } __aligned(4); |
| 150 | 154 | ||
| 151 | struct amdgim_vf2pf_info_v1 { | 155 | struct amdgim_vf2pf_info_v1 { |
| 152 | /* header contains size and version */ | 156 | /* header contains size and version */ |
| 153 | struct amdgim_vf2pf_info_header header; | 157 | struct amd_sriov_msg_vf2pf_info_header header; |
| 154 | /* driver version */ | 158 | /* driver version */ |
| 155 | char driver_version[64]; | 159 | char driver_version[64]; |
| 156 | /* driver certification, 1=WHQL, 0=None */ | 160 | /* driver certification, 1=WHQL, 0=None */ |
| @@ -180,7 +184,7 @@ struct amdgim_vf2pf_info_v1 { | |||
| 180 | 184 | ||
| 181 | struct amdgim_vf2pf_info_v2 { | 185 | struct amdgim_vf2pf_info_v2 { |
| 182 | /* header contains size and version */ | 186 | /* header contains size and version */ |
| 183 | struct amdgim_vf2pf_info_header header; | 187 | struct amd_sriov_msg_vf2pf_info_header header; |
| 184 | uint32_t checksum; | 188 | uint32_t checksum; |
| 185 | /* driver version */ | 189 | /* driver version */ |
| 186 | uint8_t driver_version[64]; | 190 | uint8_t driver_version[64]; |
| @@ -206,7 +210,7 @@ struct amdgim_vf2pf_info_v2 { | |||
| 206 | uint32_t uvd_enc_usage; | 210 | uint32_t uvd_enc_usage; |
| 207 | /* guest uvd engine usage percentage. 0xffff means N/A. */ | 211 | /* guest uvd engine usage percentage. 0xffff means N/A. */ |
| 208 | uint32_t uvd_enc_health; | 212 | uint32_t uvd_enc_health; |
| 209 | uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)]; | 213 | uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)]; |
| 210 | } __aligned(4); | 214 | } __aligned(4); |
| 211 | 215 | ||
| 212 | #define AMDGPU_FW_VRAM_VF2PF_VER 2 | 216 | #define AMDGPU_FW_VRAM_VF2PF_VER 2 |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 909216a9b447..fb37e69f1bba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | */ | 23 | */ |
| 24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
| 25 | #include "amdgpu.h" | 25 | #include "amdgpu.h" |
| 26 | #include "amdgpu_psp.h" | 26 | #include "amdgpu_xgmi.h" |
| 27 | 27 | ||
| 28 | 28 | ||
| 29 | static DEFINE_MUTEX(xgmi_mutex); | 29 | static DEFINE_MUTEX(xgmi_mutex); |
| @@ -31,15 +31,16 @@ static DEFINE_MUTEX(xgmi_mutex); | |||
| 31 | #define AMDGPU_MAX_XGMI_HIVE 8 | 31 | #define AMDGPU_MAX_XGMI_HIVE 8 |
| 32 | #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4 | 32 | #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4 |
| 33 | 33 | ||
| 34 | struct amdgpu_hive_info { | ||
| 35 | uint64_t hive_id; | ||
| 36 | struct list_head device_list; | ||
| 37 | }; | ||
| 38 | |||
| 39 | static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE]; | 34 | static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE]; |
| 40 | static unsigned hive_count = 0; | 35 | static unsigned hive_count = 0; |
| 41 | 36 | ||
| 42 | static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) | 37 | |
| 38 | void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive) | ||
| 39 | { | ||
| 40 | return &hive->device_list; | ||
| 41 | } | ||
| 42 | |||
| 43 | struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) | ||
| 43 | { | 44 | { |
| 44 | int i; | 45 | int i; |
| 45 | struct amdgpu_hive_info *tmp; | 46 | struct amdgpu_hive_info *tmp; |
| @@ -58,15 +59,38 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) | |||
| 58 | tmp = &xgmi_hives[hive_count++]; | 59 | tmp = &xgmi_hives[hive_count++]; |
| 59 | tmp->hive_id = adev->gmc.xgmi.hive_id; | 60 | tmp->hive_id = adev->gmc.xgmi.hive_id; |
| 60 | INIT_LIST_HEAD(&tmp->device_list); | 61 | INIT_LIST_HEAD(&tmp->device_list); |
| 62 | mutex_init(&tmp->hive_lock); | ||
| 63 | |||
| 61 | return tmp; | 64 | return tmp; |
| 62 | } | 65 | } |
| 63 | 66 | ||
| 67 | int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev) | ||
| 68 | { | ||
| 69 | int ret = -EINVAL; | ||
| 70 | |||
| 71 | /* Each psp need to set the latest topology */ | ||
| 72 | ret = psp_xgmi_set_topology_info(&adev->psp, | ||
| 73 | hive->number_devices, | ||
| 74 | &hive->topology_info); | ||
| 75 | if (ret) | ||
| 76 | dev_err(adev->dev, | ||
| 77 | "XGMI: Set topology failure on device %llx, hive %llx, ret %d", | ||
| 78 | adev->gmc.xgmi.node_id, | ||
| 79 | adev->gmc.xgmi.hive_id, ret); | ||
| 80 | else | ||
| 81 | dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n", | ||
| 82 | adev->gmc.xgmi.physical_node_id, | ||
| 83 | adev->gmc.xgmi.hive_id); | ||
| 84 | |||
| 85 | return ret; | ||
| 86 | } | ||
| 87 | |||
| 64 | int amdgpu_xgmi_add_device(struct amdgpu_device *adev) | 88 | int amdgpu_xgmi_add_device(struct amdgpu_device *adev) |
| 65 | { | 89 | { |
| 66 | struct psp_xgmi_topology_info *tmp_topology; | 90 | struct psp_xgmi_topology_info *hive_topology; |
| 67 | struct amdgpu_hive_info *hive; | 91 | struct amdgpu_hive_info *hive; |
| 68 | struct amdgpu_xgmi *entry; | 92 | struct amdgpu_xgmi *entry; |
| 69 | struct amdgpu_device *tmp_adev; | 93 | struct amdgpu_device *tmp_adev = NULL; |
| 70 | 94 | ||
| 71 | int count = 0, ret = -EINVAL; | 95 | int count = 0, ret = -EINVAL; |
| 72 | 96 | ||
| @@ -76,21 +100,21 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) | |||
| 76 | adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp); | 100 | adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp); |
| 77 | adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); | 101 | adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); |
| 78 | 102 | ||
| 79 | tmp_topology = kzalloc(sizeof(struct psp_xgmi_topology_info), GFP_KERNEL); | ||
| 80 | if (!tmp_topology) | ||
| 81 | return -ENOMEM; | ||
| 82 | mutex_lock(&xgmi_mutex); | 103 | mutex_lock(&xgmi_mutex); |
| 83 | hive = amdgpu_get_xgmi_hive(adev); | 104 | hive = amdgpu_get_xgmi_hive(adev); |
| 84 | if (!hive) | 105 | if (!hive) |
| 85 | goto exit; | 106 | goto exit; |
| 86 | 107 | ||
| 108 | hive_topology = &hive->topology_info; | ||
| 109 | |||
| 87 | list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); | 110 | list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); |
| 88 | list_for_each_entry(entry, &hive->device_list, head) | 111 | list_for_each_entry(entry, &hive->device_list, head) |
| 89 | tmp_topology->nodes[count++].node_id = entry->node_id; | 112 | hive_topology->nodes[count++].node_id = entry->node_id; |
| 113 | hive->number_devices = count; | ||
| 90 | 114 | ||
| 91 | /* Each psp need to get the latest topology */ | 115 | /* Each psp need to get the latest topology */ |
| 92 | list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { | 116 | list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { |
| 93 | ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, tmp_topology); | 117 | ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology); |
| 94 | if (ret) { | 118 | if (ret) { |
| 95 | dev_err(tmp_adev->dev, | 119 | dev_err(tmp_adev->dev, |
| 96 | "XGMI: Get topology failure on device %llx, hive %llx, ret %d", | 120 | "XGMI: Get topology failure on device %llx, hive %llx, ret %d", |
| @@ -101,25 +125,13 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) | |||
| 101 | } | 125 | } |
| 102 | } | 126 | } |
| 103 | 127 | ||
| 104 | /* Each psp need to set the latest topology */ | ||
| 105 | list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { | 128 | list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { |
| 106 | ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology); | 129 | ret = amdgpu_xgmi_update_topology(hive, tmp_adev); |
| 107 | if (ret) { | 130 | if (ret) |
| 108 | dev_err(tmp_adev->dev, | ||
| 109 | "XGMI: Set topology failure on device %llx, hive %llx, ret %d", | ||
| 110 | tmp_adev->gmc.xgmi.node_id, | ||
| 111 | tmp_adev->gmc.xgmi.hive_id, ret); | ||
| 112 | /* To do : continue with some node failed or disable the whole hive */ | ||
| 113 | break; | 131 | break; |
| 114 | } | ||
| 115 | } | 132 | } |
| 116 | if (!ret) | ||
| 117 | dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n", | ||
| 118 | adev->gmc.xgmi.physical_node_id, | ||
| 119 | adev->gmc.xgmi.hive_id); | ||
| 120 | 133 | ||
| 121 | exit: | 134 | exit: |
| 122 | mutex_unlock(&xgmi_mutex); | 135 | mutex_unlock(&xgmi_mutex); |
| 123 | kfree(tmp_topology); | ||
| 124 | return ret; | 136 | return ret; |
| 125 | } | 137 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h new file mode 100644 index 000000000000..6335bfdcc51d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | |||
| @@ -0,0 +1,39 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2016 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | #ifndef __AMDGPU_XGMI_H__ | ||
| 23 | #define __AMDGPU_XGMI_H__ | ||
| 24 | |||
| 25 | #include "amdgpu_psp.h" | ||
| 26 | |||
| 27 | struct amdgpu_hive_info { | ||
| 28 | uint64_t hive_id; | ||
| 29 | struct list_head device_list; | ||
| 30 | struct psp_xgmi_topology_info topology_info; | ||
| 31 | int number_devices; | ||
| 32 | struct mutex hive_lock; | ||
| 33 | }; | ||
| 34 | |||
| 35 | struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev); | ||
| 36 | int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); | ||
| 37 | int amdgpu_xgmi_add_device(struct amdgpu_device *adev); | ||
| 38 | |||
| 39 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index f41f5f57e9f3..71c50d8900e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c | |||
| @@ -1755,6 +1755,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = | |||
| 1755 | .flush_hdp = &cik_flush_hdp, | 1755 | .flush_hdp = &cik_flush_hdp, |
| 1756 | .invalidate_hdp = &cik_invalidate_hdp, | 1756 | .invalidate_hdp = &cik_invalidate_hdp, |
| 1757 | .need_full_reset = &cik_need_full_reset, | 1757 | .need_full_reset = &cik_need_full_reset, |
| 1758 | .init_doorbell_index = &legacy_doorbell_index_init, | ||
| 1758 | }; | 1759 | }; |
| 1759 | 1760 | ||
| 1760 | static int cik_common_early_init(void *handle) | 1761 | static int cik_common_early_init(void *handle) |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h index e49c6f15a0a0..54c625a2e570 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.h +++ b/drivers/gpu/drm/amd/amdgpu/cik.h | |||
| @@ -30,4 +30,5 @@ void cik_srbm_select(struct amdgpu_device *adev, | |||
| 30 | u32 me, u32 pipe, u32 queue, u32 vmid); | 30 | u32 me, u32 pipe, u32 queue, u32 vmid); |
| 31 | int cik_set_ip_blocks(struct amdgpu_device *adev); | 31 | int cik_set_ip_blocks(struct amdgpu_device *adev); |
| 32 | 32 | ||
| 33 | void legacy_doorbell_index_init(struct amdgpu_device *adev); | ||
| 33 | #endif | 34 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index f467b9bd090d..3a9fb6018c16 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
| @@ -4363,7 +4363,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, | |||
| 4363 | 4363 | ||
| 4364 | ring->ring_obj = NULL; | 4364 | ring->ring_obj = NULL; |
| 4365 | ring->use_doorbell = true; | 4365 | ring->use_doorbell = true; |
| 4366 | ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id; | 4366 | ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id; |
| 4367 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); | 4367 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); |
| 4368 | 4368 | ||
| 4369 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP | 4369 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index cb066a8dccd7..1454fc306783 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -44,7 +44,6 @@ | |||
| 44 | #include "gca/gfx_8_0_d.h" | 44 | #include "gca/gfx_8_0_d.h" |
| 45 | #include "gca/gfx_8_0_enum.h" | 45 | #include "gca/gfx_8_0_enum.h" |
| 46 | #include "gca/gfx_8_0_sh_mask.h" | 46 | #include "gca/gfx_8_0_sh_mask.h" |
| 47 | #include "gca/gfx_8_0_enum.h" | ||
| 48 | 47 | ||
| 49 | #include "dce/dce_10_0_d.h" | 48 | #include "dce/dce_10_0_d.h" |
| 50 | #include "dce/dce_10_0_sh_mask.h" | 49 | #include "dce/dce_10_0_sh_mask.h" |
| @@ -1891,7 +1890,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, | |||
| 1891 | 1890 | ||
| 1892 | ring->ring_obj = NULL; | 1891 | ring->ring_obj = NULL; |
| 1893 | ring->use_doorbell = true; | 1892 | ring->use_doorbell = true; |
| 1894 | ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id; | 1893 | ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id; |
| 1895 | ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr | 1894 | ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr |
| 1896 | + (ring_id * GFX8_MEC_HPD_SIZE); | 1895 | + (ring_id * GFX8_MEC_HPD_SIZE); |
| 1897 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); | 1896 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); |
| @@ -2002,7 +2001,7 @@ static int gfx_v8_0_sw_init(void *handle) | |||
| 2002 | /* no gfx doorbells on iceland */ | 2001 | /* no gfx doorbells on iceland */ |
| 2003 | if (adev->asic_type != CHIP_TOPAZ) { | 2002 | if (adev->asic_type != CHIP_TOPAZ) { |
| 2004 | ring->use_doorbell = true; | 2003 | ring->use_doorbell = true; |
| 2005 | ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0; | 2004 | ring->doorbell_index = adev->doorbell_index.gfx_ring0; |
| 2006 | } | 2005 | } |
| 2007 | 2006 | ||
| 2008 | r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, | 2007 | r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, |
| @@ -4216,7 +4215,7 @@ static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu | |||
| 4216 | 4215 | ||
| 4217 | tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, | 4216 | tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, |
| 4218 | DOORBELL_RANGE_LOWER, | 4217 | DOORBELL_RANGE_LOWER, |
| 4219 | AMDGPU_DOORBELL_GFX_RING0); | 4218 | adev->doorbell_index.gfx_ring0); |
| 4220 | WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp); | 4219 | WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp); |
| 4221 | 4220 | ||
| 4222 | WREG32(mmCP_RB_DOORBELL_RANGE_UPPER, | 4221 | WREG32(mmCP_RB_DOORBELL_RANGE_UPPER, |
| @@ -4645,8 +4644,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) | |||
| 4645 | static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev) | 4644 | static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev) |
| 4646 | { | 4645 | { |
| 4647 | if (adev->asic_type > CHIP_TONGA) { | 4646 | if (adev->asic_type > CHIP_TONGA) { |
| 4648 | WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); | 4647 | WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2); |
| 4649 | WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, AMDGPU_DOORBELL_MEC_RING7 << 2); | 4648 | WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2); |
| 4650 | } | 4649 | } |
| 4651 | /* enable doorbells */ | 4650 | /* enable doorbells */ |
| 4652 | WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1); | 4651 | WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c27caa144c57..af8ccb014be3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | |||
| @@ -1566,7 +1566,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, | |||
| 1566 | 1566 | ||
| 1567 | ring->ring_obj = NULL; | 1567 | ring->ring_obj = NULL; |
| 1568 | ring->use_doorbell = true; | 1568 | ring->use_doorbell = true; |
| 1569 | ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1; | 1569 | ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; |
| 1570 | ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr | 1570 | ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr |
| 1571 | + (ring_id * GFX9_MEC_HPD_SIZE); | 1571 | + (ring_id * GFX9_MEC_HPD_SIZE); |
| 1572 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); | 1572 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); |
| @@ -1655,7 +1655,7 @@ static int gfx_v9_0_sw_init(void *handle) | |||
| 1655 | else | 1655 | else |
| 1656 | sprintf(ring->name, "gfx_%d", i); | 1656 | sprintf(ring->name, "gfx_%d", i); |
| 1657 | ring->use_doorbell = true; | 1657 | ring->use_doorbell = true; |
| 1658 | ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1; | 1658 | ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; |
| 1659 | r = amdgpu_ring_init(adev, ring, 1024, | 1659 | r = amdgpu_ring_init(adev, ring, 1024, |
| 1660 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP); | 1660 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP); |
| 1661 | if (r) | 1661 | if (r) |
| @@ -2981,9 +2981,9 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring) | |||
| 2981 | /* enable the doorbell if requested */ | 2981 | /* enable the doorbell if requested */ |
| 2982 | if (ring->use_doorbell) { | 2982 | if (ring->use_doorbell) { |
| 2983 | WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER, | 2983 | WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER, |
| 2984 | (AMDGPU_DOORBELL64_KIQ *2) << 2); | 2984 | (adev->doorbell_index.kiq * 2) << 2); |
| 2985 | WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, | 2985 | WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, |
| 2986 | (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2); | 2986 | (adev->doorbell_index.userqueue_end * 2) << 2); |
| 2987 | } | 2987 | } |
| 2988 | 2988 | ||
| 2989 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, | 2989 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 811231e4ec53..3a4e5d8d5162 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
| @@ -338,9 +338,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, | |||
| 338 | struct amdgpu_vmhub *hub = &adev->vmhub[i]; | 338 | struct amdgpu_vmhub *hub = &adev->vmhub[i]; |
| 339 | u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); | 339 | u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); |
| 340 | 340 | ||
| 341 | if (i == AMDGPU_GFXHUB && !adev->in_gpu_reset && | 341 | /* This is necessary for a HW workaround under SRIOV as well |
| 342 | adev->gfx.kiq.ring.sched.ready && | 342 | * as GFXOFF under bare metal |
| 343 | (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { | 343 | */ |
| 344 | if (adev->gfx.kiq.ring.sched.ready && | ||
| 345 | (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && | ||
| 346 | !adev->in_gpu_reset) { | ||
| 344 | uint32_t req = hub->vm_inv_eng0_req + eng; | 347 | uint32_t req = hub->vm_inv_eng0_req + eng; |
| 345 | uint32_t ack = hub->vm_inv_eng0_ack + eng; | 348 | uint32_t ack = hub->vm_inv_eng0_ack + eng; |
| 346 | 349 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 64e875d528dd..6a0fcd67662a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | |||
| @@ -37,7 +37,6 @@ | |||
| 37 | #include "gmc/gmc_8_2_sh_mask.h" | 37 | #include "gmc/gmc_8_2_sh_mask.h" |
| 38 | #include "oss/oss_3_0_d.h" | 38 | #include "oss/oss_3_0_d.h" |
| 39 | #include "oss/oss_3_0_sh_mask.h" | 39 | #include "oss/oss_3_0_sh_mask.h" |
| 40 | #include "gca/gfx_8_0_sh_mask.h" | ||
| 41 | #include "dce/dce_10_0_d.h" | 40 | #include "dce/dce_10_0_d.h" |
| 42 | #include "dce/dce_10_0_sh_mask.h" | 41 | #include "dce/dce_10_0_sh_mask.h" |
| 43 | #include "smu/smu_7_1_3_d.h" | 42 | #include "smu/smu_7_1_3_d.h" |
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 295c2205485a..d78b4306a36f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | |||
| @@ -240,12 +240,9 @@ static int psp_v10_0_ring_stop(struct psp_context *psp, | |||
| 240 | enum psp_ring_type ring_type) | 240 | enum psp_ring_type ring_type) |
| 241 | { | 241 | { |
| 242 | int ret = 0; | 242 | int ret = 0; |
| 243 | struct psp_ring *ring; | ||
| 244 | unsigned int psp_ring_reg = 0; | 243 | unsigned int psp_ring_reg = 0; |
| 245 | struct amdgpu_device *adev = psp->adev; | 244 | struct amdgpu_device *adev = psp->adev; |
| 246 | 245 | ||
| 247 | ring = &psp->km_ring; | ||
| 248 | |||
| 249 | /* Write the ring destroy command to C2PMSG_64 */ | 246 | /* Write the ring destroy command to C2PMSG_64 */ |
| 250 | psp_ring_reg = 3 << 16; | 247 | psp_ring_reg = 3 << 16; |
| 251 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); | 248 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); |
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 9cea0bbe4525..7efb823dd3b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | |||
| @@ -356,12 +356,9 @@ static int psp_v3_1_ring_stop(struct psp_context *psp, | |||
| 356 | enum psp_ring_type ring_type) | 356 | enum psp_ring_type ring_type) |
| 357 | { | 357 | { |
| 358 | int ret = 0; | 358 | int ret = 0; |
| 359 | struct psp_ring *ring; | ||
| 360 | unsigned int psp_ring_reg = 0; | 359 | unsigned int psp_ring_reg = 0; |
| 361 | struct amdgpu_device *adev = psp->adev; | 360 | struct amdgpu_device *adev = psp->adev; |
| 362 | 361 | ||
| 363 | ring = &psp->km_ring; | ||
| 364 | |||
| 365 | /* Write the ring destroy command to C2PMSG_64 */ | 362 | /* Write the ring destroy command to C2PMSG_64 */ |
| 366 | psp_ring_reg = 3 << 16; | 363 | psp_ring_reg = 3 << 16; |
| 367 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); | 364 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); |
| @@ -593,7 +590,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) | |||
| 593 | } | 590 | } |
| 594 | 591 | ||
| 595 | /*send the mode 1 reset command*/ | 592 | /*send the mode 1 reset command*/ |
| 596 | WREG32(offset, 0x70000); | 593 | WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST); |
| 597 | 594 | ||
| 598 | mdelay(1000); | 595 | mdelay(1000); |
| 599 | 596 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index b6a25f92d566..1bccc5fe2d9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
| @@ -1146,7 +1146,7 @@ static int sdma_v3_0_sw_init(void *handle) | |||
| 1146 | if (!amdgpu_sriov_vf(adev)) { | 1146 | if (!amdgpu_sriov_vf(adev)) { |
| 1147 | ring->use_doorbell = true; | 1147 | ring->use_doorbell = true; |
| 1148 | ring->doorbell_index = (i == 0) ? | 1148 | ring->doorbell_index = (i == 0) ? |
| 1149 | AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1; | 1149 | adev->doorbell_index.sdma_engine0 : adev->doorbell_index.sdma_engine1; |
| 1150 | } else { | 1150 | } else { |
| 1151 | ring->use_pollmem = true; | 1151 | ring->use_pollmem = true; |
| 1152 | } | 1152 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index f4490cdd9804..4b6d3e5c821f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | |||
| @@ -925,11 +925,9 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i) | |||
| 925 | OFFSET, ring->doorbell_index); | 925 | OFFSET, ring->doorbell_index); |
| 926 | WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell); | 926 | WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell); |
| 927 | WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset); | 927 | WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset); |
| 928 | /* TODO: enable doorbell support */ | ||
| 929 | /*adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, | ||
| 930 | ring->doorbell_index);*/ | ||
| 931 | 928 | ||
| 932 | sdma_v4_0_ring_set_wptr(ring); | 929 | /* paging queue doorbell range is setup at sdma_v4_0_gfx_resume */ |
| 930 | sdma_v4_0_page_ring_set_wptr(ring); | ||
| 933 | 931 | ||
| 934 | /* set minor_ptr_update to 0 after wptr programed */ | 932 | /* set minor_ptr_update to 0 after wptr programed */ |
| 935 | WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0); | 933 | WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0); |
| @@ -1449,23 +1447,46 @@ static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, | |||
| 1449 | sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10); | 1447 | sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10); |
| 1450 | } | 1448 | } |
| 1451 | 1449 | ||
| 1450 | static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev) | ||
| 1451 | { | ||
| 1452 | uint fw_version = adev->sdma.instance[0].fw_version; | ||
| 1453 | |||
| 1454 | switch (adev->asic_type) { | ||
| 1455 | case CHIP_VEGA10: | ||
| 1456 | return fw_version >= 430; | ||
| 1457 | case CHIP_VEGA12: | ||
| 1458 | /*return fw_version >= 31;*/ | ||
| 1459 | return false; | ||
| 1460 | case CHIP_VEGA20: | ||
| 1461 | /*return fw_version >= 115;*/ | ||
| 1462 | return false; | ||
| 1463 | default: | ||
| 1464 | return false; | ||
| 1465 | } | ||
| 1466 | } | ||
| 1467 | |||
| 1452 | static int sdma_v4_0_early_init(void *handle) | 1468 | static int sdma_v4_0_early_init(void *handle) |
| 1453 | { | 1469 | { |
| 1454 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1470 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1471 | int r; | ||
| 1455 | 1472 | ||
| 1456 | if (adev->asic_type == CHIP_RAVEN) { | 1473 | if (adev->asic_type == CHIP_RAVEN) |
| 1457 | adev->sdma.num_instances = 1; | 1474 | adev->sdma.num_instances = 1; |
| 1458 | adev->sdma.has_page_queue = false; | 1475 | else |
| 1459 | } else { | ||
| 1460 | adev->sdma.num_instances = 2; | 1476 | adev->sdma.num_instances = 2; |
| 1461 | /* TODO: Page queue breaks driver reload under SRIOV */ | 1477 | |
| 1462 | if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev))) | 1478 | r = sdma_v4_0_init_microcode(adev); |
| 1463 | adev->sdma.has_page_queue = false; | 1479 | if (r) { |
| 1464 | else if (adev->asic_type != CHIP_VEGA20 && | 1480 | DRM_ERROR("Failed to load sdma firmware!\n"); |
| 1465 | adev->asic_type != CHIP_VEGA12) | 1481 | return r; |
| 1466 | adev->sdma.has_page_queue = true; | ||
| 1467 | } | 1482 | } |
| 1468 | 1483 | ||
| 1484 | /* TODO: Page queue breaks driver reload under SRIOV */ | ||
| 1485 | if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev))) | ||
| 1486 | adev->sdma.has_page_queue = false; | ||
| 1487 | else if (sdma_v4_0_fw_support_paging_queue(adev)) | ||
| 1488 | adev->sdma.has_page_queue = true; | ||
| 1489 | |||
| 1469 | sdma_v4_0_set_ring_funcs(adev); | 1490 | sdma_v4_0_set_ring_funcs(adev); |
| 1470 | sdma_v4_0_set_buffer_funcs(adev); | 1491 | sdma_v4_0_set_buffer_funcs(adev); |
| 1471 | sdma_v4_0_set_vm_pte_funcs(adev); | 1492 | sdma_v4_0_set_vm_pte_funcs(adev); |
| @@ -1474,7 +1495,6 @@ static int sdma_v4_0_early_init(void *handle) | |||
| 1474 | return 0; | 1495 | return 0; |
| 1475 | } | 1496 | } |
| 1476 | 1497 | ||
| 1477 | |||
| 1478 | static int sdma_v4_0_sw_init(void *handle) | 1498 | static int sdma_v4_0_sw_init(void *handle) |
| 1479 | { | 1499 | { |
| 1480 | struct amdgpu_ring *ring; | 1500 | struct amdgpu_ring *ring; |
| @@ -1493,12 +1513,6 @@ static int sdma_v4_0_sw_init(void *handle) | |||
| 1493 | if (r) | 1513 | if (r) |
| 1494 | return r; | 1514 | return r; |
| 1495 | 1515 | ||
| 1496 | r = sdma_v4_0_init_microcode(adev); | ||
| 1497 | if (r) { | ||
| 1498 | DRM_ERROR("Failed to load sdma firmware!\n"); | ||
| 1499 | return r; | ||
| 1500 | } | ||
| 1501 | |||
| 1502 | for (i = 0; i < adev->sdma.num_instances; i++) { | 1516 | for (i = 0; i < adev->sdma.num_instances; i++) { |
| 1503 | ring = &adev->sdma.instance[i].ring; | 1517 | ring = &adev->sdma.instance[i].ring; |
| 1504 | ring->ring_obj = NULL; | 1518 | ring->ring_obj = NULL; |
| @@ -1507,15 +1521,10 @@ static int sdma_v4_0_sw_init(void *handle) | |||
| 1507 | DRM_INFO("use_doorbell being set to: [%s]\n", | 1521 | DRM_INFO("use_doorbell being set to: [%s]\n", |
| 1508 | ring->use_doorbell?"true":"false"); | 1522 | ring->use_doorbell?"true":"false"); |
| 1509 | 1523 | ||
| 1510 | if (adev->asic_type == CHIP_VEGA10) | 1524 | /* doorbell size is 2 dwords, get DWORD offset */ |
| 1511 | ring->doorbell_index = (i == 0) ? | 1525 | ring->doorbell_index = (i == 0) ? |
| 1512 | (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset | 1526 | (adev->doorbell_index.sdma_engine0 << 1) |
| 1513 | : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset | 1527 | : (adev->doorbell_index.sdma_engine1 << 1); |
| 1514 | else | ||
| 1515 | ring->doorbell_index = (i == 0) ? | ||
| 1516 | (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset | ||
| 1517 | : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset | ||
| 1518 | |||
| 1519 | 1528 | ||
| 1520 | sprintf(ring->name, "sdma%d", i); | 1529 | sprintf(ring->name, "sdma%d", i); |
| 1521 | r = amdgpu_ring_init(adev, ring, 1024, | 1530 | r = amdgpu_ring_init(adev, ring, 1024, |
| @@ -1529,7 +1538,15 @@ static int sdma_v4_0_sw_init(void *handle) | |||
| 1529 | if (adev->sdma.has_page_queue) { | 1538 | if (adev->sdma.has_page_queue) { |
| 1530 | ring = &adev->sdma.instance[i].page; | 1539 | ring = &adev->sdma.instance[i].page; |
| 1531 | ring->ring_obj = NULL; | 1540 | ring->ring_obj = NULL; |
| 1532 | ring->use_doorbell = false; | 1541 | ring->use_doorbell = true; |
| 1542 | |||
| 1543 | /* paging queue use same doorbell index/routing as gfx queue | ||
| 1544 | * with 0x400 (4096 dwords) offset on second doorbell page | ||
| 1545 | */ | ||
| 1546 | ring->doorbell_index = (i == 0) ? | ||
| 1547 | (adev->doorbell_index.sdma_engine0 << 1) | ||
| 1548 | : (adev->doorbell_index.sdma_engine1 << 1); | ||
| 1549 | ring->doorbell_index += 0x400; | ||
| 1533 | 1550 | ||
| 1534 | sprintf(ring->name, "page%d", i); | 1551 | sprintf(ring->name, "page%d", i); |
| 1535 | r = amdgpu_ring_init(adev, ring, 1024, | 1552 | r = amdgpu_ring_init(adev, ring, 1024, |
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4cc0dcb1a187..83624e150ca7 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c | |||
| @@ -613,6 +613,24 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = | |||
| 613 | .flush_hdp = &soc15_flush_hdp, | 613 | .flush_hdp = &soc15_flush_hdp, |
| 614 | .invalidate_hdp = &soc15_invalidate_hdp, | 614 | .invalidate_hdp = &soc15_invalidate_hdp, |
| 615 | .need_full_reset = &soc15_need_full_reset, | 615 | .need_full_reset = &soc15_need_full_reset, |
| 616 | .init_doorbell_index = &vega10_doorbell_index_init, | ||
| 617 | }; | ||
| 618 | |||
| 619 | static const struct amdgpu_asic_funcs vega20_asic_funcs = | ||
| 620 | { | ||
| 621 | .read_disabled_bios = &soc15_read_disabled_bios, | ||
| 622 | .read_bios_from_rom = &soc15_read_bios_from_rom, | ||
| 623 | .read_register = &soc15_read_register, | ||
| 624 | .reset = &soc15_asic_reset, | ||
| 625 | .set_vga_state = &soc15_vga_set_state, | ||
| 626 | .get_xclk = &soc15_get_xclk, | ||
| 627 | .set_uvd_clocks = &soc15_set_uvd_clocks, | ||
| 628 | .set_vce_clocks = &soc15_set_vce_clocks, | ||
| 629 | .get_config_memsize = &soc15_get_config_memsize, | ||
| 630 | .flush_hdp = &soc15_flush_hdp, | ||
| 631 | .invalidate_hdp = &soc15_invalidate_hdp, | ||
| 632 | .need_full_reset = &soc15_need_full_reset, | ||
| 633 | .init_doorbell_index = &vega20_doorbell_index_init, | ||
| 616 | }; | 634 | }; |
| 617 | 635 | ||
| 618 | static int soc15_common_early_init(void *handle) | 636 | static int soc15_common_early_init(void *handle) |
| @@ -632,11 +650,11 @@ static int soc15_common_early_init(void *handle) | |||
| 632 | adev->se_cac_rreg = &soc15_se_cac_rreg; | 650 | adev->se_cac_rreg = &soc15_se_cac_rreg; |
| 633 | adev->se_cac_wreg = &soc15_se_cac_wreg; | 651 | adev->se_cac_wreg = &soc15_se_cac_wreg; |
| 634 | 652 | ||
| 635 | adev->asic_funcs = &soc15_asic_funcs; | ||
| 636 | 653 | ||
| 637 | adev->external_rev_id = 0xFF; | 654 | adev->external_rev_id = 0xFF; |
| 638 | switch (adev->asic_type) { | 655 | switch (adev->asic_type) { |
| 639 | case CHIP_VEGA10: | 656 | case CHIP_VEGA10: |
| 657 | adev->asic_funcs = &soc15_asic_funcs; | ||
| 640 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | | 658 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
| 641 | AMD_CG_SUPPORT_GFX_MGLS | | 659 | AMD_CG_SUPPORT_GFX_MGLS | |
| 642 | AMD_CG_SUPPORT_GFX_RLC_LS | | 660 | AMD_CG_SUPPORT_GFX_RLC_LS | |
| @@ -660,6 +678,7 @@ static int soc15_common_early_init(void *handle) | |||
| 660 | adev->external_rev_id = 0x1; | 678 | adev->external_rev_id = 0x1; |
| 661 | break; | 679 | break; |
| 662 | case CHIP_VEGA12: | 680 | case CHIP_VEGA12: |
| 681 | adev->asic_funcs = &soc15_asic_funcs; | ||
| 663 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | | 682 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
| 664 | AMD_CG_SUPPORT_GFX_MGLS | | 683 | AMD_CG_SUPPORT_GFX_MGLS | |
| 665 | AMD_CG_SUPPORT_GFX_CGCG | | 684 | AMD_CG_SUPPORT_GFX_CGCG | |
| @@ -682,6 +701,7 @@ static int soc15_common_early_init(void *handle) | |||
| 682 | adev->external_rev_id = adev->rev_id + 0x14; | 701 | adev->external_rev_id = adev->rev_id + 0x14; |
| 683 | break; | 702 | break; |
| 684 | case CHIP_VEGA20: | 703 | case CHIP_VEGA20: |
| 704 | adev->asic_funcs = &vega20_asic_funcs; | ||
| 685 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | | 705 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
| 686 | AMD_CG_SUPPORT_GFX_MGLS | | 706 | AMD_CG_SUPPORT_GFX_MGLS | |
| 687 | AMD_CG_SUPPORT_GFX_CGCG | | 707 | AMD_CG_SUPPORT_GFX_CGCG | |
| @@ -704,6 +724,7 @@ static int soc15_common_early_init(void *handle) | |||
| 704 | adev->external_rev_id = adev->rev_id + 0x28; | 724 | adev->external_rev_id = adev->rev_id + 0x28; |
| 705 | break; | 725 | break; |
| 706 | case CHIP_RAVEN: | 726 | case CHIP_RAVEN: |
| 727 | adev->asic_funcs = &soc15_asic_funcs; | ||
| 707 | if (adev->rev_id >= 0x8) | 728 | if (adev->rev_id >= 0x8) |
| 708 | adev->external_rev_id = adev->rev_id + 0x81; | 729 | adev->external_rev_id = adev->rev_id + 0x81; |
| 709 | else if (adev->pdev->device == 0x15d8) | 730 | else if (adev->pdev->device == 0x15d8) |
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index f8ad7804dc40..a66c8bfbbaa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h | |||
| @@ -58,4 +58,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, | |||
| 58 | int vega10_reg_base_init(struct amdgpu_device *adev); | 58 | int vega10_reg_base_init(struct amdgpu_device *adev); |
| 59 | int vega20_reg_base_init(struct amdgpu_device *adev); | 59 | int vega20_reg_base_init(struct amdgpu_device *adev); |
| 60 | 60 | ||
| 61 | void vega10_doorbell_index_init(struct amdgpu_device *adev); | ||
| 62 | void vega20_doorbell_index_init(struct amdgpu_device *adev); | ||
| 61 | #endif | 63 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 3abffd06b5c7..dcdbb4d72472 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c | |||
| @@ -322,7 +322,7 @@ static int tonga_ih_sw_init(void *handle) | |||
| 322 | return r; | 322 | return r; |
| 323 | 323 | ||
| 324 | adev->irq.ih.use_doorbell = true; | 324 | adev->irq.ih.use_doorbell = true; |
| 325 | adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH; | 325 | adev->irq.ih.doorbell_index = adev->doorbell_index.ih; |
| 326 | 326 | ||
| 327 | r = amdgpu_irq_init(adev); | 327 | r = amdgpu_irq_init(adev); |
| 328 | 328 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 90bbcee00f28..d69c8f6daaf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | |||
| @@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle) | |||
| 116 | if (r) | 116 | if (r) |
| 117 | return r; | 117 | return r; |
| 118 | 118 | ||
| 119 | r = amdgpu_uvd_resume(adev); | ||
| 120 | if (r) | ||
| 121 | return r; | ||
| 122 | |||
| 123 | ring = &adev->uvd.inst->ring; | 119 | ring = &adev->uvd.inst->ring; |
| 124 | sprintf(ring->name, "uvd"); | 120 | sprintf(ring->name, "uvd"); |
| 125 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); | 121 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); |
| 126 | if (r) | 122 | if (r) |
| 127 | return r; | 123 | return r; |
| 128 | 124 | ||
| 125 | r = amdgpu_uvd_resume(adev); | ||
| 126 | if (r) | ||
| 127 | return r; | ||
| 128 | |||
| 129 | r = amdgpu_uvd_entity_init(adev); | 129 | r = amdgpu_uvd_entity_init(adev); |
| 130 | 130 | ||
| 131 | return r; | 131 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 1c5e12703103..ee8cd06ddc38 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | |||
| @@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle) | |||
| 113 | if (r) | 113 | if (r) |
| 114 | return r; | 114 | return r; |
| 115 | 115 | ||
| 116 | r = amdgpu_uvd_resume(adev); | ||
| 117 | if (r) | ||
| 118 | return r; | ||
| 119 | |||
| 120 | ring = &adev->uvd.inst->ring; | 116 | ring = &adev->uvd.inst->ring; |
| 121 | sprintf(ring->name, "uvd"); | 117 | sprintf(ring->name, "uvd"); |
| 122 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); | 118 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); |
| 123 | if (r) | 119 | if (r) |
| 124 | return r; | 120 | return r; |
| 125 | 121 | ||
| 122 | r = amdgpu_uvd_resume(adev); | ||
| 123 | if (r) | ||
| 124 | return r; | ||
| 125 | |||
| 126 | r = amdgpu_uvd_entity_init(adev); | 126 | r = amdgpu_uvd_entity_init(adev); |
| 127 | 127 | ||
| 128 | return r; | 128 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index f184842ef2a2..d4f4a66f8324 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
| @@ -400,16 +400,16 @@ static int uvd_v6_0_sw_init(void *handle) | |||
| 400 | DRM_INFO("UVD ENC is disabled\n"); | 400 | DRM_INFO("UVD ENC is disabled\n"); |
| 401 | } | 401 | } |
| 402 | 402 | ||
| 403 | r = amdgpu_uvd_resume(adev); | ||
| 404 | if (r) | ||
| 405 | return r; | ||
| 406 | |||
| 407 | ring = &adev->uvd.inst->ring; | 403 | ring = &adev->uvd.inst->ring; |
| 408 | sprintf(ring->name, "uvd"); | 404 | sprintf(ring->name, "uvd"); |
| 409 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); | 405 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); |
| 410 | if (r) | 406 | if (r) |
| 411 | return r; | 407 | return r; |
| 412 | 408 | ||
| 409 | r = amdgpu_uvd_resume(adev); | ||
| 410 | if (r) | ||
| 411 | return r; | ||
| 412 | |||
| 413 | if (uvd_v6_0_enc_support(adev)) { | 413 | if (uvd_v6_0_enc_support(adev)) { |
| 414 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | 414 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
| 415 | ring = &adev->uvd.inst->ring_enc[i]; | 415 | ring = &adev->uvd.inst->ring_enc[i]; |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 8a4595968d98..089645e78f98 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | |||
| @@ -430,10 +430,6 @@ static int uvd_v7_0_sw_init(void *handle) | |||
| 430 | DRM_INFO("PSP loading UVD firmware\n"); | 430 | DRM_INFO("PSP loading UVD firmware\n"); |
| 431 | } | 431 | } |
| 432 | 432 | ||
| 433 | r = amdgpu_uvd_resume(adev); | ||
| 434 | if (r) | ||
| 435 | return r; | ||
| 436 | |||
| 437 | for (j = 0; j < adev->uvd.num_uvd_inst; j++) { | 433 | for (j = 0; j < adev->uvd.num_uvd_inst; j++) { |
| 438 | if (adev->uvd.harvest_config & (1 << j)) | 434 | if (adev->uvd.harvest_config & (1 << j)) |
| 439 | continue; | 435 | continue; |
| @@ -455,9 +451,9 @@ static int uvd_v7_0_sw_init(void *handle) | |||
| 455 | * sriov, so set unused location for other unused rings. | 451 | * sriov, so set unused location for other unused rings. |
| 456 | */ | 452 | */ |
| 457 | if (i == 0) | 453 | if (i == 0) |
| 458 | ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; | 454 | ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2; |
| 459 | else | 455 | else |
| 460 | ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; | 456 | ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1; |
| 461 | } | 457 | } |
| 462 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); | 458 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); |
| 463 | if (r) | 459 | if (r) |
| @@ -465,6 +461,10 @@ static int uvd_v7_0_sw_init(void *handle) | |||
| 465 | } | 461 | } |
| 466 | } | 462 | } |
| 467 | 463 | ||
| 464 | r = amdgpu_uvd_resume(adev); | ||
| 465 | if (r) | ||
| 466 | return r; | ||
| 467 | |||
| 468 | r = amdgpu_uvd_entity_init(adev); | 468 | r = amdgpu_uvd_entity_init(adev); |
| 469 | if (r) | 469 | if (r) |
| 470 | return r; | 470 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 3e84840859a7..2668effadd27 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
| @@ -37,7 +37,6 @@ | |||
| 37 | #include "gca/gfx_8_0_d.h" | 37 | #include "gca/gfx_8_0_d.h" |
| 38 | #include "smu/smu_7_1_2_d.h" | 38 | #include "smu/smu_7_1_2_d.h" |
| 39 | #include "smu/smu_7_1_2_sh_mask.h" | 39 | #include "smu/smu_7_1_2_sh_mask.h" |
| 40 | #include "gca/gfx_8_0_d.h" | ||
| 41 | #include "gca/gfx_8_0_sh_mask.h" | 40 | #include "gca/gfx_8_0_sh_mask.h" |
| 42 | #include "ivsrcid/ivsrcid_vislands30.h" | 41 | #include "ivsrcid/ivsrcid_vislands30.h" |
| 43 | 42 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 0054ba1b9a68..9fb34b7d8e03 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | |||
| @@ -466,9 +466,9 @@ static int vce_v4_0_sw_init(void *handle) | |||
| 466 | * so set unused location for other unused rings. | 466 | * so set unused location for other unused rings. |
| 467 | */ | 467 | */ |
| 468 | if (i == 0) | 468 | if (i == 0) |
| 469 | ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2; | 469 | ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2; |
| 470 | else | 470 | else |
| 471 | ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1; | 471 | ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1; |
| 472 | } | 472 | } |
| 473 | r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); | 473 | r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); |
| 474 | if (r) | 474 | if (r) |
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index a0fda6f9252a..d84b687240d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c | |||
| @@ -385,7 +385,7 @@ static int vega10_ih_sw_init(void *handle) | |||
| 385 | return r; | 385 | return r; |
| 386 | 386 | ||
| 387 | adev->irq.ih.use_doorbell = true; | 387 | adev->irq.ih.use_doorbell = true; |
| 388 | adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1; | 388 | adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; |
| 389 | 389 | ||
| 390 | r = amdgpu_irq_init(adev); | 390 | r = amdgpu_irq_init(adev); |
| 391 | 391 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c index c5c9b2bc190d..422674bb3cdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | |||
| @@ -56,4 +56,32 @@ int vega10_reg_base_init(struct amdgpu_device *adev) | |||
| 56 | return 0; | 56 | return 0; |
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | void vega10_doorbell_index_init(struct amdgpu_device *adev) | ||
| 60 | { | ||
| 61 | adev->doorbell_index.kiq = AMDGPU_DOORBELL64_KIQ; | ||
| 62 | adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL64_MEC_RING0; | ||
| 63 | adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL64_MEC_RING1; | ||
| 64 | adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL64_MEC_RING2; | ||
| 65 | adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL64_MEC_RING3; | ||
| 66 | adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL64_MEC_RING4; | ||
| 67 | adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL64_MEC_RING5; | ||
| 68 | adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL64_MEC_RING6; | ||
| 69 | adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL64_MEC_RING7; | ||
| 70 | adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START; | ||
| 71 | adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END; | ||
| 72 | adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0; | ||
| 73 | adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL64_sDMA_ENGINE0; | ||
| 74 | adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL64_sDMA_ENGINE1; | ||
| 75 | adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH; | ||
| 76 | adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1; | ||
| 77 | adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3; | ||
| 78 | adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_DOORBELL64_UVD_RING4_5; | ||
| 79 | adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_DOORBELL64_UVD_RING6_7; | ||
| 80 | adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_DOORBELL64_VCE_RING0_1; | ||
| 81 | adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3; | ||
| 82 | adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5; | ||
| 83 | adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7; | ||
| 84 | /* In unit of dword doorbell */ | ||
| 85 | adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1; | ||
| 86 | } | ||
| 59 | 87 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index d13fc4fcb517..edce413fda9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | |||
| @@ -54,4 +54,37 @@ int vega20_reg_base_init(struct amdgpu_device *adev) | |||
| 54 | return 0; | 54 | return 0; |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | void vega20_doorbell_index_init(struct amdgpu_device *adev) | ||
| 58 | { | ||
| 59 | adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ; | ||
| 60 | adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0; | ||
| 61 | adev->doorbell_index.mec_ring1 = AMDGPU_VEGA20_DOORBELL_MEC_RING1; | ||
| 62 | adev->doorbell_index.mec_ring2 = AMDGPU_VEGA20_DOORBELL_MEC_RING2; | ||
| 63 | adev->doorbell_index.mec_ring3 = AMDGPU_VEGA20_DOORBELL_MEC_RING3; | ||
| 64 | adev->doorbell_index.mec_ring4 = AMDGPU_VEGA20_DOORBELL_MEC_RING4; | ||
| 65 | adev->doorbell_index.mec_ring5 = AMDGPU_VEGA20_DOORBELL_MEC_RING5; | ||
| 66 | adev->doorbell_index.mec_ring6 = AMDGPU_VEGA20_DOORBELL_MEC_RING6; | ||
| 67 | adev->doorbell_index.mec_ring7 = AMDGPU_VEGA20_DOORBELL_MEC_RING7; | ||
| 68 | adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START; | ||
| 69 | adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END; | ||
| 70 | adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0; | ||
| 71 | adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0; | ||
| 72 | adev->doorbell_index.sdma_engine1 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1; | ||
| 73 | adev->doorbell_index.sdma_engine2 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2; | ||
| 74 | adev->doorbell_index.sdma_engine3 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3; | ||
| 75 | adev->doorbell_index.sdma_engine4 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4; | ||
| 76 | adev->doorbell_index.sdma_engine5 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5; | ||
| 77 | adev->doorbell_index.sdma_engine6 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6; | ||
| 78 | adev->doorbell_index.sdma_engine7 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7; | ||
| 79 | adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH; | ||
| 80 | adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1; | ||
| 81 | adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3; | ||
| 82 | adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5; | ||
| 83 | adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7; | ||
| 84 | adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1; | ||
| 85 | adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3; | ||
| 86 | adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5; | ||
| 87 | adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7; | ||
| 88 | adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1; | ||
| 89 | } | ||
| 57 | 90 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 07880d35e9de..ff2906c215fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
| @@ -955,6 +955,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = | |||
| 955 | .flush_hdp = &vi_flush_hdp, | 955 | .flush_hdp = &vi_flush_hdp, |
| 956 | .invalidate_hdp = &vi_invalidate_hdp, | 956 | .invalidate_hdp = &vi_invalidate_hdp, |
| 957 | .need_full_reset = &vi_need_full_reset, | 957 | .need_full_reset = &vi_need_full_reset, |
| 958 | .init_doorbell_index = &legacy_doorbell_index_init, | ||
| 958 | }; | 959 | }; |
| 959 | 960 | ||
| 960 | #define CZ_REV_BRISTOL(rev) \ | 961 | #define CZ_REV_BRISTOL(rev) \ |
| @@ -1712,3 +1713,21 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) | |||
| 1712 | 1713 | ||
| 1713 | return 0; | 1714 | return 0; |
| 1714 | } | 1715 | } |
| 1716 | |||
| 1717 | void legacy_doorbell_index_init(struct amdgpu_device *adev) | ||
| 1718 | { | ||
| 1719 | adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ; | ||
| 1720 | adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0; | ||
| 1721 | adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1; | ||
| 1722 | adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2; | ||
| 1723 | adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3; | ||
| 1724 | adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4; | ||
| 1725 | adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5; | ||
| 1726 | adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6; | ||
| 1727 | adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7; | ||
| 1728 | adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0; | ||
| 1729 | adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0; | ||
| 1730 | adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1; | ||
| 1731 | adev->doorbell_index.ih = AMDGPU_DOORBELL_IH; | ||
| 1732 | adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT; | ||
| 1733 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 0429fe332269..8de0772f986c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h | |||
| @@ -30,4 +30,5 @@ void vi_srbm_select(struct amdgpu_device *adev, | |||
| 30 | u32 me, u32 pipe, u32 queue, u32 vmid); | 30 | u32 me, u32 pipe, u32 queue, u32 vmid); |
| 31 | int vi_set_ip_blocks(struct amdgpu_device *adev); | 31 | int vi_set_ip_blocks(struct amdgpu_device *adev); |
| 32 | 32 | ||
| 33 | void legacy_doorbell_index_init(struct amdgpu_device *adev); | ||
| 33 | #endif | 34 | #endif |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 3783d122f283..c02adbbeef2a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c | |||
| @@ -133,6 +133,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = { | |||
| 133 | #define fiji_cache_info carrizo_cache_info | 133 | #define fiji_cache_info carrizo_cache_info |
| 134 | #define polaris10_cache_info carrizo_cache_info | 134 | #define polaris10_cache_info carrizo_cache_info |
| 135 | #define polaris11_cache_info carrizo_cache_info | 135 | #define polaris11_cache_info carrizo_cache_info |
| 136 | #define polaris12_cache_info carrizo_cache_info | ||
| 136 | /* TODO - check & update Vega10 cache details */ | 137 | /* TODO - check & update Vega10 cache details */ |
| 137 | #define vega10_cache_info carrizo_cache_info | 138 | #define vega10_cache_info carrizo_cache_info |
| 138 | #define raven_cache_info carrizo_cache_info | 139 | #define raven_cache_info carrizo_cache_info |
| @@ -647,7 +648,12 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, | |||
| 647 | pcache_info = polaris11_cache_info; | 648 | pcache_info = polaris11_cache_info; |
| 648 | num_of_cache_types = ARRAY_SIZE(polaris11_cache_info); | 649 | num_of_cache_types = ARRAY_SIZE(polaris11_cache_info); |
| 649 | break; | 650 | break; |
| 651 | case CHIP_POLARIS12: | ||
| 652 | pcache_info = polaris12_cache_info; | ||
| 653 | num_of_cache_types = ARRAY_SIZE(polaris12_cache_info); | ||
| 654 | break; | ||
| 650 | case CHIP_VEGA10: | 655 | case CHIP_VEGA10: |
| 656 | case CHIP_VEGA12: | ||
| 651 | case CHIP_VEGA20: | 657 | case CHIP_VEGA20: |
| 652 | pcache_info = vega10_cache_info; | 658 | pcache_info = vega10_cache_info; |
| 653 | num_of_cache_types = ARRAY_SIZE(vega10_cache_info); | 659 | num_of_cache_types = ARRAY_SIZE(vega10_cache_info); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index c004647c8cb4..9ed14a11afa2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c | |||
| @@ -205,6 +205,22 @@ static const struct kfd_device_info polaris11_device_info = { | |||
| 205 | .num_sdma_queues_per_engine = 2, | 205 | .num_sdma_queues_per_engine = 2, |
| 206 | }; | 206 | }; |
| 207 | 207 | ||
| 208 | static const struct kfd_device_info polaris12_device_info = { | ||
| 209 | .asic_family = CHIP_POLARIS12, | ||
| 210 | .max_pasid_bits = 16, | ||
| 211 | .max_no_of_hqd = 24, | ||
| 212 | .doorbell_size = 4, | ||
| 213 | .ih_ring_entry_size = 4 * sizeof(uint32_t), | ||
| 214 | .event_interrupt_class = &event_interrupt_class_cik, | ||
| 215 | .num_of_watch_points = 4, | ||
| 216 | .mqd_size_aligned = MQD_SIZE_ALIGNED, | ||
| 217 | .supports_cwsr = true, | ||
| 218 | .needs_iommu_device = false, | ||
| 219 | .needs_pci_atomics = true, | ||
| 220 | .num_sdma_engines = 2, | ||
| 221 | .num_sdma_queues_per_engine = 2, | ||
| 222 | }; | ||
| 223 | |||
| 208 | static const struct kfd_device_info vega10_device_info = { | 224 | static const struct kfd_device_info vega10_device_info = { |
| 209 | .asic_family = CHIP_VEGA10, | 225 | .asic_family = CHIP_VEGA10, |
| 210 | .max_pasid_bits = 16, | 226 | .max_pasid_bits = 16, |
| @@ -237,6 +253,22 @@ static const struct kfd_device_info vega10_vf_device_info = { | |||
| 237 | .num_sdma_queues_per_engine = 2, | 253 | .num_sdma_queues_per_engine = 2, |
| 238 | }; | 254 | }; |
| 239 | 255 | ||
| 256 | static const struct kfd_device_info vega12_device_info = { | ||
| 257 | .asic_family = CHIP_VEGA12, | ||
| 258 | .max_pasid_bits = 16, | ||
| 259 | .max_no_of_hqd = 24, | ||
| 260 | .doorbell_size = 8, | ||
| 261 | .ih_ring_entry_size = 8 * sizeof(uint32_t), | ||
| 262 | .event_interrupt_class = &event_interrupt_class_v9, | ||
| 263 | .num_of_watch_points = 4, | ||
| 264 | .mqd_size_aligned = MQD_SIZE_ALIGNED, | ||
| 265 | .supports_cwsr = true, | ||
| 266 | .needs_iommu_device = false, | ||
| 267 | .needs_pci_atomics = false, | ||
| 268 | .num_sdma_engines = 2, | ||
| 269 | .num_sdma_queues_per_engine = 2, | ||
| 270 | }; | ||
| 271 | |||
| 240 | static const struct kfd_device_info vega20_device_info = { | 272 | static const struct kfd_device_info vega20_device_info = { |
| 241 | .asic_family = CHIP_VEGA20, | 273 | .asic_family = CHIP_VEGA20, |
| 242 | .max_pasid_bits = 16, | 274 | .max_pasid_bits = 16, |
| @@ -331,6 +363,14 @@ static const struct kfd_deviceid supported_devices[] = { | |||
| 331 | { 0x67EB, &polaris11_device_info }, /* Polaris11 */ | 363 | { 0x67EB, &polaris11_device_info }, /* Polaris11 */ |
| 332 | { 0x67EF, &polaris11_device_info }, /* Polaris11 */ | 364 | { 0x67EF, &polaris11_device_info }, /* Polaris11 */ |
| 333 | { 0x67FF, &polaris11_device_info }, /* Polaris11 */ | 365 | { 0x67FF, &polaris11_device_info }, /* Polaris11 */ |
| 366 | { 0x6980, &polaris12_device_info }, /* Polaris12 */ | ||
| 367 | { 0x6981, &polaris12_device_info }, /* Polaris12 */ | ||
| 368 | { 0x6985, &polaris12_device_info }, /* Polaris12 */ | ||
| 369 | { 0x6986, &polaris12_device_info }, /* Polaris12 */ | ||
| 370 | { 0x6987, &polaris12_device_info }, /* Polaris12 */ | ||
| 371 | { 0x6995, &polaris12_device_info }, /* Polaris12 */ | ||
| 372 | { 0x6997, &polaris12_device_info }, /* Polaris12 */ | ||
| 373 | { 0x699F, &polaris12_device_info }, /* Polaris12 */ | ||
| 334 | { 0x6860, &vega10_device_info }, /* Vega10 */ | 374 | { 0x6860, &vega10_device_info }, /* Vega10 */ |
| 335 | { 0x6861, &vega10_device_info }, /* Vega10 */ | 375 | { 0x6861, &vega10_device_info }, /* Vega10 */ |
| 336 | { 0x6862, &vega10_device_info }, /* Vega10 */ | 376 | { 0x6862, &vega10_device_info }, /* Vega10 */ |
| @@ -340,6 +380,11 @@ static const struct kfd_deviceid supported_devices[] = { | |||
| 340 | { 0x6868, &vega10_device_info }, /* Vega10 */ | 380 | { 0x6868, &vega10_device_info }, /* Vega10 */ |
| 341 | { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ | 381 | { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ |
| 342 | { 0x687F, &vega10_device_info }, /* Vega10 */ | 382 | { 0x687F, &vega10_device_info }, /* Vega10 */ |
| 383 | { 0x69A0, &vega12_device_info }, /* Vega12 */ | ||
| 384 | { 0x69A1, &vega12_device_info }, /* Vega12 */ | ||
| 385 | { 0x69A2, &vega12_device_info }, /* Vega12 */ | ||
| 386 | { 0x69A3, &vega12_device_info }, /* Vega12 */ | ||
| 387 | { 0x69AF, &vega12_device_info }, /* Vega12 */ | ||
| 343 | { 0x66a0, &vega20_device_info }, /* Vega20 */ | 388 | { 0x66a0, &vega20_device_info }, /* Vega20 */ |
| 344 | { 0x66a1, &vega20_device_info }, /* Vega20 */ | 389 | { 0x66a1, &vega20_device_info }, /* Vega20 */ |
| 345 | { 0x66a2, &vega20_device_info }, /* Vega20 */ | 390 | { 0x66a2, &vega20_device_info }, /* Vega20 */ |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index fb9d66ea13b7..8372556b52eb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | |||
| @@ -1547,7 +1547,7 @@ static int get_wave_state(struct device_queue_manager *dqm, | |||
| 1547 | u32 *ctl_stack_used_size, | 1547 | u32 *ctl_stack_used_size, |
| 1548 | u32 *save_area_used_size) | 1548 | u32 *save_area_used_size) |
| 1549 | { | 1549 | { |
| 1550 | struct mqd_manager *mqd; | 1550 | struct mqd_manager *mqd_mgr; |
| 1551 | int r; | 1551 | int r; |
| 1552 | 1552 | ||
| 1553 | dqm_lock(dqm); | 1553 | dqm_lock(dqm); |
| @@ -1558,19 +1558,19 @@ static int get_wave_state(struct device_queue_manager *dqm, | |||
| 1558 | goto dqm_unlock; | 1558 | goto dqm_unlock; |
| 1559 | } | 1559 | } |
| 1560 | 1560 | ||
| 1561 | mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); | 1561 | mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); |
| 1562 | if (!mqd) { | 1562 | if (!mqd_mgr) { |
| 1563 | r = -ENOMEM; | 1563 | r = -ENOMEM; |
| 1564 | goto dqm_unlock; | 1564 | goto dqm_unlock; |
| 1565 | } | 1565 | } |
| 1566 | 1566 | ||
| 1567 | if (!mqd->get_wave_state) { | 1567 | if (!mqd_mgr->get_wave_state) { |
| 1568 | r = -EINVAL; | 1568 | r = -EINVAL; |
| 1569 | goto dqm_unlock; | 1569 | goto dqm_unlock; |
| 1570 | } | 1570 | } |
| 1571 | 1571 | ||
| 1572 | r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size, | 1572 | r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack, |
| 1573 | save_area_used_size); | 1573 | ctl_stack_used_size, save_area_used_size); |
| 1574 | 1574 | ||
| 1575 | dqm_unlock: | 1575 | dqm_unlock: |
| 1576 | dqm_unlock(dqm); | 1576 | dqm_unlock(dqm); |
| @@ -1741,10 +1741,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) | |||
| 1741 | case CHIP_FIJI: | 1741 | case CHIP_FIJI: |
| 1742 | case CHIP_POLARIS10: | 1742 | case CHIP_POLARIS10: |
| 1743 | case CHIP_POLARIS11: | 1743 | case CHIP_POLARIS11: |
| 1744 | case CHIP_POLARIS12: | ||
| 1744 | device_queue_manager_init_vi_tonga(&dqm->asic_ops); | 1745 | device_queue_manager_init_vi_tonga(&dqm->asic_ops); |
| 1745 | break; | 1746 | break; |
| 1746 | 1747 | ||
| 1747 | case CHIP_VEGA10: | 1748 | case CHIP_VEGA10: |
| 1749 | case CHIP_VEGA12: | ||
| 1748 | case CHIP_VEGA20: | 1750 | case CHIP_VEGA20: |
| 1749 | case CHIP_RAVEN: | 1751 | case CHIP_RAVEN: |
| 1750 | device_queue_manager_init_v9(&dqm->asic_ops); | 1752 | device_queue_manager_init_v9(&dqm->asic_ops); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c index fd60a116be37..c3a5dcfe877a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | #include "kfd_device_queue_manager.h" | 24 | #include "kfd_device_queue_manager.h" |
| 25 | #include "gca/gfx_8_0_enum.h" | 25 | #include "gca/gfx_8_0_enum.h" |
| 26 | #include "gca/gfx_8_0_sh_mask.h" | 26 | #include "gca/gfx_8_0_sh_mask.h" |
| 27 | #include "gca/gfx_8_0_enum.h" | ||
| 28 | #include "oss/oss_3_0_sh_mask.h" | 27 | #include "oss/oss_3_0_sh_mask.h" |
| 29 | 28 | ||
| 30 | static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, | 29 | static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 3d66cec414af..213ea5454d11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | |||
| @@ -397,9 +397,11 @@ int kfd_init_apertures(struct kfd_process *process) | |||
| 397 | case CHIP_FIJI: | 397 | case CHIP_FIJI: |
| 398 | case CHIP_POLARIS10: | 398 | case CHIP_POLARIS10: |
| 399 | case CHIP_POLARIS11: | 399 | case CHIP_POLARIS11: |
| 400 | case CHIP_POLARIS12: | ||
| 400 | kfd_init_apertures_vi(pdd, id); | 401 | kfd_init_apertures_vi(pdd, id); |
| 401 | break; | 402 | break; |
| 402 | case CHIP_VEGA10: | 403 | case CHIP_VEGA10: |
| 404 | case CHIP_VEGA12: | ||
| 403 | case CHIP_VEGA20: | 405 | case CHIP_VEGA20: |
| 404 | case CHIP_RAVEN: | 406 | case CHIP_RAVEN: |
| 405 | kfd_init_apertures_v9(pdd, id); | 407 | kfd_init_apertures_v9(pdd, id); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index f836897bbf58..a85904ad0d5f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | #include "kfd_priv.h" | 23 | #include "kfd_priv.h" |
| 24 | #include "kfd_events.h" | 24 | #include "kfd_events.h" |
| 25 | #include "soc15_int.h" | 25 | #include "soc15_int.h" |
| 26 | 26 | #include "kfd_device_queue_manager.h" | |
| 27 | 27 | ||
| 28 | static bool event_interrupt_isr_v9(struct kfd_dev *dev, | 28 | static bool event_interrupt_isr_v9(struct kfd_dev *dev, |
| 29 | const uint32_t *ih_ring_entry, | 29 | const uint32_t *ih_ring_entry, |
| @@ -39,20 +39,39 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, | |||
| 39 | vmid > dev->vm_info.last_vmid_kfd) | 39 | vmid > dev->vm_info.last_vmid_kfd) |
| 40 | return 0; | 40 | return 0; |
| 41 | 41 | ||
| 42 | /* If there is no valid PASID, it's likely a firmware bug */ | ||
| 43 | pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); | ||
| 44 | if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt")) | ||
| 45 | return 0; | ||
| 46 | |||
| 47 | source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); | 42 | source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); |
| 48 | client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); | 43 | client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); |
| 44 | pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); | ||
| 45 | |||
| 46 | /* This is a known issue for gfx9. Under non HWS, pasid is not set | ||
| 47 | * in the interrupt payload, so we need to find out the pasid on our | ||
| 48 | * own. | ||
| 49 | */ | ||
| 50 | if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { | ||
| 51 | const uint32_t pasid_mask = 0xffff; | ||
| 49 | 52 | ||
| 50 | pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n", | 53 | *patched_flag = true; |
| 51 | client_id, source_id, pasid); | 54 | memcpy(patched_ihre, ih_ring_entry, |
| 55 | dev->device_info->ih_ring_entry_size); | ||
| 56 | |||
| 57 | pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid( | ||
| 58 | dev->kgd, vmid); | ||
| 59 | |||
| 60 | /* Patch the pasid field */ | ||
| 61 | patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3]) | ||
| 62 | & ~pasid_mask) | pasid); | ||
| 63 | } | ||
| 64 | |||
| 65 | pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", | ||
| 66 | client_id, source_id, vmid, pasid); | ||
| 52 | pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", | 67 | pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", |
| 53 | data[0], data[1], data[2], data[3], | 68 | data[0], data[1], data[2], data[3], |
| 54 | data[4], data[5], data[6], data[7]); | 69 | data[4], data[5], data[6], data[7]); |
| 55 | 70 | ||
| 71 | /* If there is no valid PASID, it's likely a bug */ | ||
| 72 | if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt")) | ||
| 73 | return 0; | ||
| 74 | |||
| 56 | /* Interrupt types we care about: various signals and faults. | 75 | /* Interrupt types we care about: various signals and faults. |
| 57 | * They will be forwarded to a work queue (see below). | 76 | * They will be forwarded to a work queue (see below). |
| 58 | */ | 77 | */ |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 6c31f7370193..f1596881f20a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | |||
| @@ -313,6 +313,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, | |||
| 313 | case CHIP_FIJI: | 313 | case CHIP_FIJI: |
| 314 | case CHIP_POLARIS10: | 314 | case CHIP_POLARIS10: |
| 315 | case CHIP_POLARIS11: | 315 | case CHIP_POLARIS11: |
| 316 | case CHIP_POLARIS12: | ||
| 316 | kernel_queue_init_vi(&kq->ops_asic_specific); | 317 | kernel_queue_init_vi(&kq->ops_asic_specific); |
| 317 | break; | 318 | break; |
| 318 | 319 | ||
| @@ -322,6 +323,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, | |||
| 322 | break; | 323 | break; |
| 323 | 324 | ||
| 324 | case CHIP_VEGA10: | 325 | case CHIP_VEGA10: |
| 326 | case CHIP_VEGA12: | ||
| 325 | case CHIP_VEGA20: | 327 | case CHIP_VEGA20: |
| 326 | case CHIP_RAVEN: | 328 | case CHIP_RAVEN: |
| 327 | kernel_queue_init_v9(&kq->ops_asic_specific); | 329 | kernel_queue_init_v9(&kq->ops_asic_specific); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 6910028010d6..aed9b9b82213 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | |||
| @@ -38,8 +38,10 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type, | |||
| 38 | case CHIP_FIJI: | 38 | case CHIP_FIJI: |
| 39 | case CHIP_POLARIS10: | 39 | case CHIP_POLARIS10: |
| 40 | case CHIP_POLARIS11: | 40 | case CHIP_POLARIS11: |
| 41 | case CHIP_POLARIS12: | ||
| 41 | return mqd_manager_init_vi_tonga(type, dev); | 42 | return mqd_manager_init_vi_tonga(type, dev); |
| 42 | case CHIP_VEGA10: | 43 | case CHIP_VEGA10: |
| 44 | case CHIP_VEGA12: | ||
| 43 | case CHIP_VEGA20: | 45 | case CHIP_VEGA20: |
| 44 | case CHIP_RAVEN: | 46 | case CHIP_RAVEN: |
| 45 | return mqd_manager_init_v9(type, dev); | 47 | return mqd_manager_init_v9(type, dev); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index c6080ed3b6a7..045a229436a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | |||
| @@ -226,9 +226,11 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) | |||
| 226 | case CHIP_FIJI: | 226 | case CHIP_FIJI: |
| 227 | case CHIP_POLARIS10: | 227 | case CHIP_POLARIS10: |
| 228 | case CHIP_POLARIS11: | 228 | case CHIP_POLARIS11: |
| 229 | case CHIP_POLARIS12: | ||
| 229 | pm->pmf = &kfd_vi_pm_funcs; | 230 | pm->pmf = &kfd_vi_pm_funcs; |
| 230 | break; | 231 | break; |
| 231 | case CHIP_VEGA10: | 232 | case CHIP_VEGA10: |
| 233 | case CHIP_VEGA12: | ||
| 232 | case CHIP_VEGA20: | 234 | case CHIP_VEGA20: |
| 233 | case CHIP_RAVEN: | 235 | case CHIP_RAVEN: |
| 234 | pm->pmf = &kfd_v9_pm_funcs; | 236 | pm->pmf = &kfd_v9_pm_funcs; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index c73b4ff61f99..aa793fcbbdcc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
| @@ -1272,12 +1272,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu) | |||
| 1272 | case CHIP_FIJI: | 1272 | case CHIP_FIJI: |
| 1273 | case CHIP_POLARIS10: | 1273 | case CHIP_POLARIS10: |
| 1274 | case CHIP_POLARIS11: | 1274 | case CHIP_POLARIS11: |
| 1275 | case CHIP_POLARIS12: | ||
| 1275 | pr_debug("Adding doorbell packet type capability\n"); | 1276 | pr_debug("Adding doorbell packet type capability\n"); |
| 1276 | dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 << | 1277 | dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 << |
| 1277 | HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & | 1278 | HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & |
| 1278 | HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); | 1279 | HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); |
| 1279 | break; | 1280 | break; |
| 1280 | case CHIP_VEGA10: | 1281 | case CHIP_VEGA10: |
| 1282 | case CHIP_VEGA12: | ||
| 1281 | case CHIP_VEGA20: | 1283 | case CHIP_VEGA20: |
| 1282 | case CHIP_RAVEN: | 1284 | case CHIP_RAVEN: |
| 1283 | dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << | 1285 | dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << |
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index c97dc9613325..cfde1568c79a 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile | |||
| @@ -32,11 +32,12 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc | |||
| 32 | subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync | 32 | subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync |
| 33 | subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color | 33 | subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color |
| 34 | subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet | 34 | subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet |
| 35 | subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power | ||
| 35 | 36 | ||
| 36 | #TODO: remove when Timing Sync feature is complete | 37 | #TODO: remove when Timing Sync feature is complete |
| 37 | subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 | 38 | subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 |
| 38 | 39 | ||
| 39 | DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet | 40 | DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power |
| 40 | 41 | ||
| 41 | AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) | 42 | AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) |
| 42 | 43 | ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d8d0b206a79c..32e791d9b9a8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
| @@ -38,7 +38,6 @@ | |||
| 38 | #include "amd_shared.h" | 38 | #include "amd_shared.h" |
| 39 | #include "amdgpu_dm_irq.h" | 39 | #include "amdgpu_dm_irq.h" |
| 40 | #include "dm_helpers.h" | 40 | #include "dm_helpers.h" |
| 41 | #include "dm_services_types.h" | ||
| 42 | #include "amdgpu_dm_mst_types.h" | 41 | #include "amdgpu_dm_mst_types.h" |
| 43 | #if defined(CONFIG_DEBUG_FS) | 42 | #if defined(CONFIG_DEBUG_FS) |
| 44 | #include "amdgpu_dm_debugfs.h" | 43 | #include "amdgpu_dm_debugfs.h" |
| @@ -72,6 +71,7 @@ | |||
| 72 | #endif | 71 | #endif |
| 73 | 72 | ||
| 74 | #include "modules/inc/mod_freesync.h" | 73 | #include "modules/inc/mod_freesync.h" |
| 74 | #include "modules/power/power_helpers.h" | ||
| 75 | 75 | ||
| 76 | #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" | 76 | #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" |
| 77 | MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); | 77 | MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); |
| @@ -643,6 +643,26 @@ static int dm_late_init(void *handle) | |||
| 643 | { | 643 | { |
| 644 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 644 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 645 | 645 | ||
| 646 | struct dmcu_iram_parameters params; | ||
| 647 | unsigned int linear_lut[16]; | ||
| 648 | int i; | ||
| 649 | struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; | ||
| 650 | bool ret; | ||
| 651 | |||
| 652 | for (i = 0; i < 16; i++) | ||
| 653 | linear_lut[i] = 0xFFFF * i / 15; | ||
| 654 | |||
| 655 | params.set = 0; | ||
| 656 | params.backlight_ramping_start = 0xCCCC; | ||
| 657 | params.backlight_ramping_reduction = 0xCCCCCCCC; | ||
| 658 | params.backlight_lut_array_size = 16; | ||
| 659 | params.backlight_lut_array = linear_lut; | ||
| 660 | |||
| 661 | ret = dmcu_load_iram(dmcu, params); | ||
| 662 | |||
| 663 | if (!ret) | ||
| 664 | return -EINVAL; | ||
| 665 | |||
| 646 | return detect_mst_link_for_all_connectors(adev->ddev); | 666 | return detect_mst_link_for_all_connectors(adev->ddev); |
| 647 | } | 667 | } |
| 648 | 668 | ||
| @@ -969,45 +989,6 @@ const struct amdgpu_ip_block_version dm_ip_block = | |||
| 969 | }; | 989 | }; |
| 970 | 990 | ||
| 971 | 991 | ||
| 972 | static struct drm_atomic_state * | ||
| 973 | dm_atomic_state_alloc(struct drm_device *dev) | ||
| 974 | { | ||
| 975 | struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL); | ||
| 976 | |||
| 977 | if (!state) | ||
| 978 | return NULL; | ||
| 979 | |||
| 980 | if (drm_atomic_state_init(dev, &state->base) < 0) | ||
| 981 | goto fail; | ||
| 982 | |||
| 983 | return &state->base; | ||
| 984 | |||
| 985 | fail: | ||
| 986 | kfree(state); | ||
| 987 | return NULL; | ||
| 988 | } | ||
| 989 | |||
| 990 | static void | ||
| 991 | dm_atomic_state_clear(struct drm_atomic_state *state) | ||
| 992 | { | ||
| 993 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); | ||
| 994 | |||
| 995 | if (dm_state->context) { | ||
| 996 | dc_release_state(dm_state->context); | ||
| 997 | dm_state->context = NULL; | ||
| 998 | } | ||
| 999 | |||
| 1000 | drm_atomic_state_default_clear(state); | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | static void | ||
| 1004 | dm_atomic_state_alloc_free(struct drm_atomic_state *state) | ||
| 1005 | { | ||
| 1006 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); | ||
| 1007 | drm_atomic_state_default_release(state); | ||
| 1008 | kfree(dm_state); | ||
| 1009 | } | ||
| 1010 | |||
| 1011 | /** | 992 | /** |
| 1012 | * DOC: atomic | 993 | * DOC: atomic |
| 1013 | * | 994 | * |
| @@ -1019,9 +1000,6 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { | |||
| 1019 | .output_poll_changed = drm_fb_helper_output_poll_changed, | 1000 | .output_poll_changed = drm_fb_helper_output_poll_changed, |
| 1020 | .atomic_check = amdgpu_dm_atomic_check, | 1001 | .atomic_check = amdgpu_dm_atomic_check, |
| 1021 | .atomic_commit = amdgpu_dm_atomic_commit, | 1002 | .atomic_commit = amdgpu_dm_atomic_commit, |
| 1022 | .atomic_state_alloc = dm_atomic_state_alloc, | ||
| 1023 | .atomic_state_clear = dm_atomic_state_clear, | ||
| 1024 | .atomic_state_free = dm_atomic_state_alloc_free | ||
| 1025 | }; | 1003 | }; |
| 1026 | 1004 | ||
| 1027 | static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { | 1005 | static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { |
| @@ -1543,8 +1521,117 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) | |||
| 1543 | } | 1521 | } |
| 1544 | #endif | 1522 | #endif |
| 1545 | 1523 | ||
| 1524 | /* | ||
| 1525 | * Acquires the lock for the atomic state object and returns | ||
| 1526 | * the new atomic state. | ||
| 1527 | * | ||
| 1528 | * This should only be called during atomic check. | ||
| 1529 | */ | ||
| 1530 | static int dm_atomic_get_state(struct drm_atomic_state *state, | ||
| 1531 | struct dm_atomic_state **dm_state) | ||
| 1532 | { | ||
| 1533 | struct drm_device *dev = state->dev; | ||
| 1534 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1535 | struct amdgpu_display_manager *dm = &adev->dm; | ||
| 1536 | struct drm_private_state *priv_state; | ||
| 1537 | int ret; | ||
| 1538 | |||
| 1539 | if (*dm_state) | ||
| 1540 | return 0; | ||
| 1541 | |||
| 1542 | ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx); | ||
| 1543 | if (ret) | ||
| 1544 | return ret; | ||
| 1545 | |||
| 1546 | priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); | ||
| 1547 | if (IS_ERR(priv_state)) | ||
| 1548 | return PTR_ERR(priv_state); | ||
| 1549 | |||
| 1550 | *dm_state = to_dm_atomic_state(priv_state); | ||
| 1551 | |||
| 1552 | return 0; | ||
| 1553 | } | ||
| 1554 | |||
| 1555 | struct dm_atomic_state * | ||
| 1556 | dm_atomic_get_new_state(struct drm_atomic_state *state) | ||
| 1557 | { | ||
| 1558 | struct drm_device *dev = state->dev; | ||
| 1559 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1560 | struct amdgpu_display_manager *dm = &adev->dm; | ||
| 1561 | struct drm_private_obj *obj; | ||
| 1562 | struct drm_private_state *new_obj_state; | ||
| 1563 | int i; | ||
| 1564 | |||
| 1565 | for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { | ||
| 1566 | if (obj->funcs == dm->atomic_obj.funcs) | ||
| 1567 | return to_dm_atomic_state(new_obj_state); | ||
| 1568 | } | ||
| 1569 | |||
| 1570 | return NULL; | ||
| 1571 | } | ||
| 1572 | |||
| 1573 | struct dm_atomic_state * | ||
| 1574 | dm_atomic_get_old_state(struct drm_atomic_state *state) | ||
| 1575 | { | ||
| 1576 | struct drm_device *dev = state->dev; | ||
| 1577 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1578 | struct amdgpu_display_manager *dm = &adev->dm; | ||
| 1579 | struct drm_private_obj *obj; | ||
| 1580 | struct drm_private_state *old_obj_state; | ||
| 1581 | int i; | ||
| 1582 | |||
| 1583 | for_each_old_private_obj_in_state(state, obj, old_obj_state, i) { | ||
| 1584 | if (obj->funcs == dm->atomic_obj.funcs) | ||
| 1585 | return to_dm_atomic_state(old_obj_state); | ||
| 1586 | } | ||
| 1587 | |||
| 1588 | return NULL; | ||
| 1589 | } | ||
| 1590 | |||
| 1591 | static struct drm_private_state * | ||
| 1592 | dm_atomic_duplicate_state(struct drm_private_obj *obj) | ||
| 1593 | { | ||
| 1594 | struct dm_atomic_state *old_state, *new_state; | ||
| 1595 | |||
| 1596 | new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); | ||
| 1597 | if (!new_state) | ||
| 1598 | return NULL; | ||
| 1599 | |||
| 1600 | __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); | ||
| 1601 | |||
| 1602 | new_state->context = dc_create_state(); | ||
| 1603 | if (!new_state->context) { | ||
| 1604 | kfree(new_state); | ||
| 1605 | return NULL; | ||
| 1606 | } | ||
| 1607 | |||
| 1608 | old_state = to_dm_atomic_state(obj->state); | ||
| 1609 | if (old_state && old_state->context) | ||
| 1610 | dc_resource_state_copy_construct(old_state->context, | ||
| 1611 | new_state->context); | ||
| 1612 | |||
| 1613 | return &new_state->base; | ||
| 1614 | } | ||
| 1615 | |||
| 1616 | static void dm_atomic_destroy_state(struct drm_private_obj *obj, | ||
| 1617 | struct drm_private_state *state) | ||
| 1618 | { | ||
| 1619 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); | ||
| 1620 | |||
| 1621 | if (dm_state && dm_state->context) | ||
| 1622 | dc_release_state(dm_state->context); | ||
| 1623 | |||
| 1624 | kfree(dm_state); | ||
| 1625 | } | ||
| 1626 | |||
| 1627 | static struct drm_private_state_funcs dm_atomic_state_funcs = { | ||
| 1628 | .atomic_duplicate_state = dm_atomic_duplicate_state, | ||
| 1629 | .atomic_destroy_state = dm_atomic_destroy_state, | ||
| 1630 | }; | ||
| 1631 | |||
| 1546 | static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) | 1632 | static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) |
| 1547 | { | 1633 | { |
| 1634 | struct dm_atomic_state *state; | ||
| 1548 | int r; | 1635 | int r; |
| 1549 | 1636 | ||
| 1550 | adev->mode_info.mode_config_initialized = true; | 1637 | adev->mode_info.mode_config_initialized = true; |
| @@ -1562,6 +1649,24 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) | |||
| 1562 | 1649 | ||
| 1563 | adev->ddev->mode_config.fb_base = adev->gmc.aper_base; | 1650 | adev->ddev->mode_config.fb_base = adev->gmc.aper_base; |
| 1564 | 1651 | ||
| 1652 | drm_modeset_lock_init(&adev->dm.atomic_obj_lock); | ||
| 1653 | |||
| 1654 | state = kzalloc(sizeof(*state), GFP_KERNEL); | ||
| 1655 | if (!state) | ||
| 1656 | return -ENOMEM; | ||
| 1657 | |||
| 1658 | state->context = dc_create_state(); | ||
| 1659 | if (!state->context) { | ||
| 1660 | kfree(state); | ||
| 1661 | return -ENOMEM; | ||
| 1662 | } | ||
| 1663 | |||
| 1664 | dc_resource_state_copy_construct_current(adev->dm.dc, state->context); | ||
| 1665 | |||
| 1666 | drm_atomic_private_obj_init(&adev->dm.atomic_obj, | ||
| 1667 | &state->base, | ||
| 1668 | &dm_atomic_state_funcs); | ||
| 1669 | |||
| 1565 | r = amdgpu_display_modeset_create_props(adev); | 1670 | r = amdgpu_display_modeset_create_props(adev); |
| 1566 | if (r) | 1671 | if (r) |
| 1567 | return r; | 1672 | return r; |
| @@ -1569,27 +1674,60 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) | |||
| 1569 | return 0; | 1674 | return 0; |
| 1570 | } | 1675 | } |
| 1571 | 1676 | ||
| 1677 | #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 | ||
| 1678 | #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 | ||
| 1679 | |||
| 1572 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ | 1680 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ |
| 1573 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) | 1681 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) |
| 1574 | 1682 | ||
| 1683 | static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) | ||
| 1684 | { | ||
| 1685 | #if defined(CONFIG_ACPI) | ||
| 1686 | struct amdgpu_dm_backlight_caps caps; | ||
| 1687 | |||
| 1688 | if (dm->backlight_caps.caps_valid) | ||
| 1689 | return; | ||
| 1690 | |||
| 1691 | amdgpu_acpi_get_backlight_caps(dm->adev, &caps); | ||
| 1692 | if (caps.caps_valid) { | ||
| 1693 | dm->backlight_caps.min_input_signal = caps.min_input_signal; | ||
| 1694 | dm->backlight_caps.max_input_signal = caps.max_input_signal; | ||
| 1695 | dm->backlight_caps.caps_valid = true; | ||
| 1696 | } else { | ||
| 1697 | dm->backlight_caps.min_input_signal = | ||
| 1698 | AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; | ||
| 1699 | dm->backlight_caps.max_input_signal = | ||
| 1700 | AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; | ||
| 1701 | } | ||
| 1702 | #else | ||
| 1703 | dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; | ||
| 1704 | dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; | ||
| 1705 | #endif | ||
| 1706 | } | ||
| 1707 | |||
| 1575 | static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) | 1708 | static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) |
| 1576 | { | 1709 | { |
| 1577 | struct amdgpu_display_manager *dm = bl_get_data(bd); | 1710 | struct amdgpu_display_manager *dm = bl_get_data(bd); |
| 1711 | struct amdgpu_dm_backlight_caps caps; | ||
| 1712 | uint32_t brightness = bd->props.brightness; | ||
| 1578 | 1713 | ||
| 1579 | /* backlight_pwm_u16_16 parameter is in unsigned 32 bit, 16 bit integer | 1714 | amdgpu_dm_update_backlight_caps(dm); |
| 1580 | * and 16 bit fractional, where 1.0 is max backlight value. | 1715 | caps = dm->backlight_caps; |
| 1581 | * bd->props.brightness is 8 bit format and needs to be converted by | ||
| 1582 | * scaling via copy lower byte to upper byte of 16 bit value. | ||
| 1583 | */ | ||
| 1584 | uint32_t brightness = bd->props.brightness * 0x101; | ||
| 1585 | |||
| 1586 | /* | 1716 | /* |
| 1587 | * PWM interperts 0 as 100% rather than 0% because of HW | 1717 | * The brightness input is in the range 0-255 |
| 1588 | * limitation for level 0. So limiting minimum brightness level | 1718 | * It needs to be rescaled to be between the |
| 1589 | * to 1. | 1719 | * requested min and max input signal |
| 1720 | * | ||
| 1721 | * It also needs to be scaled up by 0x101 to | ||
| 1722 | * match the DC interface which has a range of | ||
| 1723 | * 0 to 0xffff | ||
| 1590 | */ | 1724 | */ |
| 1591 | if (bd->props.brightness < 1) | 1725 | brightness = |
| 1592 | brightness = 0x101; | 1726 | brightness |
| 1727 | * 0x101 | ||
| 1728 | * (caps.max_input_signal - caps.min_input_signal) | ||
| 1729 | / AMDGPU_MAX_BL_LEVEL | ||
| 1730 | + caps.min_input_signal * 0x101; | ||
| 1593 | 1731 | ||
| 1594 | if (dc_link_set_backlight_level(dm->backlight_link, | 1732 | if (dc_link_set_backlight_level(dm->backlight_link, |
| 1595 | brightness, 0, 0)) | 1733 | brightness, 0, 0)) |
| @@ -1619,6 +1757,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) | |||
| 1619 | char bl_name[16]; | 1757 | char bl_name[16]; |
| 1620 | struct backlight_properties props = { 0 }; | 1758 | struct backlight_properties props = { 0 }; |
| 1621 | 1759 | ||
| 1760 | amdgpu_dm_update_backlight_caps(dm); | ||
| 1761 | |||
| 1622 | props.max_brightness = AMDGPU_MAX_BL_LEVEL; | 1762 | props.max_brightness = AMDGPU_MAX_BL_LEVEL; |
| 1623 | props.brightness = AMDGPU_MAX_BL_LEVEL; | 1763 | props.brightness = AMDGPU_MAX_BL_LEVEL; |
| 1624 | props.type = BACKLIGHT_RAW; | 1764 | props.type = BACKLIGHT_RAW; |
| @@ -1850,6 +1990,7 @@ fail: | |||
| 1850 | static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) | 1990 | static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) |
| 1851 | { | 1991 | { |
| 1852 | drm_mode_config_cleanup(dm->ddev); | 1992 | drm_mode_config_cleanup(dm->ddev); |
| 1993 | drm_atomic_private_obj_fini(&dm->atomic_obj); | ||
| 1853 | return; | 1994 | return; |
| 1854 | } | 1995 | } |
| 1855 | 1996 | ||
| @@ -1869,73 +2010,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev) | |||
| 1869 | /* TODO: implement later */ | 2010 | /* TODO: implement later */ |
| 1870 | } | 2011 | } |
| 1871 | 2012 | ||
| 1872 | static int amdgpu_notify_freesync(struct drm_device *dev, void *data, | ||
| 1873 | struct drm_file *filp) | ||
| 1874 | { | ||
| 1875 | struct drm_atomic_state *state; | ||
| 1876 | struct drm_modeset_acquire_ctx ctx; | ||
| 1877 | struct drm_crtc *crtc; | ||
| 1878 | struct drm_connector *connector; | ||
| 1879 | struct drm_connector_state *old_con_state, *new_con_state; | ||
| 1880 | int ret = 0; | ||
| 1881 | uint8_t i; | ||
| 1882 | bool enable = false; | ||
| 1883 | |||
| 1884 | drm_modeset_acquire_init(&ctx, 0); | ||
| 1885 | |||
| 1886 | state = drm_atomic_state_alloc(dev); | ||
| 1887 | if (!state) { | ||
| 1888 | ret = -ENOMEM; | ||
| 1889 | goto out; | ||
| 1890 | } | ||
| 1891 | state->acquire_ctx = &ctx; | ||
| 1892 | |||
| 1893 | retry: | ||
| 1894 | drm_for_each_crtc(crtc, dev) { | ||
| 1895 | ret = drm_atomic_add_affected_connectors(state, crtc); | ||
| 1896 | if (ret) | ||
| 1897 | goto fail; | ||
| 1898 | |||
| 1899 | /* TODO rework amdgpu_dm_commit_planes so we don't need this */ | ||
| 1900 | ret = drm_atomic_add_affected_planes(state, crtc); | ||
| 1901 | if (ret) | ||
| 1902 | goto fail; | ||
| 1903 | } | ||
| 1904 | |||
| 1905 | for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { | ||
| 1906 | struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); | ||
| 1907 | struct drm_crtc_state *new_crtc_state; | ||
| 1908 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); | ||
| 1909 | struct dm_crtc_state *dm_new_crtc_state; | ||
| 1910 | |||
| 1911 | if (!acrtc) { | ||
| 1912 | ASSERT(0); | ||
| 1913 | continue; | ||
| 1914 | } | ||
| 1915 | |||
| 1916 | new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); | ||
| 1917 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); | ||
| 1918 | |||
| 1919 | dm_new_crtc_state->freesync_enabled = enable; | ||
| 1920 | } | ||
| 1921 | |||
| 1922 | ret = drm_atomic_commit(state); | ||
| 1923 | |||
| 1924 | fail: | ||
| 1925 | if (ret == -EDEADLK) { | ||
| 1926 | drm_atomic_state_clear(state); | ||
| 1927 | drm_modeset_backoff(&ctx); | ||
| 1928 | goto retry; | ||
| 1929 | } | ||
| 1930 | |||
| 1931 | drm_atomic_state_put(state); | ||
| 1932 | |||
| 1933 | out: | ||
| 1934 | drm_modeset_drop_locks(&ctx); | ||
| 1935 | drm_modeset_acquire_fini(&ctx); | ||
| 1936 | return ret; | ||
| 1937 | } | ||
| 1938 | |||
| 1939 | static const struct amdgpu_display_funcs dm_display_funcs = { | 2013 | static const struct amdgpu_display_funcs dm_display_funcs = { |
| 1940 | .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ | 2014 | .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ |
| 1941 | .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ | 2015 | .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ |
| @@ -1948,8 +2022,6 @@ static const struct amdgpu_display_funcs dm_display_funcs = { | |||
| 1948 | dm_crtc_get_scanoutpos,/* called unconditionally */ | 2022 | dm_crtc_get_scanoutpos,/* called unconditionally */ |
| 1949 | .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ | 2023 | .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ |
| 1950 | .add_connector = NULL, /* VBIOS parsing. DAL does it. */ | 2024 | .add_connector = NULL, /* VBIOS parsing. DAL does it. */ |
| 1951 | .notify_freesync = amdgpu_notify_freesync, | ||
| 1952 | |||
| 1953 | }; | 2025 | }; |
| 1954 | 2026 | ||
| 1955 | #if defined(CONFIG_DEBUG_KERNEL_DC) | 2027 | #if defined(CONFIG_DEBUG_KERNEL_DC) |
| @@ -2550,7 +2622,8 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_ | |||
| 2550 | static void | 2622 | static void |
| 2551 | fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, | 2623 | fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, |
| 2552 | const struct drm_display_mode *mode_in, | 2624 | const struct drm_display_mode *mode_in, |
| 2553 | const struct drm_connector *connector) | 2625 | const struct drm_connector *connector, |
| 2626 | const struct dc_stream_state *old_stream) | ||
| 2554 | { | 2627 | { |
| 2555 | struct dc_crtc_timing *timing_out = &stream->timing; | 2628 | struct dc_crtc_timing *timing_out = &stream->timing; |
| 2556 | const struct drm_display_info *info = &connector->display_info; | 2629 | const struct drm_display_info *info = &connector->display_info; |
| @@ -2576,7 +2649,18 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, | |||
| 2576 | connector); | 2649 | connector); |
| 2577 | timing_out->scan_type = SCANNING_TYPE_NODATA; | 2650 | timing_out->scan_type = SCANNING_TYPE_NODATA; |
| 2578 | timing_out->hdmi_vic = 0; | 2651 | timing_out->hdmi_vic = 0; |
| 2579 | timing_out->vic = drm_match_cea_mode(mode_in); | 2652 | |
| 2653 | if(old_stream) { | ||
| 2654 | timing_out->vic = old_stream->timing.vic; | ||
| 2655 | timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; | ||
| 2656 | timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; | ||
| 2657 | } else { | ||
| 2658 | timing_out->vic = drm_match_cea_mode(mode_in); | ||
| 2659 | if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) | ||
| 2660 | timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; | ||
| 2661 | if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) | ||
| 2662 | timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; | ||
| 2663 | } | ||
| 2580 | 2664 | ||
| 2581 | timing_out->h_addressable = mode_in->crtc_hdisplay; | 2665 | timing_out->h_addressable = mode_in->crtc_hdisplay; |
| 2582 | timing_out->h_total = mode_in->crtc_htotal; | 2666 | timing_out->h_total = mode_in->crtc_htotal; |
| @@ -2592,10 +2676,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, | |||
| 2592 | mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; | 2676 | mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; |
| 2593 | timing_out->pix_clk_khz = mode_in->crtc_clock; | 2677 | timing_out->pix_clk_khz = mode_in->crtc_clock; |
| 2594 | timing_out->aspect_ratio = get_aspect_ratio(mode_in); | 2678 | timing_out->aspect_ratio = get_aspect_ratio(mode_in); |
| 2595 | if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) | ||
| 2596 | timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; | ||
| 2597 | if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) | ||
| 2598 | timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; | ||
| 2599 | 2679 | ||
| 2600 | stream->output_color_space = get_output_color_space(timing_out); | 2680 | stream->output_color_space = get_output_color_space(timing_out); |
| 2601 | 2681 | ||
| @@ -2758,13 +2838,18 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) | |||
| 2758 | static struct dc_stream_state * | 2838 | static struct dc_stream_state * |
| 2759 | create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | 2839 | create_stream_for_sink(struct amdgpu_dm_connector *aconnector, |
| 2760 | const struct drm_display_mode *drm_mode, | 2840 | const struct drm_display_mode *drm_mode, |
| 2761 | const struct dm_connector_state *dm_state) | 2841 | const struct dm_connector_state *dm_state, |
| 2842 | const struct dc_stream_state *old_stream) | ||
| 2762 | { | 2843 | { |
| 2763 | struct drm_display_mode *preferred_mode = NULL; | 2844 | struct drm_display_mode *preferred_mode = NULL; |
| 2764 | struct drm_connector *drm_connector; | 2845 | struct drm_connector *drm_connector; |
| 2765 | struct dc_stream_state *stream = NULL; | 2846 | struct dc_stream_state *stream = NULL; |
| 2766 | struct drm_display_mode mode = *drm_mode; | 2847 | struct drm_display_mode mode = *drm_mode; |
| 2767 | bool native_mode_found = false; | 2848 | bool native_mode_found = false; |
| 2849 | bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; | ||
| 2850 | int mode_refresh; | ||
| 2851 | int preferred_refresh = 0; | ||
| 2852 | |||
| 2768 | struct dc_sink *sink = NULL; | 2853 | struct dc_sink *sink = NULL; |
| 2769 | if (aconnector == NULL) { | 2854 | if (aconnector == NULL) { |
| 2770 | DRM_ERROR("aconnector is NULL!\n"); | 2855 | DRM_ERROR("aconnector is NULL!\n"); |
| @@ -2803,6 +2888,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
| 2803 | struct drm_display_mode, | 2888 | struct drm_display_mode, |
| 2804 | head); | 2889 | head); |
| 2805 | 2890 | ||
| 2891 | mode_refresh = drm_mode_vrefresh(&mode); | ||
| 2892 | |||
| 2806 | if (preferred_mode == NULL) { | 2893 | if (preferred_mode == NULL) { |
| 2807 | /* | 2894 | /* |
| 2808 | * This may not be an error, the use case is when we have no | 2895 | * This may not be an error, the use case is when we have no |
| @@ -2815,13 +2902,23 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
| 2815 | decide_crtc_timing_for_drm_display_mode( | 2902 | decide_crtc_timing_for_drm_display_mode( |
| 2816 | &mode, preferred_mode, | 2903 | &mode, preferred_mode, |
| 2817 | dm_state ? (dm_state->scaling != RMX_OFF) : false); | 2904 | dm_state ? (dm_state->scaling != RMX_OFF) : false); |
| 2905 | preferred_refresh = drm_mode_vrefresh(preferred_mode); | ||
| 2818 | } | 2906 | } |
| 2819 | 2907 | ||
| 2820 | if (!dm_state) | 2908 | if (!dm_state) |
| 2821 | drm_mode_set_crtcinfo(&mode, 0); | 2909 | drm_mode_set_crtcinfo(&mode, 0); |
| 2822 | 2910 | ||
| 2823 | fill_stream_properties_from_drm_display_mode(stream, | 2911 | /* |
| 2824 | &mode, &aconnector->base); | 2912 | * If scaling is enabled and refresh rate didn't change |
| 2913 | * we copy the vic and polarities of the old timings | ||
| 2914 | */ | ||
| 2915 | if (!scale || mode_refresh != preferred_refresh) | ||
| 2916 | fill_stream_properties_from_drm_display_mode(stream, | ||
| 2917 | &mode, &aconnector->base, NULL); | ||
| 2918 | else | ||
| 2919 | fill_stream_properties_from_drm_display_mode(stream, | ||
| 2920 | &mode, &aconnector->base, old_stream); | ||
| 2921 | |||
| 2825 | update_stream_scaling_settings(&mode, dm_state, stream); | 2922 | update_stream_scaling_settings(&mode, dm_state, stream); |
| 2826 | 2923 | ||
| 2827 | fill_audio_info( | 2924 | fill_audio_info( |
| @@ -2901,7 +2998,9 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) | |||
| 2901 | 2998 | ||
| 2902 | state->adjust = cur->adjust; | 2999 | state->adjust = cur->adjust; |
| 2903 | state->vrr_infopacket = cur->vrr_infopacket; | 3000 | state->vrr_infopacket = cur->vrr_infopacket; |
| 2904 | state->freesync_enabled = cur->freesync_enabled; | 3001 | state->abm_level = cur->abm_level; |
| 3002 | state->vrr_supported = cur->vrr_supported; | ||
| 3003 | state->freesync_config = cur->freesync_config; | ||
| 2905 | 3004 | ||
| 2906 | /* TODO Duplicate dc_stream after objects are stream object is flattened */ | 3005 | /* TODO Duplicate dc_stream after objects are stream object is flattened */ |
| 2907 | 3006 | ||
| @@ -2995,9 +3094,11 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, | |||
| 2995 | rmx_type = RMX_FULL; | 3094 | rmx_type = RMX_FULL; |
| 2996 | break; | 3095 | break; |
| 2997 | case DRM_MODE_SCALE_NONE: | 3096 | case DRM_MODE_SCALE_NONE: |
| 2998 | default: | ||
| 2999 | rmx_type = RMX_OFF; | 3097 | rmx_type = RMX_OFF; |
| 3000 | break; | 3098 | break; |
| 3099 | default: | ||
| 3100 | rmx_type = RMX_ASPECT; | ||
| 3101 | break; | ||
| 3001 | } | 3102 | } |
| 3002 | 3103 | ||
| 3003 | if (dm_old_state->scaling == rmx_type) | 3104 | if (dm_old_state->scaling == rmx_type) |
| @@ -3017,6 +3118,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, | |||
| 3017 | } else if (property == adev->mode_info.max_bpc_property) { | 3118 | } else if (property == adev->mode_info.max_bpc_property) { |
| 3018 | dm_new_state->max_bpc = val; | 3119 | dm_new_state->max_bpc = val; |
| 3019 | ret = 0; | 3120 | ret = 0; |
| 3121 | } else if (property == adev->mode_info.abm_level_property) { | ||
| 3122 | dm_new_state->abm_level = val; | ||
| 3123 | ret = 0; | ||
| 3020 | } | 3124 | } |
| 3021 | 3125 | ||
| 3022 | return ret; | 3126 | return ret; |
| @@ -3062,7 +3166,11 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, | |||
| 3062 | } else if (property == adev->mode_info.max_bpc_property) { | 3166 | } else if (property == adev->mode_info.max_bpc_property) { |
| 3063 | *val = dm_state->max_bpc; | 3167 | *val = dm_state->max_bpc; |
| 3064 | ret = 0; | 3168 | ret = 0; |
| 3169 | } else if (property == adev->mode_info.abm_level_property) { | ||
| 3170 | *val = dm_state->abm_level; | ||
| 3171 | ret = 0; | ||
| 3065 | } | 3172 | } |
| 3173 | |||
| 3066 | return ret; | 3174 | return ret; |
| 3067 | } | 3175 | } |
| 3068 | 3176 | ||
| @@ -3102,7 +3210,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) | |||
| 3102 | state = kzalloc(sizeof(*state), GFP_KERNEL); | 3210 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
| 3103 | 3211 | ||
| 3104 | if (state) { | 3212 | if (state) { |
| 3105 | state->scaling = RMX_OFF; | 3213 | state->scaling = RMX_ASPECT; |
| 3106 | state->underscan_enable = false; | 3214 | state->underscan_enable = false; |
| 3107 | state->underscan_hborder = 0; | 3215 | state->underscan_hborder = 0; |
| 3108 | state->underscan_vborder = 0; | 3216 | state->underscan_vborder = 0; |
| @@ -3126,7 +3234,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) | |||
| 3126 | __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); | 3234 | __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); |
| 3127 | 3235 | ||
| 3128 | new_state->freesync_capable = state->freesync_capable; | 3236 | new_state->freesync_capable = state->freesync_capable; |
| 3129 | new_state->freesync_enable = state->freesync_enable; | 3237 | new_state->abm_level = state->abm_level; |
| 3130 | 3238 | ||
| 3131 | return &new_state->base; | 3239 | return &new_state->base; |
| 3132 | } | 3240 | } |
| @@ -3228,7 +3336,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec | |||
| 3228 | goto fail; | 3336 | goto fail; |
| 3229 | } | 3337 | } |
| 3230 | 3338 | ||
| 3231 | stream = create_stream_for_sink(aconnector, mode, NULL); | 3339 | stream = create_stream_for_sink(aconnector, mode, NULL, NULL); |
| 3232 | if (stream == NULL) { | 3340 | if (stream == NULL) { |
| 3233 | DRM_ERROR("Failed to create stream for sink!\n"); | 3341 | DRM_ERROR("Failed to create stream for sink!\n"); |
| 3234 | goto fail; | 3342 | goto fail; |
| @@ -3876,6 +3984,17 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, | |||
| 3876 | adev->mode_info.max_bpc_property, | 3984 | adev->mode_info.max_bpc_property, |
| 3877 | 0); | 3985 | 0); |
| 3878 | 3986 | ||
| 3987 | if (connector_type == DRM_MODE_CONNECTOR_eDP && | ||
| 3988 | dc_is_dmcu_initialized(adev->dm.dc)) { | ||
| 3989 | drm_object_attach_property(&aconnector->base.base, | ||
| 3990 | adev->mode_info.abm_level_property, 0); | ||
| 3991 | } | ||
| 3992 | |||
| 3993 | if (connector_type == DRM_MODE_CONNECTOR_HDMIA || | ||
| 3994 | connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | ||
| 3995 | drm_connector_attach_vrr_capable_property( | ||
| 3996 | &aconnector->base); | ||
| 3997 | } | ||
| 3879 | } | 3998 | } |
| 3880 | 3999 | ||
| 3881 | static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, | 4000 | static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, |
| @@ -4252,6 +4371,91 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc) | |||
| 4252 | acrtc->crtc_id); | 4371 | acrtc->crtc_id); |
| 4253 | } | 4372 | } |
| 4254 | 4373 | ||
| 4374 | struct dc_stream_status *dc_state_get_stream_status( | ||
| 4375 | struct dc_state *state, | ||
| 4376 | struct dc_stream_state *stream) | ||
| 4377 | { | ||
| 4378 | uint8_t i; | ||
| 4379 | |||
| 4380 | for (i = 0; i < state->stream_count; i++) { | ||
| 4381 | if (stream == state->streams[i]) | ||
| 4382 | return &state->stream_status[i]; | ||
| 4383 | } | ||
| 4384 | |||
| 4385 | return NULL; | ||
| 4386 | } | ||
| 4387 | |||
| 4388 | static void update_freesync_state_on_stream( | ||
| 4389 | struct amdgpu_display_manager *dm, | ||
| 4390 | struct dm_crtc_state *new_crtc_state, | ||
| 4391 | struct dc_stream_state *new_stream) | ||
| 4392 | { | ||
| 4393 | struct mod_vrr_params vrr = {0}; | ||
| 4394 | struct dc_info_packet vrr_infopacket = {0}; | ||
| 4395 | struct mod_freesync_config config = new_crtc_state->freesync_config; | ||
| 4396 | |||
| 4397 | if (!new_stream) | ||
| 4398 | return; | ||
| 4399 | |||
| 4400 | /* | ||
| 4401 | * TODO: Determine why min/max totals and vrefresh can be 0 here. | ||
| 4402 | * For now it's sufficient to just guard against these conditions. | ||
| 4403 | */ | ||
| 4404 | |||
| 4405 | if (!new_stream->timing.h_total || !new_stream->timing.v_total) | ||
| 4406 | return; | ||
| 4407 | |||
| 4408 | if (new_crtc_state->vrr_supported && | ||
| 4409 | config.min_refresh_in_uhz && | ||
| 4410 | config.max_refresh_in_uhz) { | ||
| 4411 | config.state = new_crtc_state->base.vrr_enabled ? | ||
| 4412 | VRR_STATE_ACTIVE_VARIABLE : | ||
| 4413 | VRR_STATE_INACTIVE; | ||
| 4414 | } else { | ||
| 4415 | config.state = VRR_STATE_UNSUPPORTED; | ||
| 4416 | } | ||
| 4417 | |||
| 4418 | mod_freesync_build_vrr_params(dm->freesync_module, | ||
| 4419 | new_stream, | ||
| 4420 | &config, &vrr); | ||
| 4421 | |||
| 4422 | mod_freesync_build_vrr_infopacket( | ||
| 4423 | dm->freesync_module, | ||
| 4424 | new_stream, | ||
| 4425 | &vrr, | ||
| 4426 | packet_type_vrr, | ||
| 4427 | transfer_func_unknown, | ||
| 4428 | &vrr_infopacket); | ||
| 4429 | |||
| 4430 | new_crtc_state->freesync_timing_changed = | ||
| 4431 | (memcmp(&new_crtc_state->adjust, | ||
| 4432 | &vrr.adjust, | ||
| 4433 | sizeof(vrr.adjust)) != 0); | ||
| 4434 | |||
| 4435 | new_crtc_state->freesync_vrr_info_changed = | ||
| 4436 | (memcmp(&new_crtc_state->vrr_infopacket, | ||
| 4437 | &vrr_infopacket, | ||
| 4438 | sizeof(vrr_infopacket)) != 0); | ||
| 4439 | |||
| 4440 | new_crtc_state->adjust = vrr.adjust; | ||
| 4441 | new_crtc_state->vrr_infopacket = vrr_infopacket; | ||
| 4442 | |||
| 4443 | new_stream->adjust = new_crtc_state->adjust; | ||
| 4444 | new_stream->vrr_infopacket = vrr_infopacket; | ||
| 4445 | |||
| 4446 | if (new_crtc_state->freesync_vrr_info_changed) | ||
| 4447 | DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", | ||
| 4448 | new_crtc_state->base.crtc->base.id, | ||
| 4449 | (int)new_crtc_state->base.vrr_enabled, | ||
| 4450 | (int)vrr.state); | ||
| 4451 | |||
| 4452 | if (new_crtc_state->freesync_timing_changed) | ||
| 4453 | DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n", | ||
| 4454 | new_crtc_state->base.crtc->base.id, | ||
| 4455 | vrr.adjust.v_total_min, | ||
| 4456 | vrr.adjust.v_total_max); | ||
| 4457 | } | ||
| 4458 | |||
| 4255 | /* | 4459 | /* |
| 4256 | * Executes flip | 4460 | * Executes flip |
| 4257 | * | 4461 | * |
| @@ -4273,6 +4477,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, | |||
| 4273 | struct dc_flip_addrs addr = { {0} }; | 4477 | struct dc_flip_addrs addr = { {0} }; |
| 4274 | /* TODO eliminate or rename surface_update */ | 4478 | /* TODO eliminate or rename surface_update */ |
| 4275 | struct dc_surface_update surface_updates[1] = { {0} }; | 4479 | struct dc_surface_update surface_updates[1] = { {0} }; |
| 4480 | struct dc_stream_update stream_update = {0}; | ||
| 4276 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); | 4481 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); |
| 4277 | struct dc_stream_status *stream_status; | 4482 | struct dc_stream_status *stream_status; |
| 4278 | 4483 | ||
| @@ -4345,11 +4550,26 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, | |||
| 4345 | } | 4550 | } |
| 4346 | surface_updates->flip_addr = &addr; | 4551 | surface_updates->flip_addr = &addr; |
| 4347 | 4552 | ||
| 4553 | if (acrtc_state->stream) { | ||
| 4554 | update_freesync_state_on_stream( | ||
| 4555 | &adev->dm, | ||
| 4556 | acrtc_state, | ||
| 4557 | acrtc_state->stream); | ||
| 4558 | |||
| 4559 | if (acrtc_state->freesync_timing_changed) | ||
| 4560 | stream_update.adjust = | ||
| 4561 | &acrtc_state->stream->adjust; | ||
| 4562 | |||
| 4563 | if (acrtc_state->freesync_vrr_info_changed) | ||
| 4564 | stream_update.vrr_infopacket = | ||
| 4565 | &acrtc_state->stream->vrr_infopacket; | ||
| 4566 | } | ||
| 4567 | |||
| 4348 | dc_commit_updates_for_stream(adev->dm.dc, | 4568 | dc_commit_updates_for_stream(adev->dm.dc, |
| 4349 | surface_updates, | 4569 | surface_updates, |
| 4350 | 1, | 4570 | 1, |
| 4351 | acrtc_state->stream, | 4571 | acrtc_state->stream, |
| 4352 | NULL, | 4572 | &stream_update, |
| 4353 | &surface_updates->surface, | 4573 | &surface_updates->surface, |
| 4354 | state); | 4574 | state); |
| 4355 | 4575 | ||
| @@ -4382,6 +4602,7 @@ static bool commit_planes_to_stream( | |||
| 4382 | struct dc_stream_state *dc_stream = dm_new_crtc_state->stream; | 4602 | struct dc_stream_state *dc_stream = dm_new_crtc_state->stream; |
| 4383 | struct dc_stream_update *stream_update = | 4603 | struct dc_stream_update *stream_update = |
| 4384 | kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL); | 4604 | kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL); |
| 4605 | unsigned int abm_level; | ||
| 4385 | 4606 | ||
| 4386 | if (!stream_update) { | 4607 | if (!stream_update) { |
| 4387 | BREAK_TO_DEBUGGER(); | 4608 | BREAK_TO_DEBUGGER(); |
| @@ -4409,9 +4630,9 @@ static bool commit_planes_to_stream( | |||
| 4409 | stream_update->dst = dc_stream->dst; | 4630 | stream_update->dst = dc_stream->dst; |
| 4410 | stream_update->out_transfer_func = dc_stream->out_transfer_func; | 4631 | stream_update->out_transfer_func = dc_stream->out_transfer_func; |
| 4411 | 4632 | ||
| 4412 | if (dm_new_crtc_state->freesync_enabled != dm_old_crtc_state->freesync_enabled) { | 4633 | if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) { |
| 4413 | stream_update->vrr_infopacket = &dc_stream->vrr_infopacket; | 4634 | abm_level = dm_new_crtc_state->abm_level; |
| 4414 | stream_update->adjust = &dc_stream->adjust; | 4635 | stream_update->abm_level = &abm_level; |
| 4415 | } | 4636 | } |
| 4416 | 4637 | ||
| 4417 | for (i = 0; i < new_plane_count; i++) { | 4638 | for (i = 0; i < new_plane_count; i++) { |
| @@ -4455,6 +4676,7 @@ static bool commit_planes_to_stream( | |||
| 4455 | } | 4676 | } |
| 4456 | 4677 | ||
| 4457 | static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, | 4678 | static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, |
| 4679 | struct dc_state *dc_state, | ||
| 4458 | struct drm_device *dev, | 4680 | struct drm_device *dev, |
| 4459 | struct amdgpu_display_manager *dm, | 4681 | struct amdgpu_display_manager *dm, |
| 4460 | struct drm_crtc *pcrtc, | 4682 | struct drm_crtc *pcrtc, |
| @@ -4471,7 +4693,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, | |||
| 4471 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); | 4693 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); |
| 4472 | struct dm_crtc_state *dm_old_crtc_state = | 4694 | struct dm_crtc_state *dm_old_crtc_state = |
| 4473 | to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); | 4695 | to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); |
| 4474 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); | ||
| 4475 | int planes_count = 0; | 4696 | int planes_count = 0; |
| 4476 | unsigned long flags; | 4697 | unsigned long flags; |
| 4477 | 4698 | ||
| @@ -4532,7 +4753,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, | |||
| 4532 | crtc, | 4753 | crtc, |
| 4533 | fb, | 4754 | fb, |
| 4534 | (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank, | 4755 | (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank, |
| 4535 | dm_state->context); | 4756 | dc_state); |
| 4536 | } | 4757 | } |
| 4537 | 4758 | ||
| 4538 | } | 4759 | } |
| @@ -4549,15 +4770,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, | |||
| 4549 | spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); | 4770 | spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); |
| 4550 | } | 4771 | } |
| 4551 | 4772 | ||
| 4552 | dc_stream_attach->adjust = acrtc_state->adjust; | 4773 | dc_stream_attach->abm_level = acrtc_state->abm_level; |
| 4553 | dc_stream_attach->vrr_infopacket = acrtc_state->vrr_infopacket; | ||
| 4554 | 4774 | ||
| 4555 | if (false == commit_planes_to_stream(dm->dc, | 4775 | if (false == commit_planes_to_stream(dm->dc, |
| 4556 | plane_states_constructed, | 4776 | plane_states_constructed, |
| 4557 | planes_count, | 4777 | planes_count, |
| 4558 | acrtc_state, | 4778 | acrtc_state, |
| 4559 | dm_old_crtc_state, | 4779 | dm_old_crtc_state, |
| 4560 | dm_state->context)) | 4780 | dc_state)) |
| 4561 | dm_error("%s: Failed to attach plane!\n", __func__); | 4781 | dm_error("%s: Failed to attach plane!\n", __func__); |
| 4562 | } else { | 4782 | } else { |
| 4563 | /*TODO BUG Here should go disable planes on CRTC. */ | 4783 | /*TODO BUG Here should go disable planes on CRTC. */ |
| @@ -4625,6 +4845,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4625 | struct amdgpu_device *adev = dev->dev_private; | 4845 | struct amdgpu_device *adev = dev->dev_private; |
| 4626 | struct amdgpu_display_manager *dm = &adev->dm; | 4846 | struct amdgpu_display_manager *dm = &adev->dm; |
| 4627 | struct dm_atomic_state *dm_state; | 4847 | struct dm_atomic_state *dm_state; |
| 4848 | struct dc_state *dc_state = NULL, *dc_state_temp = NULL; | ||
| 4628 | uint32_t i, j; | 4849 | uint32_t i, j; |
| 4629 | struct drm_crtc *crtc; | 4850 | struct drm_crtc *crtc; |
| 4630 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; | 4851 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
| @@ -4637,7 +4858,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4637 | 4858 | ||
| 4638 | drm_atomic_helper_update_legacy_modeset_state(dev, state); | 4859 | drm_atomic_helper_update_legacy_modeset_state(dev, state); |
| 4639 | 4860 | ||
| 4640 | dm_state = to_dm_atomic_state(state); | 4861 | dm_state = dm_atomic_get_new_state(state); |
| 4862 | if (dm_state && dm_state->context) { | ||
| 4863 | dc_state = dm_state->context; | ||
| 4864 | } else { | ||
| 4865 | /* No state changes, retain current state. */ | ||
| 4866 | dc_state_temp = dc_create_state(); | ||
| 4867 | ASSERT(dc_state_temp); | ||
| 4868 | dc_state = dc_state_temp; | ||
| 4869 | dc_resource_state_copy_construct_current(dm->dc, dc_state); | ||
| 4870 | } | ||
| 4641 | 4871 | ||
| 4642 | /* update changed items */ | 4872 | /* update changed items */ |
| 4643 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 4873 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
| @@ -4710,9 +4940,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4710 | } | 4940 | } |
| 4711 | } /* for_each_crtc_in_state() */ | 4941 | } /* for_each_crtc_in_state() */ |
| 4712 | 4942 | ||
| 4713 | if (dm_state->context) { | 4943 | if (dc_state) { |
| 4714 | dm_enable_per_frame_crtc_master_sync(dm_state->context); | 4944 | dm_enable_per_frame_crtc_master_sync(dc_state); |
| 4715 | WARN_ON(!dc_commit_state(dm->dc, dm_state->context)); | 4945 | WARN_ON(!dc_commit_state(dm->dc, dc_state)); |
| 4716 | } | 4946 | } |
| 4717 | 4947 | ||
| 4718 | for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { | 4948 | for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { |
| @@ -4725,13 +4955,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4725 | dc_stream_get_status(dm_new_crtc_state->stream); | 4955 | dc_stream_get_status(dm_new_crtc_state->stream); |
| 4726 | 4956 | ||
| 4727 | if (!status) | 4957 | if (!status) |
| 4958 | status = dc_state_get_stream_status(dc_state, | ||
| 4959 | dm_new_crtc_state->stream); | ||
| 4960 | |||
| 4961 | if (!status) | ||
| 4728 | DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); | 4962 | DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); |
| 4729 | else | 4963 | else |
| 4730 | acrtc->otg_inst = status->primary_otg_inst; | 4964 | acrtc->otg_inst = status->primary_otg_inst; |
| 4731 | } | 4965 | } |
| 4732 | } | 4966 | } |
| 4733 | 4967 | ||
| 4734 | /* Handle scaling and underscan changes*/ | 4968 | /* Handle scaling, underscan, and abm changes*/ |
| 4735 | for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { | 4969 | for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { |
| 4736 | struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); | 4970 | struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); |
| 4737 | struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); | 4971 | struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); |
| @@ -4747,11 +4981,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4747 | if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) | 4981 | if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) |
| 4748 | continue; | 4982 | continue; |
| 4749 | 4983 | ||
| 4750 | /* Skip anything that is not scaling or underscan changes */ | ||
| 4751 | if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) | ||
| 4752 | continue; | ||
| 4753 | 4984 | ||
| 4754 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); | 4985 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
| 4986 | dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); | ||
| 4987 | |||
| 4988 | /* Skip anything that is not scaling or underscan changes */ | ||
| 4989 | if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) && | ||
| 4990 | (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level)) | ||
| 4991 | continue; | ||
| 4755 | 4992 | ||
| 4756 | update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, | 4993 | update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, |
| 4757 | dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); | 4994 | dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); |
| @@ -4763,8 +5000,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4763 | WARN_ON(!status); | 5000 | WARN_ON(!status); |
| 4764 | WARN_ON(!status->plane_count); | 5001 | WARN_ON(!status->plane_count); |
| 4765 | 5002 | ||
| 4766 | dm_new_crtc_state->stream->adjust = dm_new_crtc_state->adjust; | 5003 | dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; |
| 4767 | dm_new_crtc_state->stream->vrr_infopacket = dm_new_crtc_state->vrr_infopacket; | ||
| 4768 | 5004 | ||
| 4769 | /*TODO How it works with MPO ?*/ | 5005 | /*TODO How it works with MPO ?*/ |
| 4770 | if (!commit_planes_to_stream( | 5006 | if (!commit_planes_to_stream( |
| @@ -4773,7 +5009,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4773 | status->plane_count, | 5009 | status->plane_count, |
| 4774 | dm_new_crtc_state, | 5010 | dm_new_crtc_state, |
| 4775 | to_dm_crtc_state(old_crtc_state), | 5011 | to_dm_crtc_state(old_crtc_state), |
| 4776 | dm_state->context)) | 5012 | dc_state)) |
| 4777 | dm_error("%s: Failed to update stream scaling!\n", __func__); | 5013 | dm_error("%s: Failed to update stream scaling!\n", __func__); |
| 4778 | } | 5014 | } |
| 4779 | 5015 | ||
| @@ -4806,7 +5042,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4806 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); | 5042 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
| 4807 | 5043 | ||
| 4808 | if (dm_new_crtc_state->stream) | 5044 | if (dm_new_crtc_state->stream) |
| 4809 | amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank); | 5045 | amdgpu_dm_commit_planes(state, dc_state, dev, |
| 5046 | dm, crtc, &wait_for_vblank); | ||
| 4810 | } | 5047 | } |
| 4811 | 5048 | ||
| 4812 | 5049 | ||
| @@ -4846,6 +5083,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4846 | for (i = 0; i < crtc_disable_count; i++) | 5083 | for (i = 0; i < crtc_disable_count; i++) |
| 4847 | pm_runtime_put_autosuspend(dev->dev); | 5084 | pm_runtime_put_autosuspend(dev->dev); |
| 4848 | pm_runtime_mark_last_busy(dev->dev); | 5085 | pm_runtime_mark_last_busy(dev->dev); |
| 5086 | |||
| 5087 | if (dc_state_temp) | ||
| 5088 | dc_release_state(dc_state_temp); | ||
| 4849 | } | 5089 | } |
| 4850 | 5090 | ||
| 4851 | 5091 | ||
| @@ -4989,20 +5229,18 @@ static int do_aquire_global_lock(struct drm_device *dev, | |||
| 4989 | return ret < 0 ? ret : 0; | 5229 | return ret < 0 ? ret : 0; |
| 4990 | } | 5230 | } |
| 4991 | 5231 | ||
| 4992 | void set_freesync_on_stream(struct amdgpu_display_manager *dm, | 5232 | static void get_freesync_config_for_crtc( |
| 4993 | struct dm_crtc_state *new_crtc_state, | 5233 | struct dm_crtc_state *new_crtc_state, |
| 4994 | struct dm_connector_state *new_con_state, | 5234 | struct dm_connector_state *new_con_state) |
| 4995 | struct dc_stream_state *new_stream) | ||
| 4996 | { | 5235 | { |
| 4997 | struct mod_freesync_config config = {0}; | 5236 | struct mod_freesync_config config = {0}; |
| 4998 | struct mod_vrr_params vrr = {0}; | ||
| 4999 | struct dc_info_packet vrr_infopacket = {0}; | ||
| 5000 | struct amdgpu_dm_connector *aconnector = | 5237 | struct amdgpu_dm_connector *aconnector = |
| 5001 | to_amdgpu_dm_connector(new_con_state->base.connector); | 5238 | to_amdgpu_dm_connector(new_con_state->base.connector); |
| 5002 | 5239 | ||
| 5003 | if (new_con_state->freesync_capable && | 5240 | new_crtc_state->vrr_supported = new_con_state->freesync_capable; |
| 5004 | new_con_state->freesync_enable) { | 5241 | |
| 5005 | config.state = new_crtc_state->freesync_enabled ? | 5242 | if (new_con_state->freesync_capable) { |
| 5243 | config.state = new_crtc_state->base.vrr_enabled ? | ||
| 5006 | VRR_STATE_ACTIVE_VARIABLE : | 5244 | VRR_STATE_ACTIVE_VARIABLE : |
| 5007 | VRR_STATE_INACTIVE; | 5245 | VRR_STATE_INACTIVE; |
| 5008 | config.min_refresh_in_uhz = | 5246 | config.min_refresh_in_uhz = |
| @@ -5012,19 +5250,18 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm, | |||
| 5012 | config.vsif_supported = true; | 5250 | config.vsif_supported = true; |
| 5013 | } | 5251 | } |
| 5014 | 5252 | ||
| 5015 | mod_freesync_build_vrr_params(dm->freesync_module, | 5253 | new_crtc_state->freesync_config = config; |
| 5016 | new_stream, | 5254 | } |
| 5017 | &config, &vrr); | ||
| 5018 | 5255 | ||
| 5019 | mod_freesync_build_vrr_infopacket(dm->freesync_module, | 5256 | static void reset_freesync_config_for_crtc( |
| 5020 | new_stream, | 5257 | struct dm_crtc_state *new_crtc_state) |
| 5021 | &vrr, | 5258 | { |
| 5022 | packet_type_fs1, | 5259 | new_crtc_state->vrr_supported = false; |
| 5023 | NULL, | ||
| 5024 | &vrr_infopacket); | ||
| 5025 | 5260 | ||
| 5026 | new_crtc_state->adjust = vrr.adjust; | 5261 | memset(&new_crtc_state->adjust, 0, |
| 5027 | new_crtc_state->vrr_infopacket = vrr_infopacket; | 5262 | sizeof(new_crtc_state->adjust)); |
| 5263 | memset(&new_crtc_state->vrr_infopacket, 0, | ||
| 5264 | sizeof(new_crtc_state->vrr_infopacket)); | ||
| 5028 | } | 5265 | } |
| 5029 | 5266 | ||
| 5030 | static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, | 5267 | static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, |
| @@ -5032,11 +5269,11 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, | |||
| 5032 | bool enable, | 5269 | bool enable, |
| 5033 | bool *lock_and_validation_needed) | 5270 | bool *lock_and_validation_needed) |
| 5034 | { | 5271 | { |
| 5272 | struct dm_atomic_state *dm_state = NULL; | ||
| 5035 | struct drm_crtc *crtc; | 5273 | struct drm_crtc *crtc; |
| 5036 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; | 5274 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
| 5037 | int i; | 5275 | int i; |
| 5038 | struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; | 5276 | struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; |
| 5039 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); | ||
| 5040 | struct dc_stream_state *new_stream; | 5277 | struct dc_stream_state *new_stream; |
| 5041 | int ret = 0; | 5278 | int ret = 0; |
| 5042 | 5279 | ||
| @@ -5084,7 +5321,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, | |||
| 5084 | 5321 | ||
| 5085 | new_stream = create_stream_for_sink(aconnector, | 5322 | new_stream = create_stream_for_sink(aconnector, |
| 5086 | &new_crtc_state->mode, | 5323 | &new_crtc_state->mode, |
| 5087 | dm_new_conn_state); | 5324 | dm_new_conn_state, |
| 5325 | dm_old_crtc_state->stream); | ||
| 5088 | 5326 | ||
| 5089 | /* | 5327 | /* |
| 5090 | * we can have no stream on ACTION_SET if a display | 5328 | * we can have no stream on ACTION_SET if a display |
| @@ -5099,8 +5337,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, | |||
| 5099 | break; | 5337 | break; |
| 5100 | } | 5338 | } |
| 5101 | 5339 | ||
| 5102 | set_freesync_on_stream(dm, dm_new_crtc_state, | 5340 | dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; |
| 5103 | dm_new_conn_state, new_stream); | ||
| 5104 | 5341 | ||
| 5105 | if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && | 5342 | if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && |
| 5106 | dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { | 5343 | dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { |
| @@ -5110,9 +5347,6 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, | |||
| 5110 | } | 5347 | } |
| 5111 | } | 5348 | } |
| 5112 | 5349 | ||
| 5113 | if (dm_old_crtc_state->freesync_enabled != dm_new_crtc_state->freesync_enabled) | ||
| 5114 | new_crtc_state->mode_changed = true; | ||
| 5115 | |||
| 5116 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) | 5350 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) |
| 5117 | goto next_crtc; | 5351 | goto next_crtc; |
| 5118 | 5352 | ||
| @@ -5134,6 +5368,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, | |||
| 5134 | if (!dm_old_crtc_state->stream) | 5368 | if (!dm_old_crtc_state->stream) |
| 5135 | goto next_crtc; | 5369 | goto next_crtc; |
| 5136 | 5370 | ||
| 5371 | ret = dm_atomic_get_state(state, &dm_state); | ||
| 5372 | if (ret) | ||
| 5373 | goto fail; | ||
| 5374 | |||
| 5137 | DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", | 5375 | DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", |
| 5138 | crtc->base.id); | 5376 | crtc->base.id); |
| 5139 | 5377 | ||
| @@ -5149,6 +5387,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, | |||
| 5149 | dc_stream_release(dm_old_crtc_state->stream); | 5387 | dc_stream_release(dm_old_crtc_state->stream); |
| 5150 | dm_new_crtc_state->stream = NULL; | 5388 | dm_new_crtc_state->stream = NULL; |
| 5151 | 5389 | ||
| 5390 | reset_freesync_config_for_crtc(dm_new_crtc_state); | ||
| 5391 | |||
| 5152 | *lock_and_validation_needed = true; | 5392 | *lock_and_validation_needed = true; |
| 5153 | 5393 | ||
| 5154 | } else {/* Add stream for any updated/enabled CRTC */ | 5394 | } else {/* Add stream for any updated/enabled CRTC */ |
| @@ -5168,6 +5408,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, | |||
| 5168 | 5408 | ||
| 5169 | WARN_ON(dm_new_crtc_state->stream); | 5409 | WARN_ON(dm_new_crtc_state->stream); |
| 5170 | 5410 | ||
| 5411 | ret = dm_atomic_get_state(state, &dm_state); | ||
| 5412 | if (ret) | ||
| 5413 | goto fail; | ||
| 5414 | |||
| 5171 | dm_new_crtc_state->stream = new_stream; | 5415 | dm_new_crtc_state->stream = new_stream; |
| 5172 | 5416 | ||
| 5173 | dc_stream_retain(new_stream); | 5417 | dc_stream_retain(new_stream); |
| @@ -5226,7 +5470,9 @@ next_crtc: | |||
| 5226 | amdgpu_dm_set_ctm(dm_new_crtc_state); | 5470 | amdgpu_dm_set_ctm(dm_new_crtc_state); |
| 5227 | } | 5471 | } |
| 5228 | 5472 | ||
| 5229 | 5473 | /* Update Freesync settings. */ | |
| 5474 | get_freesync_config_for_crtc(dm_new_crtc_state, | ||
| 5475 | dm_new_conn_state); | ||
| 5230 | } | 5476 | } |
| 5231 | 5477 | ||
| 5232 | return ret; | 5478 | return ret; |
| @@ -5242,12 +5488,13 @@ static int dm_update_planes_state(struct dc *dc, | |||
| 5242 | bool enable, | 5488 | bool enable, |
| 5243 | bool *lock_and_validation_needed) | 5489 | bool *lock_and_validation_needed) |
| 5244 | { | 5490 | { |
| 5491 | |||
| 5492 | struct dm_atomic_state *dm_state = NULL; | ||
| 5245 | struct drm_crtc *new_plane_crtc, *old_plane_crtc; | 5493 | struct drm_crtc *new_plane_crtc, *old_plane_crtc; |
| 5246 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; | 5494 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
| 5247 | struct drm_plane *plane; | 5495 | struct drm_plane *plane; |
| 5248 | struct drm_plane_state *old_plane_state, *new_plane_state; | 5496 | struct drm_plane_state *old_plane_state, *new_plane_state; |
| 5249 | struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; | 5497 | struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; |
| 5250 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); | ||
| 5251 | struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; | 5498 | struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; |
| 5252 | int i ; | 5499 | int i ; |
| 5253 | /* TODO return page_flip_needed() function */ | 5500 | /* TODO return page_flip_needed() function */ |
| @@ -5285,6 +5532,10 @@ static int dm_update_planes_state(struct dc *dc, | |||
| 5285 | DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", | 5532 | DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", |
| 5286 | plane->base.id, old_plane_crtc->base.id); | 5533 | plane->base.id, old_plane_crtc->base.id); |
| 5287 | 5534 | ||
| 5535 | ret = dm_atomic_get_state(state, &dm_state); | ||
| 5536 | if (ret) | ||
| 5537 | return ret; | ||
| 5538 | |||
| 5288 | if (!dc_remove_plane_from_context( | 5539 | if (!dc_remove_plane_from_context( |
| 5289 | dc, | 5540 | dc, |
| 5290 | dm_old_crtc_state->stream, | 5541 | dm_old_crtc_state->stream, |
| @@ -5339,6 +5590,12 @@ static int dm_update_planes_state(struct dc *dc, | |||
| 5339 | return ret; | 5590 | return ret; |
| 5340 | } | 5591 | } |
| 5341 | 5592 | ||
| 5593 | ret = dm_atomic_get_state(state, &dm_state); | ||
| 5594 | if (ret) { | ||
| 5595 | dc_plane_state_release(dc_new_plane_state); | ||
| 5596 | return ret; | ||
| 5597 | } | ||
| 5598 | |||
| 5342 | /* | 5599 | /* |
| 5343 | * Any atomic check errors that occur after this will | 5600 | * Any atomic check errors that occur after this will |
| 5344 | * not need a release. The plane state will be attached | 5601 | * not need a release. The plane state will be attached |
| @@ -5370,11 +5627,14 @@ static int dm_update_planes_state(struct dc *dc, | |||
| 5370 | 5627 | ||
| 5371 | return ret; | 5628 | return ret; |
| 5372 | } | 5629 | } |
| 5373 | enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state) | ||
| 5374 | { | ||
| 5375 | |||
| 5376 | 5630 | ||
| 5377 | int i, j, num_plane; | 5631 | static int |
| 5632 | dm_determine_update_type_for_commit(struct dc *dc, | ||
| 5633 | struct drm_atomic_state *state, | ||
| 5634 | enum surface_update_type *out_type) | ||
| 5635 | { | ||
| 5636 | struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL; | ||
| 5637 | int i, j, num_plane, ret = 0; | ||
| 5378 | struct drm_plane_state *old_plane_state, *new_plane_state; | 5638 | struct drm_plane_state *old_plane_state, *new_plane_state; |
| 5379 | struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state; | 5639 | struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state; |
| 5380 | struct drm_crtc *new_plane_crtc, *old_plane_crtc; | 5640 | struct drm_crtc *new_plane_crtc, *old_plane_crtc; |
| @@ -5394,7 +5654,7 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru | |||
| 5394 | DRM_ERROR("Plane or surface update failed to allocate"); | 5654 | DRM_ERROR("Plane or surface update failed to allocate"); |
| 5395 | /* Set type to FULL to avoid crashing in DC*/ | 5655 | /* Set type to FULL to avoid crashing in DC*/ |
| 5396 | update_type = UPDATE_TYPE_FULL; | 5656 | update_type = UPDATE_TYPE_FULL; |
| 5397 | goto ret; | 5657 | goto cleanup; |
| 5398 | } | 5658 | } |
| 5399 | 5659 | ||
| 5400 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 5660 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
| @@ -5448,27 +5708,40 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru | |||
| 5448 | } | 5708 | } |
| 5449 | 5709 | ||
| 5450 | if (num_plane > 0) { | 5710 | if (num_plane > 0) { |
| 5451 | status = dc_stream_get_status(new_dm_crtc_state->stream); | 5711 | ret = dm_atomic_get_state(state, &dm_state); |
| 5712 | if (ret) | ||
| 5713 | goto cleanup; | ||
| 5714 | |||
| 5715 | old_dm_state = dm_atomic_get_old_state(state); | ||
| 5716 | if (!old_dm_state) { | ||
| 5717 | ret = -EINVAL; | ||
| 5718 | goto cleanup; | ||
| 5719 | } | ||
| 5720 | |||
| 5721 | status = dc_state_get_stream_status(old_dm_state->context, | ||
| 5722 | new_dm_crtc_state->stream); | ||
| 5723 | |||
| 5452 | update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, | 5724 | update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, |
| 5453 | &stream_update, status); | 5725 | &stream_update, status); |
| 5454 | 5726 | ||
| 5455 | if (update_type > UPDATE_TYPE_MED) { | 5727 | if (update_type > UPDATE_TYPE_MED) { |
| 5456 | update_type = UPDATE_TYPE_FULL; | 5728 | update_type = UPDATE_TYPE_FULL; |
| 5457 | goto ret; | 5729 | goto cleanup; |
| 5458 | } | 5730 | } |
| 5459 | } | 5731 | } |
| 5460 | 5732 | ||
| 5461 | } else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) { | 5733 | } else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) { |
| 5462 | update_type = UPDATE_TYPE_FULL; | 5734 | update_type = UPDATE_TYPE_FULL; |
| 5463 | goto ret; | 5735 | goto cleanup; |
| 5464 | } | 5736 | } |
| 5465 | } | 5737 | } |
| 5466 | 5738 | ||
| 5467 | ret: | 5739 | cleanup: |
| 5468 | kfree(updates); | 5740 | kfree(updates); |
| 5469 | kfree(surface); | 5741 | kfree(surface); |
| 5470 | 5742 | ||
| 5471 | return update_type; | 5743 | *out_type = update_type; |
| 5744 | return ret; | ||
| 5472 | } | 5745 | } |
| 5473 | 5746 | ||
| 5474 | /** | 5747 | /** |
| @@ -5500,8 +5773,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
| 5500 | struct drm_atomic_state *state) | 5773 | struct drm_atomic_state *state) |
| 5501 | { | 5774 | { |
| 5502 | struct amdgpu_device *adev = dev->dev_private; | 5775 | struct amdgpu_device *adev = dev->dev_private; |
| 5776 | struct dm_atomic_state *dm_state = NULL; | ||
| 5503 | struct dc *dc = adev->dm.dc; | 5777 | struct dc *dc = adev->dm.dc; |
| 5504 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); | ||
| 5505 | struct drm_connector *connector; | 5778 | struct drm_connector *connector; |
| 5506 | struct drm_connector_state *old_con_state, *new_con_state; | 5779 | struct drm_connector_state *old_con_state, *new_con_state; |
| 5507 | struct drm_crtc *crtc; | 5780 | struct drm_crtc *crtc; |
| @@ -5522,12 +5795,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
| 5522 | goto fail; | 5795 | goto fail; |
| 5523 | 5796 | ||
| 5524 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 5797 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
| 5525 | struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); | ||
| 5526 | struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); | ||
| 5527 | |||
| 5528 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && | 5798 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && |
| 5529 | !new_crtc_state->color_mgmt_changed && | 5799 | !new_crtc_state->color_mgmt_changed && |
| 5530 | (dm_old_crtc_state->freesync_enabled == dm_new_crtc_state->freesync_enabled)) | 5800 | !new_crtc_state->vrr_enabled) |
| 5531 | continue; | 5801 | continue; |
| 5532 | 5802 | ||
| 5533 | if (!new_crtc_state->enable) | 5803 | if (!new_crtc_state->enable) |
| @@ -5542,10 +5812,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
| 5542 | goto fail; | 5812 | goto fail; |
| 5543 | } | 5813 | } |
| 5544 | 5814 | ||
| 5545 | dm_state->context = dc_create_state(); | ||
| 5546 | ASSERT(dm_state->context); | ||
| 5547 | dc_resource_state_copy_construct_current(dc, dm_state->context); | ||
| 5548 | |||
| 5549 | /* Remove exiting planes if they are modified */ | 5815 | /* Remove exiting planes if they are modified */ |
| 5550 | ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed); | 5816 | ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed); |
| 5551 | if (ret) { | 5817 | if (ret) { |
| @@ -5598,7 +5864,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
| 5598 | lock_and_validation_needed = true; | 5864 | lock_and_validation_needed = true; |
| 5599 | } | 5865 | } |
| 5600 | 5866 | ||
| 5601 | update_type = dm_determine_update_type_for_commit(dc, state); | 5867 | ret = dm_determine_update_type_for_commit(dc, state, &update_type); |
| 5868 | if (ret) | ||
| 5869 | goto fail; | ||
| 5602 | 5870 | ||
| 5603 | if (overall_update_type < update_type) | 5871 | if (overall_update_type < update_type) |
| 5604 | overall_update_type = update_type; | 5872 | overall_update_type = update_type; |
| @@ -5616,6 +5884,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
| 5616 | 5884 | ||
| 5617 | 5885 | ||
| 5618 | if (overall_update_type > UPDATE_TYPE_FAST) { | 5886 | if (overall_update_type > UPDATE_TYPE_FAST) { |
| 5887 | ret = dm_atomic_get_state(state, &dm_state); | ||
| 5888 | if (ret) | ||
| 5889 | goto fail; | ||
| 5619 | 5890 | ||
| 5620 | ret = do_aquire_global_lock(dev, state); | 5891 | ret = do_aquire_global_lock(dev, state); |
| 5621 | if (ret) | 5892 | if (ret) |
| @@ -5670,14 +5941,15 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, | |||
| 5670 | struct detailed_data_monitor_range *range; | 5941 | struct detailed_data_monitor_range *range; |
| 5671 | struct amdgpu_dm_connector *amdgpu_dm_connector = | 5942 | struct amdgpu_dm_connector *amdgpu_dm_connector = |
| 5672 | to_amdgpu_dm_connector(connector); | 5943 | to_amdgpu_dm_connector(connector); |
| 5673 | struct dm_connector_state *dm_con_state; | 5944 | struct dm_connector_state *dm_con_state = NULL; |
| 5674 | 5945 | ||
| 5675 | struct drm_device *dev = connector->dev; | 5946 | struct drm_device *dev = connector->dev; |
| 5676 | struct amdgpu_device *adev = dev->dev_private; | 5947 | struct amdgpu_device *adev = dev->dev_private; |
| 5948 | bool freesync_capable = false; | ||
| 5677 | 5949 | ||
| 5678 | if (!connector->state) { | 5950 | if (!connector->state) { |
| 5679 | DRM_ERROR("%s - Connector has no state", __func__); | 5951 | DRM_ERROR("%s - Connector has no state", __func__); |
| 5680 | return; | 5952 | goto update; |
| 5681 | } | 5953 | } |
| 5682 | 5954 | ||
| 5683 | if (!edid) { | 5955 | if (!edid) { |
| @@ -5687,9 +5959,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, | |||
| 5687 | amdgpu_dm_connector->max_vfreq = 0; | 5959 | amdgpu_dm_connector->max_vfreq = 0; |
| 5688 | amdgpu_dm_connector->pixel_clock_mhz = 0; | 5960 | amdgpu_dm_connector->pixel_clock_mhz = 0; |
| 5689 | 5961 | ||
| 5690 | dm_con_state->freesync_capable = false; | 5962 | goto update; |
| 5691 | dm_con_state->freesync_enable = false; | ||
| 5692 | return; | ||
| 5693 | } | 5963 | } |
| 5694 | 5964 | ||
| 5695 | dm_con_state = to_dm_connector_state(connector->state); | 5965 | dm_con_state = to_dm_connector_state(connector->state); |
| @@ -5697,10 +5967,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, | |||
| 5697 | edid_check_required = false; | 5967 | edid_check_required = false; |
| 5698 | if (!amdgpu_dm_connector->dc_sink) { | 5968 | if (!amdgpu_dm_connector->dc_sink) { |
| 5699 | DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); | 5969 | DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); |
| 5700 | return; | 5970 | goto update; |
| 5701 | } | 5971 | } |
| 5702 | if (!adev->dm.freesync_module) | 5972 | if (!adev->dm.freesync_module) |
| 5703 | return; | 5973 | goto update; |
| 5704 | /* | 5974 | /* |
| 5705 | * if edid non zero restrict freesync only for dp and edp | 5975 | * if edid non zero restrict freesync only for dp and edp |
| 5706 | */ | 5976 | */ |
| @@ -5712,7 +5982,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, | |||
| 5712 | amdgpu_dm_connector); | 5982 | amdgpu_dm_connector); |
| 5713 | } | 5983 | } |
| 5714 | } | 5984 | } |
| 5715 | dm_con_state->freesync_capable = false; | ||
| 5716 | if (edid_check_required == true && (edid->version > 1 || | 5985 | if (edid_check_required == true && (edid->version > 1 || |
| 5717 | (edid->version == 1 && edid->revision > 1))) { | 5986 | (edid->version == 1 && edid->revision > 1))) { |
| 5718 | for (i = 0; i < 4; i++) { | 5987 | for (i = 0; i < 4; i++) { |
| @@ -5744,8 +6013,16 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, | |||
| 5744 | if (amdgpu_dm_connector->max_vfreq - | 6013 | if (amdgpu_dm_connector->max_vfreq - |
| 5745 | amdgpu_dm_connector->min_vfreq > 10) { | 6014 | amdgpu_dm_connector->min_vfreq > 10) { |
| 5746 | 6015 | ||
| 5747 | dm_con_state->freesync_capable = true; | 6016 | freesync_capable = true; |
| 5748 | } | 6017 | } |
| 5749 | } | 6018 | } |
| 6019 | |||
| 6020 | update: | ||
| 6021 | if (dm_con_state) | ||
| 6022 | dm_con_state->freesync_capable = freesync_capable; | ||
| 6023 | |||
| 6024 | if (connector->vrr_capable_property) | ||
| 6025 | drm_connector_set_vrr_capable_property(connector, | ||
| 6026 | freesync_capable); | ||
| 5750 | } | 6027 | } |
| 5751 | 6028 | ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 607c3cdd7d0c..4326dc256491 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | |||
| @@ -84,6 +84,18 @@ struct dm_comressor_info { | |||
| 84 | }; | 84 | }; |
| 85 | 85 | ||
| 86 | /** | 86 | /** |
| 87 | * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI | ||
| 88 | * @min_input_signal: minimum possible input in range 0-255 | ||
| 89 | * @max_input_signal: maximum possible input in range 0-255 | ||
| 90 | * @caps_valid: true if these values are from the ACPI interface | ||
| 91 | */ | ||
| 92 | struct amdgpu_dm_backlight_caps { | ||
| 93 | int min_input_signal; | ||
| 94 | int max_input_signal; | ||
| 95 | bool caps_valid; | ||
| 96 | }; | ||
| 97 | |||
| 98 | /** | ||
| 87 | * struct amdgpu_display_manager - Central amdgpu display manager device | 99 | * struct amdgpu_display_manager - Central amdgpu display manager device |
| 88 | * | 100 | * |
| 89 | * @dc: Display Core control structure | 101 | * @dc: Display Core control structure |
| @@ -112,6 +124,17 @@ struct amdgpu_display_manager { | |||
| 112 | u16 display_indexes_num; | 124 | u16 display_indexes_num; |
| 113 | 125 | ||
| 114 | /** | 126 | /** |
| 127 | * @atomic_obj | ||
| 128 | * | ||
| 129 | * In combination with &dm_atomic_state it helps manage | ||
| 130 | * global atomic state that doesn't map cleanly into existing | ||
| 131 | * drm resources, like &dc_context. | ||
| 132 | */ | ||
| 133 | struct drm_private_obj atomic_obj; | ||
| 134 | |||
| 135 | struct drm_modeset_lock atomic_obj_lock; | ||
| 136 | |||
| 137 | /** | ||
| 115 | * @irq_handler_list_low_tab: | 138 | * @irq_handler_list_low_tab: |
| 116 | * | 139 | * |
| 117 | * Low priority IRQ handler table. | 140 | * Low priority IRQ handler table. |
| @@ -158,6 +181,7 @@ struct amdgpu_display_manager { | |||
| 158 | struct backlight_device *backlight_dev; | 181 | struct backlight_device *backlight_dev; |
| 159 | 182 | ||
| 160 | const struct dc_link *backlight_link; | 183 | const struct dc_link *backlight_link; |
| 184 | struct amdgpu_dm_backlight_caps backlight_caps; | ||
| 161 | 185 | ||
| 162 | struct mod_freesync *freesync_module; | 186 | struct mod_freesync *freesync_module; |
| 163 | 187 | ||
| @@ -231,15 +255,21 @@ struct dm_crtc_state { | |||
| 231 | int crc_skip_count; | 255 | int crc_skip_count; |
| 232 | bool crc_enabled; | 256 | bool crc_enabled; |
| 233 | 257 | ||
| 234 | bool freesync_enabled; | 258 | bool freesync_timing_changed; |
| 259 | bool freesync_vrr_info_changed; | ||
| 260 | |||
| 261 | bool vrr_supported; | ||
| 262 | struct mod_freesync_config freesync_config; | ||
| 235 | struct dc_crtc_timing_adjust adjust; | 263 | struct dc_crtc_timing_adjust adjust; |
| 236 | struct dc_info_packet vrr_infopacket; | 264 | struct dc_info_packet vrr_infopacket; |
| 265 | |||
| 266 | int abm_level; | ||
| 237 | }; | 267 | }; |
| 238 | 268 | ||
| 239 | #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base) | 269 | #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base) |
| 240 | 270 | ||
| 241 | struct dm_atomic_state { | 271 | struct dm_atomic_state { |
| 242 | struct drm_atomic_state base; | 272 | struct drm_private_state base; |
| 243 | 273 | ||
| 244 | struct dc_state *context; | 274 | struct dc_state *context; |
| 245 | }; | 275 | }; |
| @@ -254,8 +284,8 @@ struct dm_connector_state { | |||
| 254 | uint8_t underscan_hborder; | 284 | uint8_t underscan_hborder; |
| 255 | uint8_t max_bpc; | 285 | uint8_t max_bpc; |
| 256 | bool underscan_enable; | 286 | bool underscan_enable; |
| 257 | bool freesync_enable; | ||
| 258 | bool freesync_capable; | 287 | bool freesync_capable; |
| 288 | uint8_t abm_level; | ||
| 259 | }; | 289 | }; |
| 260 | 290 | ||
| 261 | #define to_dm_connector_state(x)\ | 291 | #define to_dm_connector_state(x)\ |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 3279e26c3440..dba6b57830c7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c | |||
| @@ -328,7 +328,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream, | |||
| 328 | enum dc_dither_option option) | 328 | enum dc_dither_option option) |
| 329 | { | 329 | { |
| 330 | struct bit_depth_reduction_params params; | 330 | struct bit_depth_reduction_params params; |
| 331 | struct dc_link *link = stream->status.link; | 331 | struct dc_link *link = stream->sink->link; |
| 332 | struct pipe_ctx *pipes = NULL; | 332 | struct pipe_ctx *pipes = NULL; |
| 333 | int i; | 333 | int i; |
| 334 | 334 | ||
| @@ -1686,6 +1686,15 @@ void dc_resume(struct dc *dc) | |||
| 1686 | core_link_resume(dc->links[i]); | 1686 | core_link_resume(dc->links[i]); |
| 1687 | } | 1687 | } |
| 1688 | 1688 | ||
| 1689 | bool dc_is_dmcu_initialized(struct dc *dc) | ||
| 1690 | { | ||
| 1691 | struct dmcu *dmcu = dc->res_pool->dmcu; | ||
| 1692 | |||
| 1693 | if (dmcu) | ||
| 1694 | return dmcu->funcs->is_dmcu_initialized(dmcu); | ||
| 1695 | return false; | ||
| 1696 | } | ||
| 1697 | |||
| 1689 | bool dc_submit_i2c( | 1698 | bool dc_submit_i2c( |
| 1690 | struct dc *dc, | 1699 | struct dc *dc, |
| 1691 | uint32_t link_index, | 1700 | uint32_t link_index, |
| @@ -1810,4 +1819,4 @@ void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx | |||
| 1810 | info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz; | 1819 | info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz; |
| 1811 | info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz; | 1820 | info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz; |
| 1812 | info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz; | 1821 | info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz; |
| 1813 | } \ No newline at end of file | 1822 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 7ee9c033acbd..948596a02392 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
| @@ -1396,8 +1396,6 @@ static enum dc_status enable_link_dp( | |||
| 1396 | else | 1396 | else |
| 1397 | status = DC_FAIL_DP_LINK_TRAINING; | 1397 | status = DC_FAIL_DP_LINK_TRAINING; |
| 1398 | 1398 | ||
| 1399 | enable_stream_features(pipe_ctx); | ||
| 1400 | |||
| 1401 | return status; | 1399 | return status; |
| 1402 | } | 1400 | } |
| 1403 | 1401 | ||
| @@ -2175,11 +2173,11 @@ bool dc_link_set_backlight_level(const struct dc_link *link, | |||
| 2175 | backlight_pwm_u16_16, backlight_pwm_u16_16); | 2173 | backlight_pwm_u16_16, backlight_pwm_u16_16); |
| 2176 | 2174 | ||
| 2177 | if (dc_is_embedded_signal(link->connector_signal)) { | 2175 | if (dc_is_embedded_signal(link->connector_signal)) { |
| 2178 | if (stream != NULL) { | 2176 | for (i = 0; i < MAX_PIPES; i++) { |
| 2179 | for (i = 0; i < MAX_PIPES; i++) { | 2177 | if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { |
| 2180 | if (core_dc->current_state->res_ctx. | 2178 | if (core_dc->current_state->res_ctx. |
| 2181 | pipe_ctx[i].stream | 2179 | pipe_ctx[i].stream->sink->link |
| 2182 | == stream) | 2180 | == link) |
| 2183 | /* DMCU -1 for all controller id values, | 2181 | /* DMCU -1 for all controller id values, |
| 2184 | * therefore +1 here | 2182 | * therefore +1 here |
| 2185 | */ | 2183 | */ |
| @@ -2218,7 +2216,7 @@ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait) | |||
| 2218 | struct dc *core_dc = link->ctx->dc; | 2216 | struct dc *core_dc = link->ctx->dc; |
| 2219 | struct dmcu *dmcu = core_dc->res_pool->dmcu; | 2217 | struct dmcu *dmcu = core_dc->res_pool->dmcu; |
| 2220 | 2218 | ||
| 2221 | if (dmcu != NULL && link->psr_enabled) | 2219 | if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled) |
| 2222 | dmcu->funcs->set_psr_enable(dmcu, enable, wait); | 2220 | dmcu->funcs->set_psr_enable(dmcu, enable, wait); |
| 2223 | 2221 | ||
| 2224 | return true; | 2222 | return true; |
| @@ -2594,6 +2592,9 @@ void core_link_enable_stream( | |||
| 2594 | core_dc->hwss.unblank_stream(pipe_ctx, | 2592 | core_dc->hwss.unblank_stream(pipe_ctx, |
| 2595 | &pipe_ctx->stream->sink->link->cur_link_settings); | 2593 | &pipe_ctx->stream->sink->link->cur_link_settings); |
| 2596 | 2594 | ||
| 2595 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) | ||
| 2596 | enable_stream_features(pipe_ctx); | ||
| 2597 | |||
| 2597 | dc_link_set_backlight_level(pipe_ctx->stream->sink->link, | 2598 | dc_link_set_backlight_level(pipe_ctx->stream->sink->link, |
| 2598 | pipe_ctx->stream->bl_pwm_level, | 2599 | pipe_ctx->stream->bl_pwm_level, |
| 2599 | 0, | 2600 | 0, |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index d91df5ef0cb3..4d1f8ac069c1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | |||
| @@ -2371,11 +2371,22 @@ static bool retrieve_link_cap(struct dc_link *link) | |||
| 2371 | dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; | 2371 | dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; |
| 2372 | 2372 | ||
| 2373 | if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) { | 2373 | if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) { |
| 2374 | core_link_read_dpcd( | 2374 | uint8_t ext_cap_data[16]; |
| 2375 | |||
| 2376 | memset(ext_cap_data, '\0', sizeof(ext_cap_data)); | ||
| 2377 | for (i = 0; i < read_dpcd_retry_cnt; i++) { | ||
| 2378 | status = core_link_read_dpcd( | ||
| 2375 | link, | 2379 | link, |
| 2376 | DP_DP13_DPCD_REV, | 2380 | DP_DP13_DPCD_REV, |
| 2377 | dpcd_data, | 2381 | ext_cap_data, |
| 2378 | sizeof(dpcd_data)); | 2382 | sizeof(ext_cap_data)); |
| 2383 | if (status == DC_OK) { | ||
| 2384 | memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data)); | ||
| 2385 | break; | ||
| 2386 | } | ||
| 2387 | } | ||
| 2388 | if (status != DC_OK) | ||
| 2389 | dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__); | ||
| 2379 | } | 2390 | } |
| 2380 | } | 2391 | } |
| 2381 | 2392 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index fc65b0055167..0bb844a7b990 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c | |||
| @@ -478,10 +478,29 @@ static enum pixel_format convert_pixel_format_to_dalsurface( | |||
| 478 | return dal_pixel_format; | 478 | return dal_pixel_format; |
| 479 | } | 479 | } |
| 480 | 480 | ||
| 481 | static void rect_swap_helper(struct rect *rect) | 481 | static inline void get_vp_scan_direction( |
| 482 | { | 482 | enum dc_rotation_angle rotation, |
| 483 | swap(rect->height, rect->width); | 483 | bool horizontal_mirror, |
| 484 | swap(rect->x, rect->y); | 484 | bool *orthogonal_rotation, |
| 485 | bool *flip_vert_scan_dir, | ||
| 486 | bool *flip_horz_scan_dir) | ||
| 487 | { | ||
| 488 | *orthogonal_rotation = false; | ||
| 489 | *flip_vert_scan_dir = false; | ||
| 490 | *flip_horz_scan_dir = false; | ||
| 491 | if (rotation == ROTATION_ANGLE_180) { | ||
| 492 | *flip_vert_scan_dir = true; | ||
| 493 | *flip_horz_scan_dir = true; | ||
| 494 | } else if (rotation == ROTATION_ANGLE_90) { | ||
| 495 | *orthogonal_rotation = true; | ||
| 496 | *flip_horz_scan_dir = true; | ||
| 497 | } else if (rotation == ROTATION_ANGLE_270) { | ||
| 498 | *orthogonal_rotation = true; | ||
| 499 | *flip_vert_scan_dir = true; | ||
| 500 | } | ||
| 501 | |||
| 502 | if (horizontal_mirror) | ||
| 503 | *flip_horz_scan_dir = !*flip_horz_scan_dir; | ||
| 485 | } | 504 | } |
| 486 | 505 | ||
| 487 | static void calculate_viewport(struct pipe_ctx *pipe_ctx) | 506 | static void calculate_viewport(struct pipe_ctx *pipe_ctx) |
| @@ -490,33 +509,14 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) | |||
| 490 | const struct dc_stream_state *stream = pipe_ctx->stream; | 509 | const struct dc_stream_state *stream = pipe_ctx->stream; |
| 491 | struct scaler_data *data = &pipe_ctx->plane_res.scl_data; | 510 | struct scaler_data *data = &pipe_ctx->plane_res.scl_data; |
| 492 | struct rect surf_src = plane_state->src_rect; | 511 | struct rect surf_src = plane_state->src_rect; |
| 493 | struct rect clip = { 0 }; | 512 | struct rect clip, dest; |
| 494 | int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 | 513 | int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 |
| 495 | || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; | 514 | || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; |
| 496 | bool pri_split = pipe_ctx->bottom_pipe && | 515 | bool pri_split = pipe_ctx->bottom_pipe && |
| 497 | pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; | 516 | pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; |
| 498 | bool sec_split = pipe_ctx->top_pipe && | 517 | bool sec_split = pipe_ctx->top_pipe && |
| 499 | pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; | 518 | pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; |
| 500 | bool flip_vert_scan_dir = false, flip_horz_scan_dir = false; | 519 | bool orthogonal_rotation, flip_y_start, flip_x_start; |
| 501 | |||
| 502 | |||
| 503 | /* | ||
| 504 | * We need take horizontal mirror into account. On an unrotated surface this means | ||
| 505 | * that the viewport offset is actually the offset from the other side of source | ||
| 506 | * image so we have to subtract the right edge of the viewport from the right edge of | ||
| 507 | * the source window. Similar to mirror we need to take into account how offset is | ||
| 508 | * affected for 270/180 rotations | ||
| 509 | */ | ||
| 510 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) { | ||
| 511 | flip_vert_scan_dir = true; | ||
| 512 | flip_horz_scan_dir = true; | ||
| 513 | } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90) | ||
| 514 | flip_vert_scan_dir = true; | ||
| 515 | else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) | ||
| 516 | flip_horz_scan_dir = true; | ||
| 517 | |||
| 518 | if (pipe_ctx->plane_state->horizontal_mirror) | ||
| 519 | flip_horz_scan_dir = !flip_horz_scan_dir; | ||
| 520 | 520 | ||
| 521 | if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || | 521 | if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || |
| 522 | stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { | 522 | stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { |
| @@ -524,13 +524,10 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) | |||
| 524 | sec_split = false; | 524 | sec_split = false; |
| 525 | } | 525 | } |
| 526 | 526 | ||
| 527 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || | ||
| 528 | pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) | ||
| 529 | rect_swap_helper(&surf_src); | ||
| 530 | |||
| 531 | /* The actual clip is an intersection between stream | 527 | /* The actual clip is an intersection between stream |
| 532 | * source and surface clip | 528 | * source and surface clip |
| 533 | */ | 529 | */ |
| 530 | dest = plane_state->dst_rect; | ||
| 534 | clip.x = stream->src.x > plane_state->clip_rect.x ? | 531 | clip.x = stream->src.x > plane_state->clip_rect.x ? |
| 535 | stream->src.x : plane_state->clip_rect.x; | 532 | stream->src.x : plane_state->clip_rect.x; |
| 536 | 533 | ||
| @@ -547,66 +544,77 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) | |||
| 547 | stream->src.y + stream->src.height - clip.y : | 544 | stream->src.y + stream->src.height - clip.y : |
| 548 | plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ; | 545 | plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ; |
| 549 | 546 | ||
| 550 | /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio | 547 | /* |
| 551 | * note: surf_src.ofs should be added after rotation/mirror offset direction | 548 | * Need to calculate how scan origin is shifted in vp space |
| 552 | * adjustment since it is already in viewport space | 549 | * to correctly rotate clip and dst |
| 553 | * num_pixels = clip.num_pix * scl_ratio | ||
| 554 | */ | 550 | */ |
| 555 | data->viewport.x = (clip.x - plane_state->dst_rect.x) * | 551 | get_vp_scan_direction( |
| 556 | surf_src.width / plane_state->dst_rect.width; | 552 | plane_state->rotation, |
| 557 | data->viewport.width = clip.width * | 553 | plane_state->horizontal_mirror, |
| 558 | surf_src.width / plane_state->dst_rect.width; | 554 | &orthogonal_rotation, |
| 559 | 555 | &flip_y_start, | |
| 560 | data->viewport.y = (clip.y - plane_state->dst_rect.y) * | 556 | &flip_x_start); |
| 561 | surf_src.height / plane_state->dst_rect.height; | ||
| 562 | data->viewport.height = clip.height * | ||
| 563 | surf_src.height / plane_state->dst_rect.height; | ||
| 564 | 557 | ||
| 565 | if (flip_vert_scan_dir) | 558 | if (orthogonal_rotation) { |
| 566 | data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; | 559 | swap(clip.x, clip.y); |
| 567 | if (flip_horz_scan_dir) | 560 | swap(clip.width, clip.height); |
| 568 | data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; | 561 | swap(dest.x, dest.y); |
| 562 | swap(dest.width, dest.height); | ||
| 563 | } | ||
| 564 | if (flip_x_start) { | ||
| 565 | clip.x = dest.x + dest.width - clip.x - clip.width; | ||
| 566 | dest.x = 0; | ||
| 567 | } | ||
| 568 | if (flip_y_start) { | ||
| 569 | clip.y = dest.y + dest.height - clip.y - clip.height; | ||
| 570 | dest.y = 0; | ||
| 571 | } | ||
| 569 | 572 | ||
| 570 | data->viewport.x += surf_src.x; | 573 | /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio |
| 571 | data->viewport.y += surf_src.y; | 574 | * num_pixels = clip.num_pix * scl_ratio |
| 575 | */ | ||
| 576 | data->viewport.x = surf_src.x + (clip.x - dest.x) * surf_src.width / dest.width; | ||
| 577 | data->viewport.width = clip.width * surf_src.width / dest.width; | ||
| 578 | |||
| 579 | data->viewport.y = surf_src.y + (clip.y - dest.y) * surf_src.height / dest.height; | ||
| 580 | data->viewport.height = clip.height * surf_src.height / dest.height; | ||
| 581 | |||
| 582 | /* Handle split */ | ||
| 583 | if (pri_split || sec_split) { | ||
| 584 | if (orthogonal_rotation) { | ||
| 585 | if (flip_y_start != pri_split) | ||
| 586 | data->viewport.height /= 2; | ||
| 587 | else { | ||
| 588 | data->viewport.y += data->viewport.height / 2; | ||
| 589 | /* Ceil offset pipe */ | ||
| 590 | data->viewport.height = (data->viewport.height + 1) / 2; | ||
| 591 | } | ||
| 592 | } else { | ||
| 593 | if (flip_x_start != pri_split) | ||
| 594 | data->viewport.width /= 2; | ||
| 595 | else { | ||
| 596 | data->viewport.x += data->viewport.width / 2; | ||
| 597 | /* Ceil offset pipe */ | ||
| 598 | data->viewport.width = (data->viewport.width + 1) / 2; | ||
| 599 | } | ||
| 600 | } | ||
| 601 | } | ||
| 572 | 602 | ||
| 573 | /* Round down, compensate in init */ | 603 | /* Round down, compensate in init */ |
| 574 | data->viewport_c.x = data->viewport.x / vpc_div; | 604 | data->viewport_c.x = data->viewport.x / vpc_div; |
| 575 | data->viewport_c.y = data->viewport.y / vpc_div; | 605 | data->viewport_c.y = data->viewport.y / vpc_div; |
| 576 | data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? | 606 | data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero; |
| 577 | dc_fixpt_half : dc_fixpt_zero; | 607 | data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero; |
| 578 | data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? | 608 | |
| 579 | dc_fixpt_half : dc_fixpt_zero; | ||
| 580 | /* Round up, assume original video size always even dimensions */ | 609 | /* Round up, assume original video size always even dimensions */ |
| 581 | data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div; | 610 | data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div; |
| 582 | data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div; | 611 | data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div; |
| 583 | |||
| 584 | /* Handle hsplit */ | ||
| 585 | if (sec_split) { | ||
| 586 | data->viewport.x += data->viewport.width / 2; | ||
| 587 | data->viewport_c.x += data->viewport_c.width / 2; | ||
| 588 | /* Ceil offset pipe */ | ||
| 589 | data->viewport.width = (data->viewport.width + 1) / 2; | ||
| 590 | data->viewport_c.width = (data->viewport_c.width + 1) / 2; | ||
| 591 | } else if (pri_split) { | ||
| 592 | if (data->viewport.width > 1) | ||
| 593 | data->viewport.width /= 2; | ||
| 594 | if (data->viewport_c.width > 1) | ||
| 595 | data->viewport_c.width /= 2; | ||
| 596 | } | ||
| 597 | |||
| 598 | if (plane_state->rotation == ROTATION_ANGLE_90 || | ||
| 599 | plane_state->rotation == ROTATION_ANGLE_270) { | ||
| 600 | rect_swap_helper(&data->viewport_c); | ||
| 601 | rect_swap_helper(&data->viewport); | ||
| 602 | } | ||
| 603 | } | 612 | } |
| 604 | 613 | ||
| 605 | static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full) | 614 | static void calculate_recout(struct pipe_ctx *pipe_ctx) |
| 606 | { | 615 | { |
| 607 | const struct dc_plane_state *plane_state = pipe_ctx->plane_state; | 616 | const struct dc_plane_state *plane_state = pipe_ctx->plane_state; |
| 608 | const struct dc_stream_state *stream = pipe_ctx->stream; | 617 | const struct dc_stream_state *stream = pipe_ctx->stream; |
| 609 | struct rect surf_src = plane_state->src_rect; | ||
| 610 | struct rect surf_clip = plane_state->clip_rect; | 618 | struct rect surf_clip = plane_state->clip_rect; |
| 611 | bool pri_split = pipe_ctx->bottom_pipe && | 619 | bool pri_split = pipe_ctx->bottom_pipe && |
| 612 | pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; | 620 | pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; |
| @@ -614,10 +622,6 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full | |||
| 614 | pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; | 622 | pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; |
| 615 | bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; | 623 | bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; |
| 616 | 624 | ||
| 617 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || | ||
| 618 | pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) | ||
| 619 | rect_swap_helper(&surf_src); | ||
| 620 | |||
| 621 | pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x; | 625 | pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x; |
| 622 | if (stream->src.x < surf_clip.x) | 626 | if (stream->src.x < surf_clip.x) |
| 623 | pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x | 627 | pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x |
| @@ -646,7 +650,7 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full | |||
| 646 | stream->dst.y + stream->dst.height | 650 | stream->dst.y + stream->dst.height |
| 647 | - pipe_ctx->plane_res.scl_data.recout.y; | 651 | - pipe_ctx->plane_res.scl_data.recout.y; |
| 648 | 652 | ||
| 649 | /* Handle h & vsplit */ | 653 | /* Handle h & v split, handle rotation using viewport */ |
| 650 | if (sec_split && top_bottom_split) { | 654 | if (sec_split && top_bottom_split) { |
| 651 | pipe_ctx->plane_res.scl_data.recout.y += | 655 | pipe_ctx->plane_res.scl_data.recout.y += |
| 652 | pipe_ctx->plane_res.scl_data.recout.height / 2; | 656 | pipe_ctx->plane_res.scl_data.recout.height / 2; |
| @@ -655,44 +659,14 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full | |||
| 655 | (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2; | 659 | (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2; |
| 656 | } else if (pri_split && top_bottom_split) | 660 | } else if (pri_split && top_bottom_split) |
| 657 | pipe_ctx->plane_res.scl_data.recout.height /= 2; | 661 | pipe_ctx->plane_res.scl_data.recout.height /= 2; |
| 658 | else if (pri_split || sec_split) { | 662 | else if (sec_split) { |
| 659 | /* HMirror XOR Secondary_pipe XOR Rotation_180 */ | 663 | pipe_ctx->plane_res.scl_data.recout.x += |
| 660 | bool right_view = (sec_split != plane_state->horizontal_mirror) != | 664 | pipe_ctx->plane_res.scl_data.recout.width / 2; |
| 661 | (plane_state->rotation == ROTATION_ANGLE_180); | 665 | /* Ceil offset pipe */ |
| 662 | 666 | pipe_ctx->plane_res.scl_data.recout.width = | |
| 663 | if (plane_state->rotation == ROTATION_ANGLE_90 | 667 | (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2; |
| 664 | || plane_state->rotation == ROTATION_ANGLE_270) | 668 | } else if (pri_split) |
| 665 | /* Secondary_pipe XOR Rotation_270 */ | 669 | pipe_ctx->plane_res.scl_data.recout.width /= 2; |
| 666 | right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split; | ||
| 667 | |||
| 668 | if (right_view) { | ||
| 669 | pipe_ctx->plane_res.scl_data.recout.x += | ||
| 670 | pipe_ctx->plane_res.scl_data.recout.width / 2; | ||
| 671 | /* Ceil offset pipe */ | ||
| 672 | pipe_ctx->plane_res.scl_data.recout.width = | ||
| 673 | (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2; | ||
| 674 | } else { | ||
| 675 | if (pipe_ctx->plane_res.scl_data.recout.width > 1) | ||
| 676 | pipe_ctx->plane_res.scl_data.recout.width /= 2; | ||
| 677 | } | ||
| 678 | } | ||
| 679 | /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset) | ||
| 680 | * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl | ||
| 681 | * ratio) | ||
| 682 | */ | ||
| 683 | recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x) | ||
| 684 | * stream->dst.width / stream->src.width - | ||
| 685 | surf_src.x * plane_state->dst_rect.width / surf_src.width | ||
| 686 | * stream->dst.width / stream->src.width; | ||
| 687 | recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y) | ||
| 688 | * stream->dst.height / stream->src.height - | ||
| 689 | surf_src.y * plane_state->dst_rect.height / surf_src.height | ||
| 690 | * stream->dst.height / stream->src.height; | ||
| 691 | |||
| 692 | recout_full->width = plane_state->dst_rect.width | ||
| 693 | * stream->dst.width / stream->src.width; | ||
| 694 | recout_full->height = plane_state->dst_rect.height | ||
| 695 | * stream->dst.height / stream->src.height; | ||
| 696 | } | 670 | } |
| 697 | 671 | ||
| 698 | static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) | 672 | static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) |
| @@ -705,9 +679,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) | |||
| 705 | const int out_w = stream->dst.width; | 679 | const int out_w = stream->dst.width; |
| 706 | const int out_h = stream->dst.height; | 680 | const int out_h = stream->dst.height; |
| 707 | 681 | ||
| 682 | /*Swap surf_src height and width since scaling ratios are in recout rotation*/ | ||
| 708 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || | 683 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || |
| 709 | pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) | 684 | pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) |
| 710 | rect_swap_helper(&surf_src); | 685 | swap(surf_src.height, surf_src.width); |
| 711 | 686 | ||
| 712 | pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction( | 687 | pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction( |
| 713 | surf_src.width, | 688 | surf_src.width, |
| @@ -744,351 +719,202 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) | |||
| 744 | pipe_ctx->plane_res.scl_data.ratios.vert_c, 19); | 719 | pipe_ctx->plane_res.scl_data.ratios.vert_c, 19); |
| 745 | } | 720 | } |
| 746 | 721 | ||
| 747 | static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full) | 722 | static inline void adjust_vp_and_init_for_seamless_clip( |
| 723 | bool flip_scan_dir, | ||
| 724 | int recout_skip, | ||
| 725 | int src_size, | ||
| 726 | int taps, | ||
| 727 | struct fixed31_32 ratio, | ||
| 728 | struct fixed31_32 *init, | ||
| 729 | int *vp_offset, | ||
| 730 | int *vp_size) | ||
| 748 | { | 731 | { |
| 749 | struct scaler_data *data = &pipe_ctx->plane_res.scl_data; | 732 | if (!flip_scan_dir) { |
| 750 | struct rect src = pipe_ctx->plane_state->src_rect; | ||
| 751 | int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 | ||
| 752 | || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; | ||
| 753 | bool flip_vert_scan_dir = false, flip_horz_scan_dir = false; | ||
| 754 | |||
| 755 | /* | ||
| 756 | * Need to calculate the scan direction for viewport to make adjustments | ||
| 757 | */ | ||
| 758 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) { | ||
| 759 | flip_vert_scan_dir = true; | ||
| 760 | flip_horz_scan_dir = true; | ||
| 761 | } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90) | ||
| 762 | flip_vert_scan_dir = true; | ||
| 763 | else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) | ||
| 764 | flip_horz_scan_dir = true; | ||
| 765 | |||
| 766 | if (pipe_ctx->plane_state->horizontal_mirror) | ||
| 767 | flip_horz_scan_dir = !flip_horz_scan_dir; | ||
| 768 | |||
| 769 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || | ||
| 770 | pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { | ||
| 771 | rect_swap_helper(&src); | ||
| 772 | rect_swap_helper(&data->viewport_c); | ||
| 773 | rect_swap_helper(&data->viewport); | ||
| 774 | } | ||
| 775 | |||
| 776 | /* | ||
| 777 | * Init calculated according to formula: | ||
| 778 | * init = (scaling_ratio + number_of_taps + 1) / 2 | ||
| 779 | * init_bot = init + scaling_ratio | ||
| 780 | * init_c = init + truncated_vp_c_offset(from calculate viewport) | ||
| 781 | */ | ||
| 782 | data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int( | ||
| 783 | dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19); | ||
| 784 | |||
| 785 | data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int( | ||
| 786 | dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19); | ||
| 787 | |||
| 788 | data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int( | ||
| 789 | dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19); | ||
| 790 | |||
| 791 | data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int( | ||
| 792 | dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19); | ||
| 793 | |||
| 794 | if (!flip_horz_scan_dir) { | ||
| 795 | /* Adjust for viewport end clip-off */ | 733 | /* Adjust for viewport end clip-off */ |
| 796 | if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) { | 734 | if ((*vp_offset + *vp_size) < src_size) { |
| 797 | int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x; | 735 | int vp_clip = src_size - *vp_size - *vp_offset; |
| 798 | int int_part = dc_fixpt_floor( | 736 | int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio)); |
| 799 | dc_fixpt_sub(data->inits.h, data->ratios.horz)); | ||
| 800 | |||
| 801 | int_part = int_part > 0 ? int_part : 0; | ||
| 802 | data->viewport.width += int_part < vp_clip ? int_part : vp_clip; | ||
| 803 | } | ||
| 804 | if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) { | ||
| 805 | int vp_clip = (src.x + src.width) / vpc_div - | ||
| 806 | data->viewport_c.width - data->viewport_c.x; | ||
| 807 | int int_part = dc_fixpt_floor( | ||
| 808 | dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c)); | ||
| 809 | 737 | ||
| 810 | int_part = int_part > 0 ? int_part : 0; | 738 | int_part = int_part > 0 ? int_part : 0; |
| 811 | data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip; | 739 | *vp_size += int_part < vp_clip ? int_part : vp_clip; |
| 812 | } | 740 | } |
| 813 | 741 | ||
| 814 | /* Adjust for non-0 viewport offset */ | 742 | /* Adjust for non-0 viewport offset */ |
| 815 | if (data->viewport.x) { | 743 | if (*vp_offset) { |
| 816 | int int_part; | ||
| 817 | |||
| 818 | data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int( | ||
| 819 | data->ratios.horz, data->recout.x - recout_full->x)); | ||
| 820 | int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x; | ||
| 821 | if (int_part < data->taps.h_taps) { | ||
| 822 | int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ? | ||
| 823 | (data->taps.h_taps - int_part) : data->viewport.x; | ||
| 824 | data->viewport.x -= int_adj; | ||
| 825 | data->viewport.width += int_adj; | ||
| 826 | int_part += int_adj; | ||
| 827 | } else if (int_part > data->taps.h_taps) { | ||
| 828 | data->viewport.x += int_part - data->taps.h_taps; | ||
| 829 | data->viewport.width -= int_part - data->taps.h_taps; | ||
| 830 | int_part = data->taps.h_taps; | ||
| 831 | } | ||
| 832 | data->inits.h.value &= 0xffffffff; | ||
| 833 | data->inits.h = dc_fixpt_add_int(data->inits.h, int_part); | ||
| 834 | } | ||
| 835 | |||
| 836 | if (data->viewport_c.x) { | ||
| 837 | int int_part; | 744 | int int_part; |
| 838 | 745 | ||
| 839 | data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int( | 746 | *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip)); |
| 840 | data->ratios.horz_c, data->recout.x - recout_full->x)); | 747 | int_part = dc_fixpt_floor(*init) - *vp_offset; |
| 841 | int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x; | 748 | if (int_part < taps) { |
| 842 | if (int_part < data->taps.h_taps_c) { | 749 | int int_adj = *vp_offset >= (taps - int_part) ? |
| 843 | int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ? | 750 | (taps - int_part) : *vp_offset; |
| 844 | (data->taps.h_taps_c - int_part) : data->viewport_c.x; | 751 | *vp_offset -= int_adj; |
| 845 | data->viewport_c.x -= int_adj; | 752 | *vp_size += int_adj; |
| 846 | data->viewport_c.width += int_adj; | ||
| 847 | int_part += int_adj; | 753 | int_part += int_adj; |
| 848 | } else if (int_part > data->taps.h_taps_c) { | 754 | } else if (int_part > taps) { |
| 849 | data->viewport_c.x += int_part - data->taps.h_taps_c; | 755 | *vp_offset += int_part - taps; |
| 850 | data->viewport_c.width -= int_part - data->taps.h_taps_c; | 756 | *vp_size -= int_part - taps; |
| 851 | int_part = data->taps.h_taps_c; | 757 | int_part = taps; |
| 852 | } | 758 | } |
| 853 | data->inits.h_c.value &= 0xffffffff; | 759 | init->value &= 0xffffffff; |
| 854 | data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part); | 760 | *init = dc_fixpt_add_int(*init, int_part); |
| 855 | } | 761 | } |
| 856 | } else { | 762 | } else { |
| 857 | /* Adjust for non-0 viewport offset */ | 763 | /* Adjust for non-0 viewport offset */ |
| 858 | if (data->viewport.x) { | 764 | if (*vp_offset) { |
| 859 | int int_part = dc_fixpt_floor( | 765 | int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio)); |
| 860 | dc_fixpt_sub(data->inits.h, data->ratios.horz)); | ||
| 861 | 766 | ||
| 862 | int_part = int_part > 0 ? int_part : 0; | 767 | int_part = int_part > 0 ? int_part : 0; |
| 863 | data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x; | 768 | *vp_size += int_part < *vp_offset ? int_part : *vp_offset; |
| 864 | data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x; | 769 | *vp_offset -= int_part < *vp_offset ? int_part : *vp_offset; |
| 865 | } | ||
| 866 | if (data->viewport_c.x) { | ||
| 867 | int int_part = dc_fixpt_floor( | ||
| 868 | dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c)); | ||
| 869 | |||
| 870 | int_part = int_part > 0 ? int_part : 0; | ||
| 871 | data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x; | ||
| 872 | data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x; | ||
| 873 | } | 770 | } |
| 874 | 771 | ||
| 875 | /* Adjust for viewport end clip-off */ | 772 | /* Adjust for viewport end clip-off */ |
| 876 | if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) { | 773 | if ((*vp_offset + *vp_size) < src_size) { |
| 877 | int int_part; | 774 | int int_part; |
| 878 | int end_offset = src.x + src.width | 775 | int end_offset = src_size - *vp_offset - *vp_size; |
| 879 | - data->viewport.x - data->viewport.width; | ||
| 880 | 776 | ||
| 881 | /* | 777 | /* |
| 882 | * this is init if vp had no offset, keep in mind this is from the | 778 | * this is init if vp had no offset, keep in mind this is from the |
| 883 | * right side of vp due to scan direction | 779 | * right side of vp due to scan direction |
| 884 | */ | 780 | */ |
| 885 | data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int( | 781 | *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip)); |
| 886 | data->ratios.horz, data->recout.x - recout_full->x)); | ||
| 887 | /* | 782 | /* |
| 888 | * this is the difference between first pixel of viewport available to read | 783 | * this is the difference between first pixel of viewport available to read |
| 889 | * and init position, takning into account scan direction | 784 | * and init position, takning into account scan direction |
| 890 | */ | 785 | */ |
| 891 | int_part = dc_fixpt_floor(data->inits.h) - end_offset; | 786 | int_part = dc_fixpt_floor(*init) - end_offset; |
| 892 | if (int_part < data->taps.h_taps) { | 787 | if (int_part < taps) { |
| 893 | int int_adj = end_offset >= (data->taps.h_taps - int_part) ? | 788 | int int_adj = end_offset >= (taps - int_part) ? |
| 894 | (data->taps.h_taps - int_part) : end_offset; | 789 | (taps - int_part) : end_offset; |
| 895 | data->viewport.width += int_adj; | 790 | *vp_size += int_adj; |
| 896 | int_part += int_adj; | 791 | int_part += int_adj; |
| 897 | } else if (int_part > data->taps.h_taps) { | 792 | } else if (int_part > taps) { |
| 898 | data->viewport.width += int_part - data->taps.h_taps; | 793 | *vp_size += int_part - taps; |
| 899 | int_part = data->taps.h_taps; | 794 | int_part = taps; |
| 900 | } | 795 | } |
| 901 | data->inits.h.value &= 0xffffffff; | 796 | init->value &= 0xffffffff; |
| 902 | data->inits.h = dc_fixpt_add_int(data->inits.h, int_part); | 797 | *init = dc_fixpt_add_int(*init, int_part); |
| 903 | } | 798 | } |
| 904 | |||
| 905 | if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) { | ||
| 906 | int int_part; | ||
| 907 | int end_offset = (src.x + src.width) / vpc_div | ||
| 908 | - data->viewport_c.x - data->viewport_c.width; | ||
| 909 | |||
| 910 | /* | ||
| 911 | * this is init if vp had no offset, keep in mind this is from the | ||
| 912 | * right side of vp due to scan direction | ||
| 913 | */ | ||
| 914 | data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int( | ||
| 915 | data->ratios.horz_c, data->recout.x - recout_full->x)); | ||
| 916 | /* | ||
| 917 | * this is the difference between first pixel of viewport available to read | ||
| 918 | * and init position, takning into account scan direction | ||
| 919 | */ | ||
| 920 | int_part = dc_fixpt_floor(data->inits.h_c) - end_offset; | ||
| 921 | if (int_part < data->taps.h_taps_c) { | ||
| 922 | int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ? | ||
| 923 | (data->taps.h_taps_c - int_part) : end_offset; | ||
| 924 | data->viewport_c.width += int_adj; | ||
| 925 | int_part += int_adj; | ||
| 926 | } else if (int_part > data->taps.h_taps_c) { | ||
| 927 | data->viewport_c.width += int_part - data->taps.h_taps_c; | ||
| 928 | int_part = data->taps.h_taps_c; | ||
| 929 | } | ||
| 930 | data->inits.h_c.value &= 0xffffffff; | ||
| 931 | data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part); | ||
| 932 | } | ||
| 933 | |||
| 934 | } | 799 | } |
| 935 | if (!flip_vert_scan_dir) { | 800 | } |
| 936 | /* Adjust for viewport end clip-off */ | ||
| 937 | if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) { | ||
| 938 | int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y; | ||
| 939 | int int_part = dc_fixpt_floor( | ||
| 940 | dc_fixpt_sub(data->inits.v, data->ratios.vert)); | ||
| 941 | |||
| 942 | int_part = int_part > 0 ? int_part : 0; | ||
| 943 | data->viewport.height += int_part < vp_clip ? int_part : vp_clip; | ||
| 944 | } | ||
| 945 | if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) { | ||
| 946 | int vp_clip = (src.y + src.height) / vpc_div - | ||
| 947 | data->viewport_c.height - data->viewport_c.y; | ||
| 948 | int int_part = dc_fixpt_floor( | ||
| 949 | dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c)); | ||
| 950 | |||
| 951 | int_part = int_part > 0 ? int_part : 0; | ||
| 952 | data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip; | ||
| 953 | } | ||
| 954 | |||
| 955 | /* Adjust for non-0 viewport offset */ | ||
| 956 | if (data->viewport.y) { | ||
| 957 | int int_part; | ||
| 958 | |||
| 959 | data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int( | ||
| 960 | data->ratios.vert, data->recout.y - recout_full->y)); | ||
| 961 | int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y; | ||
| 962 | if (int_part < data->taps.v_taps) { | ||
| 963 | int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ? | ||
| 964 | (data->taps.v_taps - int_part) : data->viewport.y; | ||
| 965 | data->viewport.y -= int_adj; | ||
| 966 | data->viewport.height += int_adj; | ||
| 967 | int_part += int_adj; | ||
| 968 | } else if (int_part > data->taps.v_taps) { | ||
| 969 | data->viewport.y += int_part - data->taps.v_taps; | ||
| 970 | data->viewport.height -= int_part - data->taps.v_taps; | ||
| 971 | int_part = data->taps.v_taps; | ||
| 972 | } | ||
| 973 | data->inits.v.value &= 0xffffffff; | ||
| 974 | data->inits.v = dc_fixpt_add_int(data->inits.v, int_part); | ||
| 975 | } | ||
| 976 | |||
| 977 | if (data->viewport_c.y) { | ||
| 978 | int int_part; | ||
| 979 | 801 | ||
| 980 | data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int( | 802 | static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) |
| 981 | data->ratios.vert_c, data->recout.y - recout_full->y)); | 803 | { |
| 982 | int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y; | 804 | const struct dc_plane_state *plane_state = pipe_ctx->plane_state; |
| 983 | if (int_part < data->taps.v_taps_c) { | 805 | const struct dc_stream_state *stream = pipe_ctx->stream; |
| 984 | int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ? | 806 | struct scaler_data *data = &pipe_ctx->plane_res.scl_data; |
| 985 | (data->taps.v_taps_c - int_part) : data->viewport_c.y; | 807 | struct rect src = pipe_ctx->plane_state->src_rect; |
| 986 | data->viewport_c.y -= int_adj; | 808 | int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v; |
| 987 | data->viewport_c.height += int_adj; | 809 | int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 |
| 988 | int_part += int_adj; | 810 | || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; |
| 989 | } else if (int_part > data->taps.v_taps_c) { | 811 | bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir; |
| 990 | data->viewport_c.y += int_part - data->taps.v_taps_c; | ||
| 991 | data->viewport_c.height -= int_part - data->taps.v_taps_c; | ||
| 992 | int_part = data->taps.v_taps_c; | ||
| 993 | } | ||
| 994 | data->inits.v_c.value &= 0xffffffff; | ||
| 995 | data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part); | ||
| 996 | } | ||
| 997 | } else { | ||
| 998 | /* Adjust for non-0 viewport offset */ | ||
| 999 | if (data->viewport.y) { | ||
| 1000 | int int_part = dc_fixpt_floor( | ||
| 1001 | dc_fixpt_sub(data->inits.v, data->ratios.vert)); | ||
| 1002 | 812 | ||
| 1003 | int_part = int_part > 0 ? int_part : 0; | 813 | /* |
| 1004 | data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y; | 814 | * Need to calculate the scan direction for viewport to make adjustments |
| 1005 | data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y; | 815 | */ |
| 1006 | } | 816 | get_vp_scan_direction( |
| 1007 | if (data->viewport_c.y) { | 817 | plane_state->rotation, |
| 1008 | int int_part = dc_fixpt_floor( | 818 | plane_state->horizontal_mirror, |
| 1009 | dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c)); | 819 | &orthogonal_rotation, |
| 820 | &flip_vert_scan_dir, | ||
| 821 | &flip_horz_scan_dir); | ||
| 822 | |||
| 823 | /* Calculate src rect rotation adjusted to recout space */ | ||
| 824 | surf_size_h = src.x + src.width; | ||
| 825 | surf_size_v = src.y + src.height; | ||
| 826 | if (flip_horz_scan_dir) | ||
| 827 | src.x = 0; | ||
| 828 | if (flip_vert_scan_dir) | ||
| 829 | src.y = 0; | ||
| 830 | if (orthogonal_rotation) { | ||
| 831 | swap(src.x, src.y); | ||
| 832 | swap(src.width, src.height); | ||
| 833 | } | ||
| 1010 | 834 | ||
| 1011 | int_part = int_part > 0 ? int_part : 0; | 835 | /* Recout matching initial vp offset = recout_offset - (stream dst offset + |
| 1012 | data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y; | 836 | * ((surf dst offset - stream src offset) * 1/ stream scaling ratio) |
| 1013 | data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y; | 837 | * - (surf surf_src offset * 1/ full scl ratio)) |
| 1014 | } | 838 | */ |
| 839 | recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x) | ||
| 840 | * stream->dst.width / stream->src.width - | ||
| 841 | src.x * plane_state->dst_rect.width / src.width | ||
| 842 | * stream->dst.width / stream->src.width); | ||
| 843 | recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y) | ||
| 844 | * stream->dst.height / stream->src.height - | ||
| 845 | src.y * plane_state->dst_rect.height / src.height | ||
| 846 | * stream->dst.height / stream->src.height); | ||
| 847 | if (orthogonal_rotation) | ||
| 848 | swap(recout_skip_h, recout_skip_v); | ||
| 849 | /* | ||
| 850 | * Init calculated according to formula: | ||
| 851 | * init = (scaling_ratio + number_of_taps + 1) / 2 | ||
| 852 | * init_bot = init + scaling_ratio | ||
| 853 | * init_c = init + truncated_vp_c_offset(from calculate viewport) | ||
| 854 | */ | ||
| 855 | data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int( | ||
| 856 | dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19); | ||
| 1015 | 857 | ||
| 1016 | /* Adjust for viewport end clip-off */ | 858 | data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int( |
| 1017 | if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) { | 859 | dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19); |
| 1018 | int int_part; | ||
| 1019 | int end_offset = src.y + src.height | ||
| 1020 | - data->viewport.y - data->viewport.height; | ||
| 1021 | 860 | ||
| 1022 | /* | 861 | data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int( |
| 1023 | * this is init if vp had no offset, keep in mind this is from the | 862 | dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19); |
| 1024 | * right side of vp due to scan direction | ||
| 1025 | */ | ||
| 1026 | data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int( | ||
| 1027 | data->ratios.vert, data->recout.y - recout_full->y)); | ||
| 1028 | /* | ||
| 1029 | * this is the difference between first pixel of viewport available to read | ||
| 1030 | * and init position, taking into account scan direction | ||
| 1031 | */ | ||
| 1032 | int_part = dc_fixpt_floor(data->inits.v) - end_offset; | ||
| 1033 | if (int_part < data->taps.v_taps) { | ||
| 1034 | int int_adj = end_offset >= (data->taps.v_taps - int_part) ? | ||
| 1035 | (data->taps.v_taps - int_part) : end_offset; | ||
| 1036 | data->viewport.height += int_adj; | ||
| 1037 | int_part += int_adj; | ||
| 1038 | } else if (int_part > data->taps.v_taps) { | ||
| 1039 | data->viewport.height += int_part - data->taps.v_taps; | ||
| 1040 | int_part = data->taps.v_taps; | ||
| 1041 | } | ||
| 1042 | data->inits.v.value &= 0xffffffff; | ||
| 1043 | data->inits.v = dc_fixpt_add_int(data->inits.v, int_part); | ||
| 1044 | } | ||
| 1045 | 863 | ||
| 1046 | if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) { | 864 | data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int( |
| 1047 | int int_part; | 865 | dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19); |
| 1048 | int end_offset = (src.y + src.height) / vpc_div | ||
| 1049 | - data->viewport_c.y - data->viewport_c.height; | ||
| 1050 | 866 | ||
| 1051 | /* | 867 | /* |
| 1052 | * this is init if vp had no offset, keep in mind this is from the | 868 | * Taps, inits and scaling ratios are in recout space need to rotate |
| 1053 | * right side of vp due to scan direction | 869 | * to viewport rotation before adjustment |
| 1054 | */ | 870 | */ |
| 1055 | data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int( | 871 | adjust_vp_and_init_for_seamless_clip( |
| 1056 | data->ratios.vert_c, data->recout.y - recout_full->y)); | 872 | flip_horz_scan_dir, |
| 1057 | /* | 873 | recout_skip_h, |
| 1058 | * this is the difference between first pixel of viewport available to read | 874 | surf_size_h, |
| 1059 | * and init position, taking into account scan direction | 875 | orthogonal_rotation ? data->taps.v_taps : data->taps.h_taps, |
| 1060 | */ | 876 | orthogonal_rotation ? data->ratios.vert : data->ratios.horz, |
| 1061 | int_part = dc_fixpt_floor(data->inits.v_c) - end_offset; | 877 | orthogonal_rotation ? &data->inits.v : &data->inits.h, |
| 1062 | if (int_part < data->taps.v_taps_c) { | 878 | &data->viewport.x, |
| 1063 | int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ? | 879 | &data->viewport.width); |
| 1064 | (data->taps.v_taps_c - int_part) : end_offset; | 880 | adjust_vp_and_init_for_seamless_clip( |
| 1065 | data->viewport_c.height += int_adj; | 881 | flip_horz_scan_dir, |
| 1066 | int_part += int_adj; | 882 | recout_skip_h, |
| 1067 | } else if (int_part > data->taps.v_taps_c) { | 883 | surf_size_h / vpc_div, |
| 1068 | data->viewport_c.height += int_part - data->taps.v_taps_c; | 884 | orthogonal_rotation ? data->taps.v_taps_c : data->taps.h_taps_c, |
| 1069 | int_part = data->taps.v_taps_c; | 885 | orthogonal_rotation ? data->ratios.vert_c : data->ratios.horz_c, |
| 1070 | } | 886 | orthogonal_rotation ? &data->inits.v_c : &data->inits.h_c, |
| 1071 | data->inits.v_c.value &= 0xffffffff; | 887 | &data->viewport_c.x, |
| 1072 | data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part); | 888 | &data->viewport_c.width); |
| 1073 | } | 889 | adjust_vp_and_init_for_seamless_clip( |
| 1074 | } | 890 | flip_vert_scan_dir, |
| 891 | recout_skip_v, | ||
| 892 | surf_size_v, | ||
| 893 | orthogonal_rotation ? data->taps.h_taps : data->taps.v_taps, | ||
| 894 | orthogonal_rotation ? data->ratios.horz : data->ratios.vert, | ||
| 895 | orthogonal_rotation ? &data->inits.h : &data->inits.v, | ||
| 896 | &data->viewport.y, | ||
| 897 | &data->viewport.height); | ||
| 898 | adjust_vp_and_init_for_seamless_clip( | ||
| 899 | flip_vert_scan_dir, | ||
| 900 | recout_skip_v, | ||
| 901 | surf_size_v / vpc_div, | ||
| 902 | orthogonal_rotation ? data->taps.h_taps_c : data->taps.v_taps_c, | ||
| 903 | orthogonal_rotation ? data->ratios.horz_c : data->ratios.vert_c, | ||
| 904 | orthogonal_rotation ? &data->inits.h_c : &data->inits.v_c, | ||
| 905 | &data->viewport_c.y, | ||
| 906 | &data->viewport_c.height); | ||
| 1075 | 907 | ||
| 1076 | /* Interlaced inits based on final vert inits */ | 908 | /* Interlaced inits based on final vert inits */ |
| 1077 | data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert); | 909 | data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert); |
| 1078 | data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); | 910 | data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); |
| 1079 | 911 | ||
| 1080 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || | ||
| 1081 | pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { | ||
| 1082 | rect_swap_helper(&data->viewport_c); | ||
| 1083 | rect_swap_helper(&data->viewport); | ||
| 1084 | } | ||
| 1085 | } | 912 | } |
| 1086 | 913 | ||
| 1087 | bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | 914 | bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) |
| 1088 | { | 915 | { |
| 1089 | const struct dc_plane_state *plane_state = pipe_ctx->plane_state; | 916 | const struct dc_plane_state *plane_state = pipe_ctx->plane_state; |
| 1090 | struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; | 917 | struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; |
| 1091 | struct rect recout_full = { 0 }; | ||
| 1092 | bool res = false; | 918 | bool res = false; |
| 1093 | DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); | 919 | DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); |
| 1094 | /* Important: scaling ratio calculation requires pixel format, | 920 | /* Important: scaling ratio calculation requires pixel format, |
| @@ -1105,7 +931,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | |||
| 1105 | if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16) | 931 | if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16) |
| 1106 | return false; | 932 | return false; |
| 1107 | 933 | ||
| 1108 | calculate_recout(pipe_ctx, &recout_full); | 934 | calculate_recout(pipe_ctx); |
| 1109 | 935 | ||
| 1110 | /** | 936 | /** |
| 1111 | * Setting line buffer pixel depth to 24bpp yields banding | 937 | * Setting line buffer pixel depth to 24bpp yields banding |
| @@ -1146,7 +972,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | |||
| 1146 | 972 | ||
| 1147 | if (res) | 973 | if (res) |
| 1148 | /* May need to re-check lb size after this in some obscure scenario */ | 974 | /* May need to re-check lb size after this in some obscure scenario */ |
| 1149 | calculate_inits_and_adj_vp(pipe_ctx, &recout_full); | 975 | calculate_inits_and_adj_vp(pipe_ctx); |
| 1150 | 976 | ||
| 1151 | DC_LOG_SCALER( | 977 | DC_LOG_SCALER( |
| 1152 | "%s: Viewport:\nheight:%d width:%d x:%d " | 978 | "%s: Viewport:\nheight:%d width:%d x:%d " |
| @@ -1356,6 +1182,9 @@ bool dc_add_plane_to_context( | |||
| 1356 | return false; | 1182 | return false; |
| 1357 | } | 1183 | } |
| 1358 | 1184 | ||
| 1185 | tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream); | ||
| 1186 | ASSERT(tail_pipe); | ||
| 1187 | |||
| 1359 | free_pipe = acquire_free_pipe_for_stream(context, pool, stream); | 1188 | free_pipe = acquire_free_pipe_for_stream(context, pool, stream); |
| 1360 | 1189 | ||
| 1361 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) | 1190 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) |
| @@ -1373,10 +1202,6 @@ bool dc_add_plane_to_context( | |||
| 1373 | free_pipe->plane_state = plane_state; | 1202 | free_pipe->plane_state = plane_state; |
| 1374 | 1203 | ||
| 1375 | if (head_pipe != free_pipe) { | 1204 | if (head_pipe != free_pipe) { |
| 1376 | |||
| 1377 | tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream); | ||
| 1378 | ASSERT(tail_pipe); | ||
| 1379 | |||
| 1380 | free_pipe->stream_res.tg = tail_pipe->stream_res.tg; | 1205 | free_pipe->stream_res.tg = tail_pipe->stream_res.tg; |
| 1381 | free_pipe->stream_res.abm = tail_pipe->stream_res.abm; | 1206 | free_pipe->stream_res.abm = tail_pipe->stream_res.abm; |
| 1382 | free_pipe->stream_res.opp = tail_pipe->stream_res.opp; | 1207 | free_pipe->stream_res.opp = tail_pipe->stream_res.opp; |
| @@ -1796,11 +1621,11 @@ enum dc_status dc_add_stream_to_ctx( | |||
| 1796 | struct dc_state *new_ctx, | 1621 | struct dc_state *new_ctx, |
| 1797 | struct dc_stream_state *stream) | 1622 | struct dc_stream_state *stream) |
| 1798 | { | 1623 | { |
| 1799 | struct dc_context *dc_ctx = dc->ctx; | ||
| 1800 | enum dc_status res; | 1624 | enum dc_status res; |
| 1625 | DC_LOGGER_INIT(dc->ctx->logger); | ||
| 1801 | 1626 | ||
| 1802 | if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) { | 1627 | if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) { |
| 1803 | DC_ERROR("Max streams reached, can't add stream %p !\n", stream); | 1628 | DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); |
| 1804 | return DC_ERROR_UNEXPECTED; | 1629 | return DC_ERROR_UNEXPECTED; |
| 1805 | } | 1630 | } |
| 1806 | 1631 | ||
| @@ -1810,7 +1635,7 @@ enum dc_status dc_add_stream_to_ctx( | |||
| 1810 | 1635 | ||
| 1811 | res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream); | 1636 | res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream); |
| 1812 | if (res != DC_OK) | 1637 | if (res != DC_OK) |
| 1813 | DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res); | 1638 | DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); |
| 1814 | 1639 | ||
| 1815 | return res; | 1640 | return res; |
| 1816 | } | 1641 | } |
| @@ -1976,6 +1801,8 @@ enum dc_status resource_map_pool_resources( | |||
| 1976 | } | 1801 | } |
| 1977 | */ | 1802 | */ |
| 1978 | 1803 | ||
| 1804 | calculate_phy_pix_clks(stream); | ||
| 1805 | |||
| 1979 | /* acquire new resources */ | 1806 | /* acquire new resources */ |
| 1980 | pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); | 1807 | pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); |
| 1981 | 1808 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index e113439aaa86..780838a05f44 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c | |||
| @@ -100,8 +100,6 @@ static void construct(struct dc_stream_state *stream, | |||
| 100 | /* EDID CAP translation for HDMI 2.0 */ | 100 | /* EDID CAP translation for HDMI 2.0 */ |
| 101 | stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble; | 101 | stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble; |
| 102 | 102 | ||
| 103 | stream->status.link = stream->sink->link; | ||
| 104 | |||
| 105 | update_stream_signal(stream); | 103 | update_stream_signal(stream); |
| 106 | 104 | ||
| 107 | stream->out_transfer_func = dc_create_transfer_func(); | 105 | stream->out_transfer_func = dc_create_transfer_func(); |
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index d16a20c84792..dea8bc39c688 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h | |||
| @@ -36,9 +36,10 @@ | |||
| 36 | 36 | ||
| 37 | #include "inc/hw_sequencer.h" | 37 | #include "inc/hw_sequencer.h" |
| 38 | #include "inc/compressor.h" | 38 | #include "inc/compressor.h" |
| 39 | #include "inc/hw/dmcu.h" | ||
| 39 | #include "dml/display_mode_lib.h" | 40 | #include "dml/display_mode_lib.h" |
| 40 | 41 | ||
| 41 | #define DC_VER "3.2.04" | 42 | #define DC_VER "3.2.06" |
| 42 | 43 | ||
| 43 | #define MAX_SURFACES 3 | 44 | #define MAX_SURFACES 3 |
| 44 | #define MAX_STREAMS 6 | 45 | #define MAX_STREAMS 6 |
| @@ -47,13 +48,6 @@ | |||
| 47 | /******************************************************************************* | 48 | /******************************************************************************* |
| 48 | * Display Core Interfaces | 49 | * Display Core Interfaces |
| 49 | ******************************************************************************/ | 50 | ******************************************************************************/ |
| 50 | struct dmcu_version { | ||
| 51 | unsigned int date; | ||
| 52 | unsigned int month; | ||
| 53 | unsigned int year; | ||
| 54 | unsigned int interface_version; | ||
| 55 | }; | ||
| 56 | |||
| 57 | struct dc_versions { | 51 | struct dc_versions { |
| 58 | const char *dc_ver; | 52 | const char *dc_ver; |
| 59 | struct dmcu_version dmcu_version; | 53 | struct dmcu_version dmcu_version; |
| @@ -748,5 +742,6 @@ void dc_set_power_state( | |||
| 748 | struct dc *dc, | 742 | struct dc *dc, |
| 749 | enum dc_acpi_cm_power_state power_state); | 743 | enum dc_acpi_cm_power_state power_state); |
| 750 | void dc_resume(struct dc *dc); | 744 | void dc_resume(struct dc *dc); |
| 745 | bool dc_is_dmcu_initialized(struct dc *dc); | ||
| 751 | 746 | ||
| 752 | #endif /* DC_INTERFACE_H_ */ | 747 | #endif /* DC_INTERFACE_H_ */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index c5bd1fbb6982..771d9f17e26e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h | |||
| @@ -104,8 +104,6 @@ struct dc_stream_state { | |||
| 104 | bool dpms_off; | 104 | bool dpms_off; |
| 105 | bool apply_edp_fast_boot_optimization; | 105 | bool apply_edp_fast_boot_optimization; |
| 106 | 106 | ||
| 107 | struct dc_stream_status status; | ||
| 108 | |||
| 109 | struct dc_cursor_attributes cursor_attributes; | 107 | struct dc_cursor_attributes cursor_attributes; |
| 110 | struct dc_cursor_position cursor_position; | 108 | struct dc_cursor_position cursor_position; |
| 111 | uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode | 109 | uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index 9a28a04417d1..bd22f51813bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | |||
| @@ -94,7 +94,7 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = { | |||
| 94 | /*ClocksStatePerformance*/ | 94 | /*ClocksStatePerformance*/ |
| 95 | { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; | 95 | { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; |
| 96 | 96 | ||
| 97 | static int dentist_get_divider_from_did(int did) | 97 | int dentist_get_divider_from_did(int did) |
| 98 | { | 98 | { |
| 99 | if (did < DENTIST_BASE_DID_1) | 99 | if (did < DENTIST_BASE_DID_1) |
| 100 | did = DENTIST_BASE_DID_1; | 100 | did = DENTIST_BASE_DID_1; |
| @@ -277,7 +277,8 @@ static int dce_set_clock( | |||
| 277 | if (requested_clk_khz == 0) | 277 | if (requested_clk_khz == 0) |
| 278 | clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | 278 | clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; |
| 279 | 279 | ||
| 280 | dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7); | 280 | if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) |
| 281 | dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7); | ||
| 281 | 282 | ||
| 282 | return actual_clock; | 283 | return actual_clock; |
| 283 | } | 284 | } |
| @@ -324,9 +325,11 @@ int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz) | |||
| 324 | bp->funcs->set_dce_clock(bp, &dce_clk_params); | 325 | bp->funcs->set_dce_clock(bp, &dce_clk_params); |
| 325 | 326 | ||
| 326 | if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { | 327 | if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { |
| 327 | if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) | 328 | if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { |
| 328 | dmcu->funcs->set_psr_wait_loop(dmcu, | 329 | if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) |
| 329 | actual_clock / 1000 / 7); | 330 | dmcu->funcs->set_psr_wait_loop(dmcu, |
| 331 | actual_clock / 1000 / 7); | ||
| 332 | } | ||
| 330 | } | 333 | } |
| 331 | 334 | ||
| 332 | clk_mgr_dce->dfs_bypass_disp_clk = actual_clock; | 335 | clk_mgr_dce->dfs_bypass_disp_clk = actual_clock; |
| @@ -588,6 +591,8 @@ static void dce11_pplib_apply_display_requirements( | |||
| 588 | dc, | 591 | dc, |
| 589 | context->bw.dce.sclk_khz); | 592 | context->bw.dce.sclk_khz); |
| 590 | 593 | ||
| 594 | pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; | ||
| 595 | |||
| 591 | pp_display_cfg->min_engine_clock_deep_sleep_khz | 596 | pp_display_cfg->min_engine_clock_deep_sleep_khz |
| 592 | = context->bw.dce.sclk_deep_sleep_khz; | 597 | = context->bw.dce.sclk_deep_sleep_khz; |
| 593 | 598 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h index 046077797416..3bceb31d910d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h | |||
| @@ -165,4 +165,6 @@ struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx); | |||
| 165 | 165 | ||
| 166 | void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr); | 166 | void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr); |
| 167 | 167 | ||
| 168 | int dentist_get_divider_from_did(int did); | ||
| 169 | |||
| 168 | #endif /* _DCE_CLK_MGR_H_ */ | 170 | #endif /* _DCE_CLK_MGR_H_ */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index c47c81883d3c..cce0d18f91da 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | |||
| @@ -908,7 +908,6 @@ static void dce110_stream_encoder_dp_blank( | |||
| 908 | struct stream_encoder *enc) | 908 | struct stream_encoder *enc) |
| 909 | { | 909 | { |
| 910 | struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); | 910 | struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); |
| 911 | uint32_t retries = 0; | ||
| 912 | uint32_t reg1 = 0; | 911 | uint32_t reg1 = 0; |
| 913 | uint32_t max_retries = DP_BLANK_MAX_RETRY * 10; | 912 | uint32_t max_retries = DP_BLANK_MAX_RETRY * 10; |
| 914 | 913 | ||
| @@ -926,30 +925,28 @@ static void dce110_stream_encoder_dp_blank( | |||
| 926 | * (2 = start of the next vertical blank) */ | 925 | * (2 = start of the next vertical blank) */ |
| 927 | REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2); | 926 | REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2); |
| 928 | /* Larger delay to wait until VBLANK - use max retry of | 927 | /* Larger delay to wait until VBLANK - use max retry of |
| 929 | * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode + | 928 | * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode + |
| 930 | * a little more because we may not trust delay accuracy. | 929 | * a little more because we may not trust delay accuracy. |
| 931 | */ | 930 | */ |
| 932 | max_retries = DP_BLANK_MAX_RETRY * 150; | 931 | max_retries = DP_BLANK_MAX_RETRY * 150; |
| 933 | 932 | ||
| 934 | /* disable DP stream */ | 933 | /* disable DP stream */ |
| 935 | REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); | 934 | REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); |
| 936 | 935 | ||
| 937 | /* the encoder stops sending the video stream | 936 | /* the encoder stops sending the video stream |
| 938 | * at the start of the vertical blanking. | 937 | * at the start of the vertical blanking. |
| 939 | * Poll for DP_VID_STREAM_STATUS == 0 | 938 | * Poll for DP_VID_STREAM_STATUS == 0 |
| 940 | */ | 939 | */ |
| 941 | 940 | ||
| 942 | REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, | 941 | REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, |
| 943 | 0, | 942 | 0, |
| 944 | 10, max_retries); | 943 | 10, max_retries); |
| 945 | 944 | ||
| 946 | ASSERT(retries <= max_retries); | ||
| 947 | |||
| 948 | /* Tell the DP encoder to ignore timing from CRTC, must be done after | 945 | /* Tell the DP encoder to ignore timing from CRTC, must be done after |
| 949 | * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is | 946 | * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is |
| 950 | * complete, stream status will be stuck in video stream enabled state, | 947 | * complete, stream status will be stuck in video stream enabled state, |
| 951 | * i.e. DP_VID_STREAM_STATUS stuck at 1. | 948 | * i.e. DP_VID_STREAM_STATUS stuck at 1. |
| 952 | */ | 949 | */ |
| 953 | 950 | ||
| 954 | REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true); | 951 | REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true); |
| 955 | } | 952 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 9724a17e352b..2f062bacd78a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
| @@ -2282,7 +2282,7 @@ static void dce110_enable_per_frame_crtc_position_reset( | |||
| 2282 | int i; | 2282 | int i; |
| 2283 | 2283 | ||
| 2284 | gsl_params.gsl_group = 0; | 2284 | gsl_params.gsl_group = 0; |
| 2285 | gsl_params.gsl_master = grouped_pipes[0]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst; | 2285 | gsl_params.gsl_master = 0; |
| 2286 | 2286 | ||
| 2287 | for (i = 0; i < group_size; i++) | 2287 | for (i = 0; i < group_size; i++) |
| 2288 | grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock( | 2288 | grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock( |
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 6d40b3d54ac1..cdd1d6b7b9f2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | |||
| @@ -41,7 +41,6 @@ | |||
| 41 | #include "dce/dce_mem_input.h" | 41 | #include "dce/dce_mem_input.h" |
| 42 | #include "dce/dce_link_encoder.h" | 42 | #include "dce/dce_link_encoder.h" |
| 43 | #include "dce/dce_stream_encoder.h" | 43 | #include "dce/dce_stream_encoder.h" |
| 44 | #include "dce/dce_mem_input.h" | ||
| 45 | #include "dce/dce_ipp.h" | 44 | #include "dce/dce_ipp.h" |
| 46 | #include "dce/dce_transform.h" | 45 | #include "dce/dce_transform.h" |
| 47 | #include "dce/dce_opp.h" | 46 | #include "dce/dce_opp.h" |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c index 20f531d27e2b..f9d7d2c26cc2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c | |||
| @@ -223,7 +223,7 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, | |||
| 223 | &dc->res_pool->pp_smu_req; | 223 | &dc->res_pool->pp_smu_req; |
| 224 | struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; | 224 | struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; |
| 225 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | 225 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; |
| 226 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; | 226 | uint32_t requested_dcf_clock_in_khz = 0; |
| 227 | bool send_request_to_increase = false; | 227 | bool send_request_to_increase = false; |
| 228 | bool send_request_to_lower = false; | 228 | bool send_request_to_lower = false; |
| 229 | int display_count; | 229 | int display_count; |
| @@ -263,8 +263,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, | |||
| 263 | // F Clock | 263 | // F Clock |
| 264 | if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { | 264 | if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { |
| 265 | clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; | 265 | clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; |
| 266 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK; | ||
| 267 | clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz; | ||
| 268 | smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; | 266 | smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; |
| 269 | 267 | ||
| 270 | notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz); | 268 | notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz); |
| @@ -293,10 +291,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, | |||
| 293 | */ | 291 | */ |
| 294 | if (send_request_to_increase) { | 292 | if (send_request_to_increase) { |
| 295 | /*use dcfclk to request voltage*/ | 293 | /*use dcfclk to request voltage*/ |
| 296 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; | 294 | requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); |
| 297 | clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); | ||
| 298 | 295 | ||
| 299 | notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz); | 296 | notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz); |
| 300 | 297 | ||
| 301 | if (pp_smu->set_display_requirement) | 298 | if (pp_smu->set_display_requirement) |
| 302 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); | 299 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); |
| @@ -317,10 +314,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, | |||
| 317 | 314 | ||
| 318 | if (!send_request_to_increase && send_request_to_lower) { | 315 | if (!send_request_to_increase && send_request_to_lower) { |
| 319 | /*use dcfclk to request voltage*/ | 316 | /*use dcfclk to request voltage*/ |
| 320 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; | 317 | requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); |
| 321 | clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); | ||
| 322 | 318 | ||
| 323 | notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz); | 319 | notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz); |
| 324 | 320 | ||
| 325 | if (pp_smu->set_display_requirement) | 321 | if (pp_smu->set_display_requirement) |
| 326 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); | 322 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index 4254e7e1a509..c7d1e678ebf5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | |||
| @@ -100,7 +100,7 @@ bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub) | |||
| 100 | REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL, | 100 | REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL, |
| 101 | DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable); | 101 | DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable); |
| 102 | 102 | ||
| 103 | return true ? false : enable; | 103 | return enable ? true : false; |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | 106 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 74132a1f3046..345af015d061 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | |||
| @@ -99,6 +99,14 @@ static unsigned int hubp1_get_underflow_status(struct hubp *hubp) | |||
| 99 | return hubp_underflow; | 99 | return hubp_underflow; |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | |||
| 103 | void hubp1_clear_underflow(struct hubp *hubp) | ||
| 104 | { | ||
| 105 | struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); | ||
| 106 | |||
| 107 | REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1); | ||
| 108 | } | ||
| 109 | |||
| 102 | static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank) | 110 | static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank) |
| 103 | { | 111 | { |
| 104 | struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); | 112 | struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); |
| @@ -565,19 +573,6 @@ void hubp1_program_deadline( | |||
| 565 | REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler, | 573 | REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler, |
| 566 | DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler); | 574 | DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler); |
| 567 | 575 | ||
| 568 | if (REG(PREFETCH_SETTINS)) | ||
| 569 | REG_SET_2(PREFETCH_SETTINS, 0, | ||
| 570 | DST_Y_PREFETCH, dlg_attr->dst_y_prefetch, | ||
| 571 | VRATIO_PREFETCH, dlg_attr->vratio_prefetch); | ||
| 572 | else | ||
| 573 | REG_SET_2(PREFETCH_SETTINGS, 0, | ||
| 574 | DST_Y_PREFETCH, dlg_attr->dst_y_prefetch, | ||
| 575 | VRATIO_PREFETCH, dlg_attr->vratio_prefetch); | ||
| 576 | |||
| 577 | REG_SET_2(VBLANK_PARAMETERS_0, 0, | ||
| 578 | DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank, | ||
| 579 | DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank); | ||
| 580 | |||
| 581 | REG_SET(REF_FREQ_TO_PIX_FREQ, 0, | 576 | REG_SET(REF_FREQ_TO_PIX_FREQ, 0, |
| 582 | REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq); | 577 | REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq); |
| 583 | 578 | ||
| @@ -585,9 +580,6 @@ void hubp1_program_deadline( | |||
| 585 | REG_SET(VBLANK_PARAMETERS_1, 0, | 580 | REG_SET(VBLANK_PARAMETERS_1, 0, |
| 586 | REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l); | 581 | REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l); |
| 587 | 582 | ||
| 588 | REG_SET(VBLANK_PARAMETERS_3, 0, | ||
| 589 | REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l); | ||
| 590 | |||
| 591 | if (REG(NOM_PARAMETERS_0)) | 583 | if (REG(NOM_PARAMETERS_0)) |
| 592 | REG_SET(NOM_PARAMETERS_0, 0, | 584 | REG_SET(NOM_PARAMETERS_0, 0, |
| 593 | DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l); | 585 | DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l); |
| @@ -602,27 +594,13 @@ void hubp1_program_deadline( | |||
| 602 | REG_SET(NOM_PARAMETERS_5, 0, | 594 | REG_SET(NOM_PARAMETERS_5, 0, |
| 603 | REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l); | 595 | REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l); |
| 604 | 596 | ||
| 605 | REG_SET_2(PER_LINE_DELIVERY_PRE, 0, | ||
| 606 | REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l, | ||
| 607 | REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c); | ||
| 608 | |||
| 609 | REG_SET_2(PER_LINE_DELIVERY, 0, | 597 | REG_SET_2(PER_LINE_DELIVERY, 0, |
| 610 | REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l, | 598 | REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l, |
| 611 | REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c); | 599 | REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c); |
| 612 | 600 | ||
| 613 | if (REG(PREFETCH_SETTINS_C)) | ||
| 614 | REG_SET(PREFETCH_SETTINS_C, 0, | ||
| 615 | VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c); | ||
| 616 | else | ||
| 617 | REG_SET(PREFETCH_SETTINGS_C, 0, | ||
| 618 | VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c); | ||
| 619 | |||
| 620 | REG_SET(VBLANK_PARAMETERS_2, 0, | 601 | REG_SET(VBLANK_PARAMETERS_2, 0, |
| 621 | REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c); | 602 | REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c); |
| 622 | 603 | ||
| 623 | REG_SET(VBLANK_PARAMETERS_4, 0, | ||
| 624 | REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c); | ||
| 625 | |||
| 626 | if (REG(NOM_PARAMETERS_2)) | 604 | if (REG(NOM_PARAMETERS_2)) |
| 627 | REG_SET(NOM_PARAMETERS_2, 0, | 605 | REG_SET(NOM_PARAMETERS_2, 0, |
| 628 | DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c); | 606 | DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c); |
| @@ -642,10 +620,6 @@ void hubp1_program_deadline( | |||
| 642 | QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm, | 620 | QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm, |
| 643 | QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm); | 621 | QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm); |
| 644 | 622 | ||
| 645 | REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0, | ||
| 646 | MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank, | ||
| 647 | QoS_LEVEL_FLIP, ttu_attr->qos_level_flip); | ||
| 648 | |||
| 649 | /* TTU - per luma/chroma */ | 623 | /* TTU - per luma/chroma */ |
| 650 | /* Assumed surf0 is luma and 1 is chroma */ | 624 | /* Assumed surf0 is luma and 1 is chroma */ |
| 651 | 625 | ||
| @@ -654,25 +628,15 @@ void hubp1_program_deadline( | |||
| 654 | QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l, | 628 | QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l, |
| 655 | QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l); | 629 | QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l); |
| 656 | 630 | ||
| 657 | REG_SET(DCN_SURF0_TTU_CNTL1, 0, | ||
| 658 | REFCYC_PER_REQ_DELIVERY_PRE, | ||
| 659 | ttu_attr->refcyc_per_req_delivery_pre_l); | ||
| 660 | |||
| 661 | REG_SET_3(DCN_SURF1_TTU_CNTL0, 0, | 631 | REG_SET_3(DCN_SURF1_TTU_CNTL0, 0, |
| 662 | REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c, | 632 | REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c, |
| 663 | QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c, | 633 | QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c, |
| 664 | QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c); | 634 | QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c); |
| 665 | 635 | ||
| 666 | REG_SET(DCN_SURF1_TTU_CNTL1, 0, | ||
| 667 | REFCYC_PER_REQ_DELIVERY_PRE, | ||
| 668 | ttu_attr->refcyc_per_req_delivery_pre_c); | ||
| 669 | |||
| 670 | REG_SET_3(DCN_CUR0_TTU_CNTL0, 0, | 636 | REG_SET_3(DCN_CUR0_TTU_CNTL0, 0, |
| 671 | REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0, | 637 | REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0, |
| 672 | QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0, | 638 | QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0, |
| 673 | QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0); | 639 | QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0); |
| 674 | REG_SET(DCN_CUR0_TTU_CNTL1, 0, | ||
| 675 | REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0); | ||
| 676 | } | 640 | } |
| 677 | 641 | ||
| 678 | static void hubp1_setup( | 642 | static void hubp1_setup( |
| @@ -690,6 +654,48 @@ static void hubp1_setup( | |||
| 690 | hubp1_vready_workaround(hubp, pipe_dest); | 654 | hubp1_vready_workaround(hubp, pipe_dest); |
| 691 | } | 655 | } |
| 692 | 656 | ||
| 657 | static void hubp1_setup_interdependent( | ||
| 658 | struct hubp *hubp, | ||
| 659 | struct _vcs_dpi_display_dlg_regs_st *dlg_attr, | ||
| 660 | struct _vcs_dpi_display_ttu_regs_st *ttu_attr) | ||
| 661 | { | ||
| 662 | struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); | ||
| 663 | |||
| 664 | REG_SET_2(PREFETCH_SETTINS, 0, | ||
| 665 | DST_Y_PREFETCH, dlg_attr->dst_y_prefetch, | ||
| 666 | VRATIO_PREFETCH, dlg_attr->vratio_prefetch); | ||
| 667 | |||
| 668 | REG_SET(PREFETCH_SETTINS_C, 0, | ||
| 669 | VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c); | ||
| 670 | |||
| 671 | REG_SET_2(VBLANK_PARAMETERS_0, 0, | ||
| 672 | DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank, | ||
| 673 | DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank); | ||
| 674 | |||
| 675 | REG_SET(VBLANK_PARAMETERS_3, 0, | ||
| 676 | REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l); | ||
| 677 | |||
| 678 | REG_SET(VBLANK_PARAMETERS_4, 0, | ||
| 679 | REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c); | ||
| 680 | |||
| 681 | REG_SET_2(PER_LINE_DELIVERY_PRE, 0, | ||
| 682 | REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l, | ||
| 683 | REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c); | ||
| 684 | |||
| 685 | REG_SET(DCN_SURF0_TTU_CNTL1, 0, | ||
| 686 | REFCYC_PER_REQ_DELIVERY_PRE, | ||
| 687 | ttu_attr->refcyc_per_req_delivery_pre_l); | ||
| 688 | REG_SET(DCN_SURF1_TTU_CNTL1, 0, | ||
| 689 | REFCYC_PER_REQ_DELIVERY_PRE, | ||
| 690 | ttu_attr->refcyc_per_req_delivery_pre_c); | ||
| 691 | REG_SET(DCN_CUR0_TTU_CNTL1, 0, | ||
| 692 | REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0); | ||
| 693 | |||
| 694 | REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0, | ||
| 695 | MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank, | ||
| 696 | QoS_LEVEL_FLIP, ttu_attr->qos_level_flip); | ||
| 697 | } | ||
| 698 | |||
| 693 | bool hubp1_is_flip_pending(struct hubp *hubp) | 699 | bool hubp1_is_flip_pending(struct hubp *hubp) |
| 694 | { | 700 | { |
| 695 | uint32_t flip_pending = 0; | 701 | uint32_t flip_pending = 0; |
| @@ -1178,6 +1184,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = { | |||
| 1178 | hubp1_program_surface_config, | 1184 | hubp1_program_surface_config, |
| 1179 | .hubp_is_flip_pending = hubp1_is_flip_pending, | 1185 | .hubp_is_flip_pending = hubp1_is_flip_pending, |
| 1180 | .hubp_setup = hubp1_setup, | 1186 | .hubp_setup = hubp1_setup, |
| 1187 | .hubp_setup_interdependent = hubp1_setup_interdependent, | ||
| 1181 | .hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings, | 1188 | .hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings, |
| 1182 | .hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings, | 1189 | .hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings, |
| 1183 | .set_blank = hubp1_set_blank, | 1190 | .set_blank = hubp1_set_blank, |
| @@ -1190,6 +1197,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = { | |||
| 1190 | .hubp_clk_cntl = hubp1_clk_cntl, | 1197 | .hubp_clk_cntl = hubp1_clk_cntl, |
| 1191 | .hubp_vtg_sel = hubp1_vtg_sel, | 1198 | .hubp_vtg_sel = hubp1_vtg_sel, |
| 1192 | .hubp_read_state = hubp1_read_state, | 1199 | .hubp_read_state = hubp1_read_state, |
| 1200 | .hubp_clear_underflow = hubp1_clear_underflow, | ||
| 1193 | .hubp_disable_control = hubp1_disable_control, | 1201 | .hubp_disable_control = hubp1_disable_control, |
| 1194 | .hubp_get_underflow_status = hubp1_get_underflow_status, | 1202 | .hubp_get_underflow_status = hubp1_get_underflow_status, |
| 1195 | 1203 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 4890273b632b..62d4232e7796 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | |||
| @@ -251,6 +251,7 @@ | |||
| 251 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\ | 251 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\ |
| 252 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ | 252 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ |
| 253 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\ | 253 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\ |
| 254 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\ | ||
| 254 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\ | 255 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\ |
| 255 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\ | 256 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\ |
| 256 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\ | 257 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\ |
| @@ -435,6 +436,7 @@ | |||
| 435 | type HUBP_NO_OUTSTANDING_REQ;\ | 436 | type HUBP_NO_OUTSTANDING_REQ;\ |
| 436 | type HUBP_VTG_SEL;\ | 437 | type HUBP_VTG_SEL;\ |
| 437 | type HUBP_UNDERFLOW_STATUS;\ | 438 | type HUBP_UNDERFLOW_STATUS;\ |
| 439 | type HUBP_UNDERFLOW_CLEAR;\ | ||
| 438 | type NUM_PIPES;\ | 440 | type NUM_PIPES;\ |
| 439 | type NUM_BANKS;\ | 441 | type NUM_BANKS;\ |
| 440 | type PIPE_INTERLEAVE;\ | 442 | type PIPE_INTERLEAVE;\ |
| @@ -739,6 +741,7 @@ void dcn10_hubp_construct( | |||
| 739 | const struct dcn_mi_mask *hubp_mask); | 741 | const struct dcn_mi_mask *hubp_mask); |
| 740 | 742 | ||
| 741 | void hubp1_read_state(struct hubp *hubp); | 743 | void hubp1_read_state(struct hubp *hubp); |
| 744 | void hubp1_clear_underflow(struct hubp *hubp); | ||
| 742 | 745 | ||
| 743 | enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch); | 746 | enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch); |
| 744 | 747 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 87495dea45ec..0bd33a713836 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | |||
| @@ -1227,7 +1227,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, | |||
| 1227 | tf = plane_state->in_transfer_func; | 1227 | tf = plane_state->in_transfer_func; |
| 1228 | 1228 | ||
| 1229 | if (plane_state->gamma_correction && | 1229 | if (plane_state->gamma_correction && |
| 1230 | !plane_state->gamma_correction->is_identity | 1230 | !dpp_base->ctx->dc->debug.always_use_regamma |
| 1231 | && !plane_state->gamma_correction->is_identity | ||
| 1231 | && dce_use_lut(plane_state->format)) | 1232 | && dce_use_lut(plane_state->format)) |
| 1232 | dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction); | 1233 | dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction); |
| 1233 | 1234 | ||
| @@ -1400,7 +1401,7 @@ static void dcn10_enable_per_frame_crtc_position_reset( | |||
| 1400 | if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset) | 1401 | if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset) |
| 1401 | grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset( | 1402 | grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset( |
| 1402 | grouped_pipes[i]->stream_res.tg, | 1403 | grouped_pipes[i]->stream_res.tg, |
| 1403 | grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst, | 1404 | 0, |
| 1404 | &grouped_pipes[i]->stream->triggered_crtc_reset); | 1405 | &grouped_pipes[i]->stream->triggered_crtc_reset); |
| 1405 | 1406 | ||
| 1406 | DC_SYNC_INFO("Waiting for trigger\n"); | 1407 | DC_SYNC_INFO("Waiting for trigger\n"); |
| @@ -1770,7 +1771,7 @@ bool is_rgb_cspace(enum dc_color_space output_color_space) | |||
| 1770 | } | 1771 | } |
| 1771 | } | 1772 | } |
| 1772 | 1773 | ||
| 1773 | static void dcn10_get_surface_visual_confirm_color( | 1774 | void dcn10_get_surface_visual_confirm_color( |
| 1774 | const struct pipe_ctx *pipe_ctx, | 1775 | const struct pipe_ctx *pipe_ctx, |
| 1775 | struct tg_color *color) | 1776 | struct tg_color *color) |
| 1776 | { | 1777 | { |
| @@ -1806,7 +1807,7 @@ static void dcn10_get_surface_visual_confirm_color( | |||
| 1806 | } | 1807 | } |
| 1807 | } | 1808 | } |
| 1808 | 1809 | ||
| 1809 | static void dcn10_get_hdr_visual_confirm_color( | 1810 | void dcn10_get_hdr_visual_confirm_color( |
| 1810 | struct pipe_ctx *pipe_ctx, | 1811 | struct pipe_ctx *pipe_ctx, |
| 1811 | struct tg_color *color) | 1812 | struct tg_color *color) |
| 1812 | { | 1813 | { |
| @@ -2067,6 +2068,10 @@ void update_dchubp_dpp( | |||
| 2067 | &pipe_ctx->ttu_regs, | 2068 | &pipe_ctx->ttu_regs, |
| 2068 | &pipe_ctx->rq_regs, | 2069 | &pipe_ctx->rq_regs, |
| 2069 | &pipe_ctx->pipe_dlg_param); | 2070 | &pipe_ctx->pipe_dlg_param); |
| 2071 | hubp->funcs->hubp_setup_interdependent( | ||
| 2072 | hubp, | ||
| 2073 | &pipe_ctx->dlg_regs, | ||
| 2074 | &pipe_ctx->ttu_regs); | ||
| 2070 | } | 2075 | } |
| 2071 | 2076 | ||
| 2072 | size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport; | 2077 | size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport; |
| @@ -2337,6 +2342,32 @@ static void dcn10_apply_ctx_for_surface( | |||
| 2337 | 2342 | ||
| 2338 | dcn10_pipe_control_lock(dc, top_pipe_to_program, false); | 2343 | dcn10_pipe_control_lock(dc, top_pipe_to_program, false); |
| 2339 | 2344 | ||
| 2345 | if (top_pipe_to_program->plane_state && | ||
| 2346 | top_pipe_to_program->plane_state->update_flags.bits.full_update) | ||
| 2347 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
| 2348 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
| 2349 | |||
| 2350 | /* Skip inactive pipes and ones already updated */ | ||
| 2351 | if (!pipe_ctx->stream || pipe_ctx->stream == stream) | ||
| 2352 | continue; | ||
| 2353 | |||
| 2354 | pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); | ||
| 2355 | |||
| 2356 | pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( | ||
| 2357 | pipe_ctx->plane_res.hubp, | ||
| 2358 | &pipe_ctx->dlg_regs, | ||
| 2359 | &pipe_ctx->ttu_regs); | ||
| 2360 | } | ||
| 2361 | |||
| 2362 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
| 2363 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
| 2364 | |||
| 2365 | if (!pipe_ctx->stream || pipe_ctx->stream == stream) | ||
| 2366 | continue; | ||
| 2367 | |||
| 2368 | dcn10_pipe_control_lock(dc, pipe_ctx, false); | ||
| 2369 | } | ||
| 2370 | |||
| 2340 | if (num_planes == 0) | 2371 | if (num_planes == 0) |
| 2341 | false_optc_underflow_wa(dc, stream, tg); | 2372 | false_optc_underflow_wa(dc, stream, tg); |
| 2342 | 2373 | ||
| @@ -2710,6 +2741,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { | |||
| 2710 | .set_avmute = dce110_set_avmute, | 2741 | .set_avmute = dce110_set_avmute, |
| 2711 | .log_hw_state = dcn10_log_hw_state, | 2742 | .log_hw_state = dcn10_log_hw_state, |
| 2712 | .get_hw_state = dcn10_get_hw_state, | 2743 | .get_hw_state = dcn10_get_hw_state, |
| 2744 | .clear_status_bits = dcn10_clear_status_bits, | ||
| 2713 | .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, | 2745 | .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, |
| 2714 | .edp_backlight_control = hwss_edp_backlight_control, | 2746 | .edp_backlight_control = hwss_edp_backlight_control, |
| 2715 | .edp_power_control = hwss_edp_power_control, | 2747 | .edp_power_control = hwss_edp_power_control, |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 5e5610c9e600..f8eea10e4c64 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | |||
| @@ -51,6 +51,8 @@ void dcn10_get_hw_state( | |||
| 51 | char *pBuf, unsigned int bufSize, | 51 | char *pBuf, unsigned int bufSize, |
| 52 | unsigned int mask); | 52 | unsigned int mask); |
| 53 | 53 | ||
| 54 | void dcn10_clear_status_bits(struct dc *dc, unsigned int mask); | ||
| 55 | |||
| 54 | bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); | 56 | bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); |
| 55 | 57 | ||
| 56 | bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); | 58 | bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); |
| @@ -61,6 +63,14 @@ void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp); | |||
| 61 | 63 | ||
| 62 | void set_hdr_multiplier(struct pipe_ctx *pipe_ctx); | 64 | void set_hdr_multiplier(struct pipe_ctx *pipe_ctx); |
| 63 | 65 | ||
| 66 | void dcn10_get_surface_visual_confirm_color( | ||
| 67 | const struct pipe_ctx *pipe_ctx, | ||
| 68 | struct tg_color *color); | ||
| 69 | |||
| 70 | void dcn10_get_hdr_visual_confirm_color( | ||
| 71 | struct pipe_ctx *pipe_ctx, | ||
| 72 | struct tg_color *color); | ||
| 73 | |||
| 64 | void update_dchubp_dpp( | 74 | void update_dchubp_dpp( |
| 65 | struct dc *dc, | 75 | struct dc *dc, |
| 66 | struct pipe_ctx *pipe_ctx, | 76 | struct pipe_ctx *pipe_ctx, |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 64158900730f..211bb240a720 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | |||
| @@ -454,12 +454,6 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int | |||
| 454 | 454 | ||
| 455 | remaining_buffer -= chars_printed; | 455 | remaining_buffer -= chars_printed; |
| 456 | pBuf += chars_printed; | 456 | pBuf += chars_printed; |
| 457 | |||
| 458 | // Clear underflow for debug purposes | ||
| 459 | // We want to keep underflow sticky bit on for the longevity tests outside of test environment. | ||
| 460 | // This function is called only from Windows or Diags test environment, hence it's safe to clear | ||
| 461 | // it from here without affecting the original intent. | ||
| 462 | tg->funcs->clear_optc_underflow(tg); | ||
| 463 | } | 457 | } |
| 464 | } | 458 | } |
| 465 | 459 | ||
| @@ -484,6 +478,59 @@ static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned i | |||
| 484 | return chars_printed; | 478 | return chars_printed; |
| 485 | } | 479 | } |
| 486 | 480 | ||
| 481 | static void dcn10_clear_otpc_underflow(struct dc *dc) | ||
| 482 | { | ||
| 483 | struct resource_pool *pool = dc->res_pool; | ||
| 484 | int i; | ||
| 485 | |||
| 486 | for (i = 0; i < pool->timing_generator_count; i++) { | ||
| 487 | struct timing_generator *tg = pool->timing_generators[i]; | ||
| 488 | struct dcn_otg_state s = {0}; | ||
| 489 | |||
| 490 | optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); | ||
| 491 | |||
| 492 | if (s.otg_enabled & 1) | ||
| 493 | tg->funcs->clear_optc_underflow(tg); | ||
| 494 | } | ||
| 495 | } | ||
| 496 | |||
| 497 | static void dcn10_clear_hubp_underflow(struct dc *dc) | ||
| 498 | { | ||
| 499 | struct resource_pool *pool = dc->res_pool; | ||
| 500 | int i; | ||
| 501 | |||
| 502 | for (i = 0; i < pool->pipe_count; i++) { | ||
| 503 | struct hubp *hubp = pool->hubps[i]; | ||
| 504 | struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state); | ||
| 505 | |||
| 506 | hubp->funcs->hubp_read_state(hubp); | ||
| 507 | |||
| 508 | if (!s->blank_en) | ||
| 509 | hubp->funcs->hubp_clear_underflow(hubp); | ||
| 510 | } | ||
| 511 | } | ||
| 512 | |||
| 513 | void dcn10_clear_status_bits(struct dc *dc, unsigned int mask) | ||
| 514 | { | ||
| 515 | /* | ||
| 516 | * Mask Format | ||
| 517 | * Bit 0 - 31: Status bit to clear | ||
| 518 | * | ||
| 519 | * Mask = 0x0 means clear all status bits | ||
| 520 | */ | ||
| 521 | const unsigned int DC_HW_STATE_MASK_HUBP_UNDERFLOW = 0x1; | ||
| 522 | const unsigned int DC_HW_STATE_MASK_OTPC_UNDERFLOW = 0x2; | ||
| 523 | |||
| 524 | if (mask == 0x0) | ||
| 525 | mask = 0xFFFFFFFF; | ||
| 526 | |||
| 527 | if (mask & DC_HW_STATE_MASK_HUBP_UNDERFLOW) | ||
| 528 | dcn10_clear_hubp_underflow(dc); | ||
| 529 | |||
| 530 | if (mask & DC_HW_STATE_MASK_OTPC_UNDERFLOW) | ||
| 531 | dcn10_clear_otpc_underflow(dc); | ||
| 532 | } | ||
| 533 | |||
| 487 | void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask) | 534 | void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask) |
| 488 | { | 535 | { |
| 489 | /* | 536 | /* |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 7d1f66797cb3..7c138615f17d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | |||
| @@ -335,9 +335,8 @@ void optc1_program_timing( | |||
| 335 | /* Enable stereo - only when we need to pack 3D frame. Other types | 335 | /* Enable stereo - only when we need to pack 3D frame. Other types |
| 336 | * of stereo handled in explicit call | 336 | * of stereo handled in explicit call |
| 337 | */ | 337 | */ |
| 338 | h_div_2 = (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ? | ||
| 339 | 1 : 0; | ||
| 340 | 338 | ||
| 339 | h_div_2 = optc1_is_two_pixels_per_containter(&patched_crtc_timing); | ||
| 341 | REG_UPDATE(OTG_H_TIMING_CNTL, | 340 | REG_UPDATE(OTG_H_TIMING_CNTL, |
| 342 | OTG_H_TIMING_DIV_BY2, h_div_2); | 341 | OTG_H_TIMING_DIV_BY2, h_div_2); |
| 343 | 342 | ||
| @@ -360,20 +359,19 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab | |||
| 360 | static void optc1_unblank_crtc(struct timing_generator *optc) | 359 | static void optc1_unblank_crtc(struct timing_generator *optc) |
| 361 | { | 360 | { |
| 362 | struct optc *optc1 = DCN10TG_FROM_TG(optc); | 361 | struct optc *optc1 = DCN10TG_FROM_TG(optc); |
| 363 | uint32_t vertical_interrupt_enable = 0; | ||
| 364 | |||
| 365 | REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL, | ||
| 366 | OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &vertical_interrupt_enable); | ||
| 367 | |||
| 368 | /* temporary work around for vertical interrupt, once vertical interrupt enabled, | ||
| 369 | * this check will be removed. | ||
| 370 | */ | ||
| 371 | if (vertical_interrupt_enable) | ||
| 372 | optc1_set_blank_data_double_buffer(optc, true); | ||
| 373 | 362 | ||
| 374 | REG_UPDATE_2(OTG_BLANK_CONTROL, | 363 | REG_UPDATE_2(OTG_BLANK_CONTROL, |
| 375 | OTG_BLANK_DATA_EN, 0, | 364 | OTG_BLANK_DATA_EN, 0, |
| 376 | OTG_BLANK_DE_MODE, 0); | 365 | OTG_BLANK_DE_MODE, 0); |
| 366 | |||
| 367 | /* W/A for automated testing | ||
| 368 | * Automated testing will fail underflow test as there | ||
| 369 | * sporadic underflows which occur during the optc blank | ||
| 370 | * sequence. As a w/a, clear underflow on unblank. | ||
| 371 | * This prevents the failure, but will not mask actual | ||
| 372 | * underflow that affect real use cases. | ||
| 373 | */ | ||
| 374 | optc1_clear_optc_underflow(optc); | ||
| 377 | } | 375 | } |
| 378 | 376 | ||
| 379 | /** | 377 | /** |
| @@ -1422,3 +1420,9 @@ void dcn10_timing_generator_init(struct optc *optc1) | |||
| 1422 | optc1->min_h_sync_width = 8; | 1420 | optc1->min_h_sync_width = 8; |
| 1423 | optc1->min_v_sync_width = 1; | 1421 | optc1->min_v_sync_width = 1; |
| 1424 | } | 1422 | } |
| 1423 | |||
| 1424 | bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) | ||
| 1425 | { | ||
| 1426 | return timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; | ||
| 1427 | } | ||
| 1428 | |||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index c1b114209fe8..8bacf0b6e27e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | |||
| @@ -565,4 +565,6 @@ bool optc1_configure_crc(struct timing_generator *optc, | |||
| 565 | bool optc1_get_crc(struct timing_generator *optc, | 565 | bool optc1_get_crc(struct timing_generator *optc, |
| 566 | uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb); | 566 | uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb); |
| 567 | 567 | ||
| 568 | bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); | ||
| 569 | |||
| 568 | #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */ | 570 | #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 6f9078f3c4d3..b8b5525a389a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | |||
| @@ -766,7 +766,6 @@ void enc1_stream_encoder_dp_blank( | |||
| 766 | struct stream_encoder *enc) | 766 | struct stream_encoder *enc) |
| 767 | { | 767 | { |
| 768 | struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); | 768 | struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); |
| 769 | uint32_t retries = 0; | ||
| 770 | uint32_t reg1 = 0; | 769 | uint32_t reg1 = 0; |
| 771 | uint32_t max_retries = DP_BLANK_MAX_RETRY * 10; | 770 | uint32_t max_retries = DP_BLANK_MAX_RETRY * 10; |
| 772 | 771 | ||
| @@ -803,8 +802,6 @@ void enc1_stream_encoder_dp_blank( | |||
| 803 | 0, | 802 | 0, |
| 804 | 10, max_retries); | 803 | 10, max_retries); |
| 805 | 804 | ||
| 806 | ASSERT(retries <= max_retries); | ||
| 807 | |||
| 808 | /* Tell the DP encoder to ignore timing from CRTC, must be done after | 805 | /* Tell the DP encoder to ignore timing from CRTC, must be done after |
| 809 | * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is | 806 | * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is |
| 810 | * complete, stream status will be stuck in video stream enabled state, | 807 | * complete, stream status will be stuck in video stream enabled state, |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index 4550747fb61c..cb85eaa9857f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h | |||
| @@ -32,6 +32,13 @@ enum dmcu_state { | |||
| 32 | DMCU_RUNNING = 1 | 32 | DMCU_RUNNING = 1 |
| 33 | }; | 33 | }; |
| 34 | 34 | ||
| 35 | struct dmcu_version { | ||
| 36 | unsigned int date; | ||
| 37 | unsigned int month; | ||
| 38 | unsigned int year; | ||
| 39 | unsigned int interface_version; | ||
| 40 | }; | ||
| 41 | |||
| 35 | struct dmcu { | 42 | struct dmcu { |
| 36 | struct dc_context *ctx; | 43 | struct dc_context *ctx; |
| 37 | const struct dmcu_funcs *funcs; | 44 | const struct dmcu_funcs *funcs; |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 334c48cdafdc..04c6989aac58 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | |||
| @@ -63,6 +63,11 @@ struct hubp_funcs { | |||
| 63 | struct _vcs_dpi_display_rq_regs_st *rq_regs, | 63 | struct _vcs_dpi_display_rq_regs_st *rq_regs, |
| 64 | struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); | 64 | struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); |
| 65 | 65 | ||
| 66 | void (*hubp_setup_interdependent)( | ||
| 67 | struct hubp *hubp, | ||
| 68 | struct _vcs_dpi_display_dlg_regs_st *dlg_regs, | ||
| 69 | struct _vcs_dpi_display_ttu_regs_st *ttu_regs); | ||
| 70 | |||
| 66 | void (*dcc_control)(struct hubp *hubp, bool enable, | 71 | void (*dcc_control)(struct hubp *hubp, bool enable, |
| 67 | bool independent_64b_blks); | 72 | bool independent_64b_blks); |
| 68 | void (*mem_program_viewport)( | 73 | void (*mem_program_viewport)( |
| @@ -121,6 +126,7 @@ struct hubp_funcs { | |||
| 121 | void (*hubp_clk_cntl)(struct hubp *hubp, bool enable); | 126 | void (*hubp_clk_cntl)(struct hubp *hubp, bool enable); |
| 122 | void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst); | 127 | void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst); |
| 123 | void (*hubp_read_state)(struct hubp *hubp); | 128 | void (*hubp_read_state)(struct hubp *hubp); |
| 129 | void (*hubp_clear_underflow)(struct hubp *hubp); | ||
| 124 | void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp); | 130 | void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp); |
| 125 | unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); | 131 | unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); |
| 126 | 132 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index e9b702ce02dd..d6a85f48b6d1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | |||
| @@ -200,6 +200,7 @@ struct hw_sequencer_funcs { | |||
| 200 | void (*log_hw_state)(struct dc *dc, | 200 | void (*log_hw_state)(struct dc *dc, |
| 201 | struct dc_log_buffer_ctx *log_ctx); | 201 | struct dc_log_buffer_ctx *log_ctx); |
| 202 | void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask); | 202 | void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask); |
| 203 | void (*clear_status_bits)(struct dc *dc, unsigned int mask); | ||
| 203 | 204 | ||
| 204 | void (*wait_for_mpcc_disconnect)(struct dc *dc, | 205 | void (*wait_for_mpcc_disconnect)(struct dc *dc, |
| 205 | struct resource_pool *res_pool, | 206 | struct resource_pool *res_pool, |
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 7480f072c375..bbecbaefb741 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c | |||
| @@ -813,20 +813,26 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, | |||
| 813 | const struct hw_x_point *coord_x = coordinate_x; | 813 | const struct hw_x_point *coord_x = coordinate_x; |
| 814 | struct fixed31_32 scaledX = dc_fixpt_zero; | 814 | struct fixed31_32 scaledX = dc_fixpt_zero; |
| 815 | struct fixed31_32 scaledX1 = dc_fixpt_zero; | 815 | struct fixed31_32 scaledX1 = dc_fixpt_zero; |
| 816 | struct fixed31_32 max_display = dc_fixpt_from_int(fs_params->max_display); | 816 | struct fixed31_32 max_display; |
| 817 | struct fixed31_32 min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000); | 817 | struct fixed31_32 min_display; |
| 818 | struct fixed31_32 max_content = dc_fixpt_from_int(fs_params->max_content); | 818 | struct fixed31_32 max_content; |
| 819 | struct fixed31_32 min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000); | 819 | struct fixed31_32 min_content; |
| 820 | struct fixed31_32 clip = dc_fixpt_one; | 820 | struct fixed31_32 clip = dc_fixpt_one; |
| 821 | struct fixed31_32 output; | 821 | struct fixed31_32 output; |
| 822 | bool use_eetf = false; | 822 | bool use_eetf = false; |
| 823 | bool is_clipped = false; | 823 | bool is_clipped = false; |
| 824 | struct fixed31_32 sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level); | 824 | struct fixed31_32 sdr_white_level; |
| 825 | 825 | ||
| 826 | if (fs_params == NULL || fs_params->max_content == 0 || | 826 | if (fs_params == NULL || fs_params->max_content == 0 || |
| 827 | fs_params->max_display == 0) | 827 | fs_params->max_display == 0) |
| 828 | return false; | 828 | return false; |
| 829 | 829 | ||
| 830 | max_display = dc_fixpt_from_int(fs_params->max_display); | ||
| 831 | min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000); | ||
| 832 | max_content = dc_fixpt_from_int(fs_params->max_content); | ||
| 833 | min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000); | ||
| 834 | sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level); | ||
| 835 | |||
| 830 | if (fs_params->min_display > 1000) // cap at 0.1 at the bottom | 836 | if (fs_params->min_display > 1000) // cap at 0.1 at the bottom |
| 831 | min_display = dc_fixpt_from_fraction(1, 10); | 837 | min_display = dc_fixpt_from_fraction(1, 10); |
| 832 | if (fs_params->max_display < 100) // cap at 100 at the top | 838 | if (fs_params->max_display < 100) // cap at 100 at the top |
diff --git a/drivers/gpu/drm/amd/display/modules/power/Makefile b/drivers/gpu/drm/amd/display/modules/power/Makefile new file mode 100644 index 000000000000..87851f892a52 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/power/Makefile | |||
| @@ -0,0 +1,31 @@ | |||
| 1 | # | ||
| 2 | # Copyright 2017 Advanced Micro Devices, Inc. | ||
| 3 | # | ||
| 4 | # Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | # copy of this software and associated documentation files (the "Software"), | ||
| 6 | # to deal in the Software without restriction, including without limitation | ||
| 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | # and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | # Software is furnished to do so, subject to the following conditions: | ||
| 10 | # | ||
| 11 | # The above copyright notice and this permission notice shall be included in | ||
| 12 | # all copies or substantial portions of the Software. | ||
| 13 | # | ||
| 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | # THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | # OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | # | ||
| 22 | # | ||
| 23 | # Makefile for the 'power' sub-module of DAL. | ||
| 24 | # | ||
| 25 | |||
| 26 | MOD_POWER = power_helpers.o | ||
| 27 | |||
| 28 | AMD_DAL_MOD_POWER = $(addprefix $(AMDDALPATH)/modules/power/,$(MOD_POWER)) | ||
| 29 | #$(info ************ DAL POWER MODULE MAKEFILE ************) | ||
| 30 | |||
| 31 | AMD_DISPLAY_FILES += $(AMD_DAL_MOD_POWER) \ No newline at end of file | ||
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c new file mode 100644 index 000000000000..00f63b7dd32f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c | |||
| @@ -0,0 +1,326 @@ | |||
| 1 | /* Copyright 2018 Advanced Micro Devices, Inc. | ||
| 2 | * | ||
| 3 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 4 | * copy of this software and associated documentation files (the "Software"), | ||
| 5 | * to deal in the Software without restriction, including without limitation | ||
| 6 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 7 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 8 | * Software is furnished to do so, subject to the following conditions: | ||
| 9 | * | ||
| 10 | * The above copyright notice and this permission notice shall be included in | ||
| 11 | * all copies or substantial portions of the Software. | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 17 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 18 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 19 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * Authors: AMD | ||
| 22 | * | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include "power_helpers.h" | ||
| 26 | #include "dc/inc/hw/dmcu.h" | ||
| 27 | |||
| 28 | #define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b)) | ||
| 29 | |||
| 30 | /* Possible Min Reduction config from least aggressive to most aggressive | ||
| 31 | * 0 1 2 3 4 5 6 7 8 9 10 11 12 | ||
| 32 | * 100 98.0 94.1 94.1 85.1 80.3 75.3 69.4 60.0 57.6 50.2 49.8 40.0 % | ||
| 33 | */ | ||
| 34 | static const unsigned char min_reduction_table[13] = { | ||
| 35 | 0xff, 0xfa, 0xf0, 0xf0, 0xd9, 0xcd, 0xc0, 0xb1, 0x99, 0x93, 0x80, 0x82, 0x66}; | ||
| 36 | |||
| 37 | /* Possible Max Reduction configs from least aggressive to most aggressive | ||
| 38 | * 0 1 2 3 4 5 6 7 8 9 10 11 12 | ||
| 39 | * 96.1 89.8 85.1 80.3 69.4 64.7 64.7 50.2 39.6 30.2 30.2 30.2 19.6 % | ||
| 40 | */ | ||
| 41 | static const unsigned char max_reduction_table[13] = { | ||
| 42 | 0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32}; | ||
| 43 | |||
| 44 | /* Predefined ABM configuration sets. We may have different configuration sets | ||
| 45 | * in order to satisfy different power/quality requirements. | ||
| 46 | */ | ||
| 47 | static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_level] = { | ||
| 48 | /* ABM Level 1, ABM Level 2, ABM Level 3, ABM Level 4 */ | ||
| 49 | { 2, 5, 7, 8 }, /* Default - Medium aggressiveness */ | ||
| 50 | { 2, 5, 8, 11 }, /* Alt #1 - Increased aggressiveness */ | ||
| 51 | { 0, 2, 4, 8 }, /* Alt #2 - Minimal aggressiveness */ | ||
| 52 | { 3, 6, 10, 12 }, /* Alt #3 - Super aggressiveness */ | ||
| 53 | }; | ||
| 54 | |||
| 55 | #define NUM_AMBI_LEVEL 5 | ||
| 56 | #define NUM_AGGR_LEVEL 4 | ||
| 57 | #define NUM_POWER_FN_SEGS 8 | ||
| 58 | #define NUM_BL_CURVE_SEGS 16 | ||
| 59 | |||
| 60 | /* NOTE: iRAM is 256B in size */ | ||
| 61 | struct iram_table_v_2 { | ||
| 62 | /* flags */ | ||
| 63 | uint16_t flags; /* 0x00 U16 */ | ||
| 64 | |||
| 65 | /* parameters for ABM2.0 algorithm */ | ||
| 66 | uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */ | ||
| 67 | uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */ | ||
| 68 | uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */ | ||
| 69 | uint8_t bright_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */ | ||
| 70 | uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x52 U2.6 */ | ||
| 71 | uint8_t dark_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x66 U2.6 */ | ||
| 72 | uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x7a U0.8 */ | ||
| 73 | uint8_t deviation_gain; /* 0x7f U0.8 */ | ||
| 74 | |||
| 75 | /* parameters for crgb conversion */ | ||
| 76 | uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ | ||
| 77 | uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */ | ||
| 78 | uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ | ||
| 79 | |||
| 80 | /* parameters for custom curve */ | ||
| 81 | /* thresholds for brightness --> backlight */ | ||
| 82 | uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ | ||
| 83 | /* offsets for brightness --> backlight */ | ||
| 84 | uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ | ||
| 85 | |||
| 86 | /* For reading PSR State directly from IRAM */ | ||
| 87 | uint8_t psr_state; /* 0xf0 */ | ||
| 88 | uint8_t dmcu_interface_version; /* 0xf1 */ | ||
| 89 | uint8_t dmcu_date_version_year_b0; /* 0xf2 */ | ||
| 90 | uint8_t dmcu_date_version_year_b1; /* 0xf3 */ | ||
| 91 | uint8_t dmcu_date_version_month; /* 0xf4 */ | ||
| 92 | uint8_t dmcu_date_version_day; /* 0xf5 */ | ||
| 93 | uint8_t dmcu_state; /* 0xf6 */ | ||
| 94 | |||
| 95 | uint16_t blRampReduction; /* 0xf7 */ | ||
| 96 | uint16_t blRampStart; /* 0xf9 */ | ||
| 97 | uint8_t dummy5; /* 0xfb */ | ||
| 98 | uint8_t dummy6; /* 0xfc */ | ||
| 99 | uint8_t dummy7; /* 0xfd */ | ||
| 100 | uint8_t dummy8; /* 0xfe */ | ||
| 101 | uint8_t dummy9; /* 0xff */ | ||
| 102 | }; | ||
| 103 | |||
| 104 | static uint16_t backlight_8_to_16(unsigned int backlight_8bit) | ||
| 105 | { | ||
| 106 | return (uint16_t)(backlight_8bit * 0x101); | ||
| 107 | } | ||
| 108 | |||
| 109 | static void fill_backlight_transform_table(struct dmcu_iram_parameters params, | ||
| 110 | struct iram_table_v_2 *table) | ||
| 111 | { | ||
| 112 | unsigned int i; | ||
| 113 | unsigned int num_entries = NUM_BL_CURVE_SEGS; | ||
| 114 | unsigned int query_input_8bit; | ||
| 115 | unsigned int query_output_8bit; | ||
| 116 | unsigned int lut_index; | ||
| 117 | |||
| 118 | table->backlight_thresholds[0] = 0; | ||
| 119 | table->backlight_offsets[0] = params.backlight_lut_array[0]; | ||
| 120 | table->backlight_thresholds[num_entries-1] = 0xFFFF; | ||
| 121 | table->backlight_offsets[num_entries-1] = | ||
| 122 | params.backlight_lut_array[params.backlight_lut_array_size - 1]; | ||
| 123 | |||
| 124 | /* Setup all brightness levels between 0% and 100% exclusive | ||
| 125 | * Fills brightness-to-backlight transform table. Backlight custom curve | ||
| 126 | * describes transform from brightness to backlight. It will be defined | ||
| 127 | * as set of thresholds and set of offsets, together, implying | ||
| 128 | * extrapolation of custom curve into 16 uniformly spanned linear | ||
| 129 | * segments. Each threshold/offset represented by 16 bit entry in | ||
| 130 | * format U4.10. | ||
| 131 | */ | ||
| 132 | for (i = 1; i+1 < num_entries; i++) { | ||
| 133 | query_input_8bit = DIV_ROUNDUP((i * 256), num_entries); | ||
| 134 | |||
| 135 | lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1); | ||
| 136 | ASSERT(lut_index < params.backlight_lut_array_size); | ||
| 137 | query_output_8bit = params.backlight_lut_array[lut_index] >> 8; | ||
| 138 | |||
| 139 | table->backlight_thresholds[i] = | ||
| 140 | backlight_8_to_16(query_input_8bit); | ||
| 141 | table->backlight_offsets[i] = | ||
| 142 | backlight_8_to_16(query_output_8bit); | ||
| 143 | } | ||
| 144 | } | ||
| 145 | |||
| 146 | bool dmcu_load_iram(struct dmcu *dmcu, | ||
| 147 | struct dmcu_iram_parameters params) | ||
| 148 | { | ||
| 149 | struct iram_table_v_2 ram_table; | ||
| 150 | unsigned int set = params.set; | ||
| 151 | |||
| 152 | if (dmcu == NULL) | ||
| 153 | return false; | ||
| 154 | |||
| 155 | if (!dmcu->funcs->is_dmcu_initialized(dmcu)) | ||
| 156 | return true; | ||
| 157 | |||
| 158 | memset(&ram_table, 0, sizeof(ram_table)); | ||
| 159 | |||
| 160 | ram_table.flags = 0x0; | ||
| 161 | ram_table.deviation_gain = 0xb3; | ||
| 162 | |||
| 163 | ram_table.blRampReduction = | ||
| 164 | cpu_to_be16(params.backlight_ramping_reduction); | ||
| 165 | ram_table.blRampStart = | ||
| 166 | cpu_to_be16(params.backlight_ramping_start); | ||
| 167 | |||
| 168 | ram_table.min_reduction[0][0] = min_reduction_table[abm_config[set][0]]; | ||
| 169 | ram_table.min_reduction[1][0] = min_reduction_table[abm_config[set][0]]; | ||
| 170 | ram_table.min_reduction[2][0] = min_reduction_table[abm_config[set][0]]; | ||
| 171 | ram_table.min_reduction[3][0] = min_reduction_table[abm_config[set][0]]; | ||
| 172 | ram_table.min_reduction[4][0] = min_reduction_table[abm_config[set][0]]; | ||
| 173 | ram_table.max_reduction[0][0] = max_reduction_table[abm_config[set][0]]; | ||
| 174 | ram_table.max_reduction[1][0] = max_reduction_table[abm_config[set][0]]; | ||
| 175 | ram_table.max_reduction[2][0] = max_reduction_table[abm_config[set][0]]; | ||
| 176 | ram_table.max_reduction[3][0] = max_reduction_table[abm_config[set][0]]; | ||
| 177 | ram_table.max_reduction[4][0] = max_reduction_table[abm_config[set][0]]; | ||
| 178 | |||
| 179 | ram_table.min_reduction[0][1] = min_reduction_table[abm_config[set][1]]; | ||
| 180 | ram_table.min_reduction[1][1] = min_reduction_table[abm_config[set][1]]; | ||
| 181 | ram_table.min_reduction[2][1] = min_reduction_table[abm_config[set][1]]; | ||
| 182 | ram_table.min_reduction[3][1] = min_reduction_table[abm_config[set][1]]; | ||
| 183 | ram_table.min_reduction[4][1] = min_reduction_table[abm_config[set][1]]; | ||
| 184 | ram_table.max_reduction[0][1] = max_reduction_table[abm_config[set][1]]; | ||
| 185 | ram_table.max_reduction[1][1] = max_reduction_table[abm_config[set][1]]; | ||
| 186 | ram_table.max_reduction[2][1] = max_reduction_table[abm_config[set][1]]; | ||
| 187 | ram_table.max_reduction[3][1] = max_reduction_table[abm_config[set][1]]; | ||
| 188 | ram_table.max_reduction[4][1] = max_reduction_table[abm_config[set][1]]; | ||
| 189 | |||
| 190 | ram_table.min_reduction[0][2] = min_reduction_table[abm_config[set][2]]; | ||
| 191 | ram_table.min_reduction[1][2] = min_reduction_table[abm_config[set][2]]; | ||
| 192 | ram_table.min_reduction[2][2] = min_reduction_table[abm_config[set][2]]; | ||
| 193 | ram_table.min_reduction[3][2] = min_reduction_table[abm_config[set][2]]; | ||
| 194 | ram_table.min_reduction[4][2] = min_reduction_table[abm_config[set][2]]; | ||
| 195 | ram_table.max_reduction[0][2] = max_reduction_table[abm_config[set][2]]; | ||
| 196 | ram_table.max_reduction[1][2] = max_reduction_table[abm_config[set][2]]; | ||
| 197 | ram_table.max_reduction[2][2] = max_reduction_table[abm_config[set][2]]; | ||
| 198 | ram_table.max_reduction[3][2] = max_reduction_table[abm_config[set][2]]; | ||
| 199 | ram_table.max_reduction[4][2] = max_reduction_table[abm_config[set][2]]; | ||
| 200 | |||
| 201 | ram_table.min_reduction[0][3] = min_reduction_table[abm_config[set][3]]; | ||
| 202 | ram_table.min_reduction[1][3] = min_reduction_table[abm_config[set][3]]; | ||
| 203 | ram_table.min_reduction[2][3] = min_reduction_table[abm_config[set][3]]; | ||
| 204 | ram_table.min_reduction[3][3] = min_reduction_table[abm_config[set][3]]; | ||
| 205 | ram_table.min_reduction[4][3] = min_reduction_table[abm_config[set][3]]; | ||
| 206 | ram_table.max_reduction[0][3] = max_reduction_table[abm_config[set][3]]; | ||
| 207 | ram_table.max_reduction[1][3] = max_reduction_table[abm_config[set][3]]; | ||
| 208 | ram_table.max_reduction[2][3] = max_reduction_table[abm_config[set][3]]; | ||
| 209 | ram_table.max_reduction[3][3] = max_reduction_table[abm_config[set][3]]; | ||
| 210 | ram_table.max_reduction[4][3] = max_reduction_table[abm_config[set][3]]; | ||
| 211 | |||
| 212 | ram_table.bright_pos_gain[0][0] = 0x20; | ||
| 213 | ram_table.bright_pos_gain[0][1] = 0x20; | ||
| 214 | ram_table.bright_pos_gain[0][2] = 0x20; | ||
| 215 | ram_table.bright_pos_gain[0][3] = 0x20; | ||
| 216 | ram_table.bright_pos_gain[1][0] = 0x20; | ||
| 217 | ram_table.bright_pos_gain[1][1] = 0x20; | ||
| 218 | ram_table.bright_pos_gain[1][2] = 0x20; | ||
| 219 | ram_table.bright_pos_gain[1][3] = 0x20; | ||
| 220 | ram_table.bright_pos_gain[2][0] = 0x20; | ||
| 221 | ram_table.bright_pos_gain[2][1] = 0x20; | ||
| 222 | ram_table.bright_pos_gain[2][2] = 0x20; | ||
| 223 | ram_table.bright_pos_gain[2][3] = 0x20; | ||
| 224 | ram_table.bright_pos_gain[3][0] = 0x20; | ||
| 225 | ram_table.bright_pos_gain[3][1] = 0x20; | ||
| 226 | ram_table.bright_pos_gain[3][2] = 0x20; | ||
| 227 | ram_table.bright_pos_gain[3][3] = 0x20; | ||
| 228 | ram_table.bright_pos_gain[4][0] = 0x20; | ||
| 229 | ram_table.bright_pos_gain[4][1] = 0x20; | ||
| 230 | ram_table.bright_pos_gain[4][2] = 0x20; | ||
| 231 | ram_table.bright_pos_gain[4][3] = 0x20; | ||
| 232 | ram_table.bright_neg_gain[0][1] = 0x00; | ||
| 233 | ram_table.bright_neg_gain[0][2] = 0x00; | ||
| 234 | ram_table.bright_neg_gain[0][3] = 0x00; | ||
| 235 | ram_table.bright_neg_gain[1][0] = 0x00; | ||
| 236 | ram_table.bright_neg_gain[1][1] = 0x00; | ||
| 237 | ram_table.bright_neg_gain[1][2] = 0x00; | ||
| 238 | ram_table.bright_neg_gain[1][3] = 0x00; | ||
| 239 | ram_table.bright_neg_gain[2][0] = 0x00; | ||
| 240 | ram_table.bright_neg_gain[2][1] = 0x00; | ||
| 241 | ram_table.bright_neg_gain[2][2] = 0x00; | ||
| 242 | ram_table.bright_neg_gain[2][3] = 0x00; | ||
| 243 | ram_table.bright_neg_gain[3][0] = 0x00; | ||
| 244 | ram_table.bright_neg_gain[3][1] = 0x00; | ||
| 245 | ram_table.bright_neg_gain[3][2] = 0x00; | ||
| 246 | ram_table.bright_neg_gain[3][3] = 0x00; | ||
| 247 | ram_table.bright_neg_gain[4][0] = 0x00; | ||
| 248 | ram_table.bright_neg_gain[4][1] = 0x00; | ||
| 249 | ram_table.bright_neg_gain[4][2] = 0x00; | ||
| 250 | ram_table.bright_neg_gain[4][3] = 0x00; | ||
| 251 | ram_table.dark_pos_gain[0][0] = 0x00; | ||
| 252 | ram_table.dark_pos_gain[0][1] = 0x00; | ||
| 253 | ram_table.dark_pos_gain[0][2] = 0x00; | ||
| 254 | ram_table.dark_pos_gain[0][3] = 0x00; | ||
| 255 | ram_table.dark_pos_gain[1][0] = 0x00; | ||
| 256 | ram_table.dark_pos_gain[1][1] = 0x00; | ||
| 257 | ram_table.dark_pos_gain[1][2] = 0x00; | ||
| 258 | ram_table.dark_pos_gain[1][3] = 0x00; | ||
| 259 | ram_table.dark_pos_gain[2][0] = 0x00; | ||
| 260 | ram_table.dark_pos_gain[2][1] = 0x00; | ||
| 261 | ram_table.dark_pos_gain[2][2] = 0x00; | ||
| 262 | ram_table.dark_pos_gain[2][3] = 0x00; | ||
| 263 | ram_table.dark_pos_gain[3][0] = 0x00; | ||
| 264 | ram_table.dark_pos_gain[3][1] = 0x00; | ||
| 265 | ram_table.dark_pos_gain[3][2] = 0x00; | ||
| 266 | ram_table.dark_pos_gain[3][3] = 0x00; | ||
| 267 | ram_table.dark_pos_gain[4][0] = 0x00; | ||
| 268 | ram_table.dark_pos_gain[4][1] = 0x00; | ||
| 269 | ram_table.dark_pos_gain[4][2] = 0x00; | ||
| 270 | ram_table.dark_pos_gain[4][3] = 0x00; | ||
| 271 | ram_table.dark_neg_gain[0][0] = 0x00; | ||
| 272 | ram_table.dark_neg_gain[0][1] = 0x00; | ||
| 273 | ram_table.dark_neg_gain[0][2] = 0x00; | ||
| 274 | ram_table.dark_neg_gain[0][3] = 0x00; | ||
| 275 | ram_table.dark_neg_gain[1][0] = 0x00; | ||
| 276 | ram_table.dark_neg_gain[1][1] = 0x00; | ||
| 277 | ram_table.dark_neg_gain[1][2] = 0x00; | ||
| 278 | ram_table.dark_neg_gain[1][3] = 0x00; | ||
| 279 | ram_table.dark_neg_gain[2][0] = 0x00; | ||
| 280 | ram_table.dark_neg_gain[2][1] = 0x00; | ||
| 281 | ram_table.dark_neg_gain[2][2] = 0x00; | ||
| 282 | ram_table.dark_neg_gain[2][3] = 0x00; | ||
| 283 | ram_table.dark_neg_gain[3][0] = 0x00; | ||
| 284 | ram_table.dark_neg_gain[3][1] = 0x00; | ||
| 285 | ram_table.dark_neg_gain[3][2] = 0x00; | ||
| 286 | ram_table.dark_neg_gain[3][3] = 0x00; | ||
| 287 | ram_table.dark_neg_gain[4][0] = 0x00; | ||
| 288 | ram_table.dark_neg_gain[4][1] = 0x00; | ||
| 289 | ram_table.dark_neg_gain[4][2] = 0x00; | ||
| 290 | ram_table.dark_neg_gain[4][3] = 0x00; | ||
| 291 | ram_table.iir_curve[0] = 0x65; | ||
| 292 | ram_table.iir_curve[1] = 0x65; | ||
| 293 | ram_table.iir_curve[2] = 0x65; | ||
| 294 | ram_table.iir_curve[3] = 0x65; | ||
| 295 | ram_table.iir_curve[4] = 0x65; | ||
| 296 | ram_table.crgb_thresh[0] = cpu_to_be16(0x13b6); | ||
| 297 | ram_table.crgb_thresh[1] = cpu_to_be16(0x1648); | ||
| 298 | ram_table.crgb_thresh[2] = cpu_to_be16(0x18e3); | ||
| 299 | ram_table.crgb_thresh[3] = cpu_to_be16(0x1b41); | ||
| 300 | ram_table.crgb_thresh[4] = cpu_to_be16(0x1d46); | ||
| 301 | ram_table.crgb_thresh[5] = cpu_to_be16(0x1f21); | ||
| 302 | ram_table.crgb_thresh[6] = cpu_to_be16(0x2167); | ||
| 303 | ram_table.crgb_thresh[7] = cpu_to_be16(0x2384); | ||
| 304 | ram_table.crgb_offset[0] = cpu_to_be16(0x2999); | ||
| 305 | ram_table.crgb_offset[1] = cpu_to_be16(0x3999); | ||
| 306 | ram_table.crgb_offset[2] = cpu_to_be16(0x4666); | ||
| 307 | ram_table.crgb_offset[3] = cpu_to_be16(0x5999); | ||
| 308 | ram_table.crgb_offset[4] = cpu_to_be16(0x6333); | ||
| 309 | ram_table.crgb_offset[5] = cpu_to_be16(0x7800); | ||
| 310 | ram_table.crgb_offset[6] = cpu_to_be16(0x8c00); | ||
| 311 | ram_table.crgb_offset[7] = cpu_to_be16(0xa000); | ||
| 312 | ram_table.crgb_slope[0] = cpu_to_be16(0x3147); | ||
| 313 | ram_table.crgb_slope[1] = cpu_to_be16(0x2978); | ||
| 314 | ram_table.crgb_slope[2] = cpu_to_be16(0x23a2); | ||
| 315 | ram_table.crgb_slope[3] = cpu_to_be16(0x1f55); | ||
| 316 | ram_table.crgb_slope[4] = cpu_to_be16(0x1c63); | ||
| 317 | ram_table.crgb_slope[5] = cpu_to_be16(0x1a0f); | ||
| 318 | ram_table.crgb_slope[6] = cpu_to_be16(0x178d); | ||
| 319 | ram_table.crgb_slope[7] = cpu_to_be16(0x15ab); | ||
| 320 | |||
| 321 | fill_backlight_transform_table( | ||
| 322 | params, &ram_table); | ||
| 323 | |||
| 324 | return dmcu->funcs->load_iram( | ||
| 325 | dmcu, 0, (char *)(&ram_table), sizeof(ram_table)); | ||
| 326 | } | ||
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h new file mode 100644 index 000000000000..da5df00fedce --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h | |||
| @@ -0,0 +1,47 @@ | |||
| 1 | /* Copyright 2018 Advanced Micro Devices, Inc. | ||
| 2 | * | ||
| 3 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 4 | * copy of this software and associated documentation files (the "Software"), | ||
| 5 | * to deal in the Software without restriction, including without limitation | ||
| 6 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 7 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 8 | * Software is furnished to do so, subject to the following conditions: | ||
| 9 | * | ||
| 10 | * The above copyright notice and this permission notice shall be included in | ||
| 11 | * all copies or substantial portions of the Software. | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 17 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 18 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 19 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * Authors: AMD | ||
| 22 | * | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef MODULES_POWER_POWER_HELPERS_H_ | ||
| 26 | #define MODULES_POWER_POWER_HELPERS_H_ | ||
| 27 | |||
| 28 | #include "dc/inc/hw/dmcu.h" | ||
| 29 | |||
| 30 | |||
| 31 | enum abm_defines { | ||
| 32 | abm_defines_max_level = 4, | ||
| 33 | abm_defines_max_config = 4, | ||
| 34 | }; | ||
| 35 | |||
| 36 | struct dmcu_iram_parameters { | ||
| 37 | unsigned int *backlight_lut_array; | ||
| 38 | unsigned int backlight_lut_array_size; | ||
| 39 | unsigned int backlight_ramping_reduction; | ||
| 40 | unsigned int backlight_ramping_start; | ||
| 41 | unsigned int set; | ||
| 42 | }; | ||
| 43 | |||
| 44 | bool dmcu_load_iram(struct dmcu *dmcu, | ||
| 45 | struct dmcu_iram_parameters params); | ||
| 46 | |||
| 47 | #endif /* MODULES_POWER_POWER_HELPERS_H_ */ | ||
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h index 9b9699fc433f..c72cbfe8f684 100644 --- a/drivers/gpu/drm/amd/include/amd_acpi.h +++ b/drivers/gpu/drm/amd/include/amd_acpi.h | |||
| @@ -52,6 +52,30 @@ struct atif_sbios_requests { | |||
| 52 | u8 backlight_level; /* panel backlight level (0-255) */ | 52 | u8 backlight_level; /* panel backlight level (0-255) */ |
| 53 | } __packed; | 53 | } __packed; |
| 54 | 54 | ||
| 55 | struct atif_qbtc_arguments { | ||
| 56 | u16 size; /* structure size in bytes (includes size field) */ | ||
| 57 | u8 requested_display; /* which display is requested */ | ||
| 58 | } __packed; | ||
| 59 | |||
| 60 | #define ATIF_QBTC_MAX_DATA_POINTS 99 | ||
| 61 | |||
| 62 | struct atif_qbtc_data_point { | ||
| 63 | u8 luminance; /* luminance in percent */ | ||
| 64 | u8 input_signal; /* input signal in range 0-255 */ | ||
| 65 | } __packed; | ||
| 66 | |||
| 67 | struct atif_qbtc_output { | ||
| 68 | u16 size; /* structure size in bytes (includes size field) */ | ||
| 69 | u16 flags; /* all zeroes */ | ||
| 70 | u8 error_code; /* error code */ | ||
| 71 | u8 ac_level; /* default brightness on AC power */ | ||
| 72 | u8 dc_level; /* default brightness on DC power */ | ||
| 73 | u8 min_input_signal; /* min input signal in range 0-255 */ | ||
| 74 | u8 max_input_signal; /* max input signal in range 0-255 */ | ||
| 75 | u8 number_of_points; /* number of data points */ | ||
| 76 | struct atif_qbtc_data_point data_points[ATIF_QBTC_MAX_DATA_POINTS]; | ||
| 77 | } __packed; | ||
| 78 | |||
| 55 | #define ATIF_NOTIFY_MASK 0x3 | 79 | #define ATIF_NOTIFY_MASK 0x3 |
| 56 | #define ATIF_NOTIFY_NONE 0 | 80 | #define ATIF_NOTIFY_NONE 0 |
| 57 | #define ATIF_NOTIFY_81 1 | 81 | #define ATIF_NOTIFY_81 1 |
| @@ -126,26 +150,18 @@ struct atcs_pref_req_output { | |||
| 126 | * DWORD - supported functions bit vector | 150 | * DWORD - supported functions bit vector |
| 127 | */ | 151 | */ |
| 128 | /* Notifications mask */ | 152 | /* Notifications mask */ |
| 129 | # define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0) | ||
| 130 | # define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1) | ||
| 131 | # define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2) | 153 | # define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2) |
| 132 | # define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3) | 154 | # define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3) |
| 133 | # define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4) | 155 | # define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4) |
| 134 | # define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5) | ||
| 135 | # define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6) | ||
| 136 | # define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7) | 156 | # define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7) |
| 137 | # define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8) | 157 | # define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8) |
| 158 | # define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED (1 << 12) | ||
| 138 | /* supported functions vector */ | 159 | /* supported functions vector */ |
| 139 | # define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0) | 160 | # define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0) |
| 140 | # define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1) | 161 | # define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1) |
| 141 | # define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2) | ||
| 142 | # define ATIF_GET_LID_STATE_SUPPORTED (1 << 3) | ||
| 143 | # define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4) | ||
| 144 | # define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5) | ||
| 145 | # define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6) | ||
| 146 | # define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7) | ||
| 147 | # define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12) | 162 | # define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12) |
| 148 | # define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14) | 163 | # define ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED (1 << 15) |
| 164 | # define ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED (1 << 16) | ||
| 149 | # define ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED (1 << 20) | 165 | # define ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED (1 << 20) |
| 150 | #define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1 | 166 | #define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1 |
| 151 | /* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS | 167 | /* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS |
| @@ -170,6 +186,10 @@ struct atcs_pref_req_output { | |||
| 170 | * n (0xd0-0xd9) is specified in notify command code. | 186 | * n (0xd0-0xd9) is specified in notify command code. |
| 171 | * bit 2: | 187 | * bit 2: |
| 172 | * 1 - lid changes not reported though int10 | 188 | * 1 - lid changes not reported though int10 |
| 189 | * bit 3: | ||
| 190 | * 1 - system bios controls overclocking | ||
| 191 | * bit 4: | ||
| 192 | * 1 - enable overclocking | ||
| 173 | */ | 193 | */ |
| 174 | #define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2 | 194 | #define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2 |
| 175 | /* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS | 195 | /* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS |
| @@ -177,28 +197,23 @@ struct atcs_pref_req_output { | |||
| 177 | * OUTPUT: | 197 | * OUTPUT: |
| 178 | * WORD - structure size in bytes (includes size field) | 198 | * WORD - structure size in bytes (includes size field) |
| 179 | * DWORD - pending sbios requests | 199 | * DWORD - pending sbios requests |
| 180 | * BYTE - panel expansion mode | 200 | * BYTE - reserved (all zeroes) |
| 181 | * BYTE - thermal state: target gfx controller | 201 | * BYTE - thermal state: target gfx controller |
| 182 | * BYTE - thermal state: state id (0: exit state, non-0: state) | 202 | * BYTE - thermal state: state id (0: exit state, non-0: state) |
| 183 | * BYTE - forced power state: target gfx controller | 203 | * BYTE - forced power state: target gfx controller |
| 184 | * BYTE - forced power state: state id | 204 | * BYTE - forced power state: state id (0: forced state, non-0: state) |
| 185 | * BYTE - system power source | 205 | * BYTE - system power source |
| 186 | * BYTE - panel backlight level (0-255) | 206 | * BYTE - panel backlight level (0-255) |
| 207 | * BYTE - GPU package power limit: target gfx controller | ||
| 208 | * DWORD - GPU package power limit: value (24:8 fractional format, Watts) | ||
| 187 | */ | 209 | */ |
| 188 | /* pending sbios requests */ | 210 | /* pending sbios requests */ |
| 189 | # define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0) | ||
| 190 | # define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1) | ||
| 191 | # define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2) | 211 | # define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2) |
| 192 | # define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3) | 212 | # define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3) |
| 193 | # define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4) | 213 | # define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4) |
| 194 | # define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5) | ||
| 195 | # define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6) | ||
| 196 | # define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7) | 214 | # define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7) |
| 197 | # define ATIF_DGPU_DISPLAY_EVENT (1 << 8) | 215 | # define ATIF_DGPU_DISPLAY_EVENT (1 << 8) |
| 198 | /* panel expansion mode */ | 216 | # define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST (1 << 12) |
| 199 | # define ATIF_PANEL_EXPANSION_DISABLE 0 | ||
| 200 | # define ATIF_PANEL_EXPANSION_FULL 1 | ||
| 201 | # define ATIF_PANEL_EXPANSION_ASPECT 2 | ||
| 202 | /* target gfx controller */ | 217 | /* target gfx controller */ |
| 203 | # define ATIF_TARGET_GFX_SINGLE 0 | 218 | # define ATIF_TARGET_GFX_SINGLE 0 |
| 204 | # define ATIF_TARGET_GFX_PX_IGPU 1 | 219 | # define ATIF_TARGET_GFX_PX_IGPU 1 |
| @@ -208,76 +223,6 @@ struct atcs_pref_req_output { | |||
| 208 | # define ATIF_POWER_SOURCE_DC 2 | 223 | # define ATIF_POWER_SOURCE_DC 2 |
| 209 | # define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3 | 224 | # define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3 |
| 210 | # define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4 | 225 | # define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4 |
| 211 | #define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3 | ||
| 212 | /* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS | ||
| 213 | * ARG1: | ||
| 214 | * WORD - structure size in bytes (includes size field) | ||
| 215 | * WORD - selected displays | ||
| 216 | * WORD - connected displays | ||
| 217 | * OUTPUT: | ||
| 218 | * WORD - structure size in bytes (includes size field) | ||
| 219 | * WORD - selected displays | ||
| 220 | */ | ||
| 221 | # define ATIF_LCD1 (1 << 0) | ||
| 222 | # define ATIF_CRT1 (1 << 1) | ||
| 223 | # define ATIF_TV (1 << 2) | ||
| 224 | # define ATIF_DFP1 (1 << 3) | ||
| 225 | # define ATIF_CRT2 (1 << 4) | ||
| 226 | # define ATIF_LCD2 (1 << 5) | ||
| 227 | # define ATIF_DFP2 (1 << 7) | ||
| 228 | # define ATIF_CV (1 << 8) | ||
| 229 | # define ATIF_DFP3 (1 << 9) | ||
| 230 | # define ATIF_DFP4 (1 << 10) | ||
| 231 | # define ATIF_DFP5 (1 << 11) | ||
| 232 | # define ATIF_DFP6 (1 << 12) | ||
| 233 | #define ATIF_FUNCTION_GET_LID_STATE 0x4 | ||
| 234 | /* ARG0: ATIF_FUNCTION_GET_LID_STATE | ||
| 235 | * ARG1: none | ||
| 236 | * OUTPUT: | ||
| 237 | * WORD - structure size in bytes (includes size field) | ||
| 238 | * BYTE - lid state (0: open, 1: closed) | ||
| 239 | * | ||
| 240 | * GET_LID_STATE only works at boot and resume, for general lid | ||
| 241 | * status, use the kernel provided status | ||
| 242 | */ | ||
| 243 | #define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5 | ||
| 244 | /* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS | ||
| 245 | * ARG1: none | ||
| 246 | * OUTPUT: | ||
| 247 | * WORD - structure size in bytes (includes size field) | ||
| 248 | * BYTE - 0 | ||
| 249 | * BYTE - TV standard | ||
| 250 | */ | ||
| 251 | # define ATIF_TV_STD_NTSC 0 | ||
| 252 | # define ATIF_TV_STD_PAL 1 | ||
| 253 | # define ATIF_TV_STD_PALM 2 | ||
| 254 | # define ATIF_TV_STD_PAL60 3 | ||
| 255 | # define ATIF_TV_STD_NTSCJ 4 | ||
| 256 | # define ATIF_TV_STD_PALCN 5 | ||
| 257 | # define ATIF_TV_STD_PALN 6 | ||
| 258 | # define ATIF_TV_STD_SCART_RGB 9 | ||
| 259 | #define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6 | ||
| 260 | /* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS | ||
| 261 | * ARG1: | ||
| 262 | * WORD - structure size in bytes (includes size field) | ||
| 263 | * BYTE - 0 | ||
| 264 | * BYTE - TV standard | ||
| 265 | * OUTPUT: none | ||
| 266 | */ | ||
| 267 | #define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7 | ||
| 268 | /* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS | ||
| 269 | * ARG1: none | ||
| 270 | * OUTPUT: | ||
| 271 | * WORD - structure size in bytes (includes size field) | ||
| 272 | * BYTE - panel expansion mode | ||
| 273 | */ | ||
| 274 | #define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8 | ||
| 275 | /* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS | ||
| 276 | * ARG1: | ||
| 277 | * WORD - structure size in bytes (includes size field) | ||
| 278 | * BYTE - panel expansion mode | ||
| 279 | * OUTPUT: none | ||
| 280 | */ | ||
| 281 | #define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD | 226 | #define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD |
| 282 | /* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION | 227 | /* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION |
| 283 | * ARG1: | 228 | * ARG1: |
| @@ -286,21 +231,43 @@ struct atcs_pref_req_output { | |||
| 286 | * BYTE - current temperature (degress Celsius) | 231 | * BYTE - current temperature (degress Celsius) |
| 287 | * OUTPUT: none | 232 | * OUTPUT: none |
| 288 | */ | 233 | */ |
| 289 | #define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF | 234 | #define ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS 0x10 |
| 290 | /* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES | 235 | /* ARG0: ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS |
| 291 | * ARG1: none | 236 | * ARG1: |
| 237 | * WORD - structure size in bytes (includes size field) | ||
| 238 | * BYTE - requested display | ||
| 292 | * OUTPUT: | 239 | * OUTPUT: |
| 293 | * WORD - number of gfx devices | 240 | * WORD - structure size in bytes (includes size field) |
| 294 | * WORD - device structure size in bytes (excludes device size field) | 241 | * WORD - flags (currently all 16 bits are reserved) |
| 295 | * DWORD - flags \ | 242 | * BYTE - error code (on failure, disregard all below fields) |
| 296 | * WORD - bus number } repeated structure | 243 | * BYTE - AC level (default brightness in percent when machine has full power) |
| 297 | * WORD - device number / | 244 | * BYTE - DC level (default brightness in percent when machine is on battery) |
| 245 | * BYTE - min input signal, in range 0-255, corresponding to 0% backlight | ||
| 246 | * BYTE - max input signal, in range 0-255, corresponding to 100% backlight | ||
| 247 | * BYTE - number of reported data points | ||
| 248 | * BYTE - luminance level in percent \ repeated structure | ||
| 249 | * BYTE - input signal in range 0-255 / does not have entries for 0% and 100% | ||
| 250 | */ | ||
| 251 | /* requested display */ | ||
| 252 | # define ATIF_QBTC_REQUEST_LCD1 0 | ||
| 253 | # define ATIF_QBTC_REQUEST_CRT1 1 | ||
| 254 | # define ATIF_QBTC_REQUEST_DFP1 3 | ||
| 255 | # define ATIF_QBTC_REQUEST_CRT2 4 | ||
| 256 | # define ATIF_QBTC_REQUEST_LCD2 5 | ||
| 257 | # define ATIF_QBTC_REQUEST_DFP2 7 | ||
| 258 | # define ATIF_QBTC_REQUEST_DFP3 9 | ||
| 259 | # define ATIF_QBTC_REQUEST_DFP4 10 | ||
| 260 | # define ATIF_QBTC_REQUEST_DFP5 11 | ||
| 261 | # define ATIF_QBTC_REQUEST_DFP6 12 | ||
| 262 | /* error code */ | ||
| 263 | # define ATIF_QBTC_ERROR_CODE_SUCCESS 0 | ||
| 264 | # define ATIF_QBTC_ERROR_CODE_FAILURE 1 | ||
| 265 | # define ATIF_QBTC_ERROR_CODE_DEVICE_NOT_SUPPORTED 2 | ||
| 266 | #define ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION 0x11 | ||
| 267 | /* ARG0: ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION | ||
| 268 | * ARG1: none | ||
| 269 | * OUTPUT: none | ||
| 298 | */ | 270 | */ |
| 299 | /* flags */ | ||
| 300 | # define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0) | ||
| 301 | # define ATIF_XGP_PORT (1 << 1) | ||
| 302 | # define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2) | ||
| 303 | # define ATIF_XGP_PORT_IN_DOCK (1 << 3) | ||
| 304 | #define ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION 0x15 | 271 | #define ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION 0x15 |
| 305 | /* ARG0: ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION | 272 | /* ARG0: ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION |
| 306 | * ARG1: none | 273 | * ARG1: none |
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index d6aa1d414320..b68c2e0fef01 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c | |||
| @@ -300,7 +300,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id) | |||
| 300 | return -EINVAL; | 300 | return -EINVAL; |
| 301 | 301 | ||
| 302 | if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { | 302 | if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { |
| 303 | pr_info("%s was not implemented.\n", __func__); | 303 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 304 | return 0; | 304 | return 0; |
| 305 | } | 305 | } |
| 306 | 306 | ||
| @@ -387,7 +387,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low) | |||
| 387 | return 0; | 387 | return 0; |
| 388 | 388 | ||
| 389 | if (hwmgr->hwmgr_func->get_sclk == NULL) { | 389 | if (hwmgr->hwmgr_func->get_sclk == NULL) { |
| 390 | pr_info("%s was not implemented.\n", __func__); | 390 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 391 | return 0; | 391 | return 0; |
| 392 | } | 392 | } |
| 393 | mutex_lock(&hwmgr->smu_lock); | 393 | mutex_lock(&hwmgr->smu_lock); |
| @@ -405,7 +405,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low) | |||
| 405 | return 0; | 405 | return 0; |
| 406 | 406 | ||
| 407 | if (hwmgr->hwmgr_func->get_mclk == NULL) { | 407 | if (hwmgr->hwmgr_func->get_mclk == NULL) { |
| 408 | pr_info("%s was not implemented.\n", __func__); | 408 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 409 | return 0; | 409 | return 0; |
| 410 | } | 410 | } |
| 411 | mutex_lock(&hwmgr->smu_lock); | 411 | mutex_lock(&hwmgr->smu_lock); |
| @@ -422,7 +422,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate) | |||
| 422 | return; | 422 | return; |
| 423 | 423 | ||
| 424 | if (hwmgr->hwmgr_func->powergate_vce == NULL) { | 424 | if (hwmgr->hwmgr_func->powergate_vce == NULL) { |
| 425 | pr_info("%s was not implemented.\n", __func__); | 425 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 426 | return; | 426 | return; |
| 427 | } | 427 | } |
| 428 | mutex_lock(&hwmgr->smu_lock); | 428 | mutex_lock(&hwmgr->smu_lock); |
| @@ -438,7 +438,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate) | |||
| 438 | return; | 438 | return; |
| 439 | 439 | ||
| 440 | if (hwmgr->hwmgr_func->powergate_uvd == NULL) { | 440 | if (hwmgr->hwmgr_func->powergate_uvd == NULL) { |
| 441 | pr_info("%s was not implemented.\n", __func__); | 441 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 442 | return; | 442 | return; |
| 443 | } | 443 | } |
| 444 | mutex_lock(&hwmgr->smu_lock); | 444 | mutex_lock(&hwmgr->smu_lock); |
| @@ -505,7 +505,7 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) | |||
| 505 | return; | 505 | return; |
| 506 | 506 | ||
| 507 | if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { | 507 | if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { |
| 508 | pr_info("%s was not implemented.\n", __func__); | 508 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 509 | return; | 509 | return; |
| 510 | } | 510 | } |
| 511 | mutex_lock(&hwmgr->smu_lock); | 511 | mutex_lock(&hwmgr->smu_lock); |
| @@ -522,7 +522,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle) | |||
| 522 | return 0; | 522 | return 0; |
| 523 | 523 | ||
| 524 | if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { | 524 | if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { |
| 525 | pr_info("%s was not implemented.\n", __func__); | 525 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 526 | return 0; | 526 | return 0; |
| 527 | } | 527 | } |
| 528 | mutex_lock(&hwmgr->smu_lock); | 528 | mutex_lock(&hwmgr->smu_lock); |
| @@ -540,7 +540,7 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) | |||
| 540 | return -EINVAL; | 540 | return -EINVAL; |
| 541 | 541 | ||
| 542 | if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) { | 542 | if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) { |
| 543 | pr_info("%s was not implemented.\n", __func__); | 543 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 544 | return 0; | 544 | return 0; |
| 545 | } | 545 | } |
| 546 | mutex_lock(&hwmgr->smu_lock); | 546 | mutex_lock(&hwmgr->smu_lock); |
| @@ -558,7 +558,7 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) | |||
| 558 | return -EINVAL; | 558 | return -EINVAL; |
| 559 | 559 | ||
| 560 | if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) { | 560 | if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) { |
| 561 | pr_info("%s was not implemented.\n", __func__); | 561 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 562 | return 0; | 562 | return 0; |
| 563 | } | 563 | } |
| 564 | 564 | ||
| @@ -594,7 +594,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm) | |||
| 594 | return -EINVAL; | 594 | return -EINVAL; |
| 595 | 595 | ||
| 596 | if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) { | 596 | if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) { |
| 597 | pr_info("%s was not implemented.\n", __func__); | 597 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 598 | return 0; | 598 | return 0; |
| 599 | } | 599 | } |
| 600 | mutex_lock(&hwmgr->smu_lock); | 600 | mutex_lock(&hwmgr->smu_lock); |
| @@ -720,7 +720,7 @@ static int pp_dpm_force_clock_level(void *handle, | |||
| 720 | return -EINVAL; | 720 | return -EINVAL; |
| 721 | 721 | ||
| 722 | if (hwmgr->hwmgr_func->force_clock_level == NULL) { | 722 | if (hwmgr->hwmgr_func->force_clock_level == NULL) { |
| 723 | pr_info("%s was not implemented.\n", __func__); | 723 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 724 | return 0; | 724 | return 0; |
| 725 | } | 725 | } |
| 726 | 726 | ||
| @@ -745,7 +745,7 @@ static int pp_dpm_print_clock_levels(void *handle, | |||
| 745 | return -EINVAL; | 745 | return -EINVAL; |
| 746 | 746 | ||
| 747 | if (hwmgr->hwmgr_func->print_clock_levels == NULL) { | 747 | if (hwmgr->hwmgr_func->print_clock_levels == NULL) { |
| 748 | pr_info("%s was not implemented.\n", __func__); | 748 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 749 | return 0; | 749 | return 0; |
| 750 | } | 750 | } |
| 751 | mutex_lock(&hwmgr->smu_lock); | 751 | mutex_lock(&hwmgr->smu_lock); |
| @@ -763,7 +763,7 @@ static int pp_dpm_get_sclk_od(void *handle) | |||
| 763 | return -EINVAL; | 763 | return -EINVAL; |
| 764 | 764 | ||
| 765 | if (hwmgr->hwmgr_func->get_sclk_od == NULL) { | 765 | if (hwmgr->hwmgr_func->get_sclk_od == NULL) { |
| 766 | pr_info("%s was not implemented.\n", __func__); | 766 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 767 | return 0; | 767 | return 0; |
| 768 | } | 768 | } |
| 769 | mutex_lock(&hwmgr->smu_lock); | 769 | mutex_lock(&hwmgr->smu_lock); |
| @@ -781,7 +781,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value) | |||
| 781 | return -EINVAL; | 781 | return -EINVAL; |
| 782 | 782 | ||
| 783 | if (hwmgr->hwmgr_func->set_sclk_od == NULL) { | 783 | if (hwmgr->hwmgr_func->set_sclk_od == NULL) { |
| 784 | pr_info("%s was not implemented.\n", __func__); | 784 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 785 | return 0; | 785 | return 0; |
| 786 | } | 786 | } |
| 787 | 787 | ||
| @@ -800,7 +800,7 @@ static int pp_dpm_get_mclk_od(void *handle) | |||
| 800 | return -EINVAL; | 800 | return -EINVAL; |
| 801 | 801 | ||
| 802 | if (hwmgr->hwmgr_func->get_mclk_od == NULL) { | 802 | if (hwmgr->hwmgr_func->get_mclk_od == NULL) { |
| 803 | pr_info("%s was not implemented.\n", __func__); | 803 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 804 | return 0; | 804 | return 0; |
| 805 | } | 805 | } |
| 806 | mutex_lock(&hwmgr->smu_lock); | 806 | mutex_lock(&hwmgr->smu_lock); |
| @@ -818,7 +818,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value) | |||
| 818 | return -EINVAL; | 818 | return -EINVAL; |
| 819 | 819 | ||
| 820 | if (hwmgr->hwmgr_func->set_mclk_od == NULL) { | 820 | if (hwmgr->hwmgr_func->set_mclk_od == NULL) { |
| 821 | pr_info("%s was not implemented.\n", __func__); | 821 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 822 | return 0; | 822 | return 0; |
| 823 | } | 823 | } |
| 824 | mutex_lock(&hwmgr->smu_lock); | 824 | mutex_lock(&hwmgr->smu_lock); |
| @@ -878,7 +878,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf) | |||
| 878 | return -EINVAL; | 878 | return -EINVAL; |
| 879 | 879 | ||
| 880 | if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { | 880 | if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { |
| 881 | pr_info("%s was not implemented.\n", __func__); | 881 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 882 | return snprintf(buf, PAGE_SIZE, "\n"); | 882 | return snprintf(buf, PAGE_SIZE, "\n"); |
| 883 | } | 883 | } |
| 884 | 884 | ||
| @@ -894,7 +894,7 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) | |||
| 894 | return ret; | 894 | return ret; |
| 895 | 895 | ||
| 896 | if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { | 896 | if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { |
| 897 | pr_info("%s was not implemented.\n", __func__); | 897 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 898 | return ret; | 898 | return ret; |
| 899 | } | 899 | } |
| 900 | 900 | ||
| @@ -917,7 +917,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3 | |||
| 917 | return -EINVAL; | 917 | return -EINVAL; |
| 918 | 918 | ||
| 919 | if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) { | 919 | if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) { |
| 920 | pr_info("%s was not implemented.\n", __func__); | 920 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 921 | return -EINVAL; | 921 | return -EINVAL; |
| 922 | } | 922 | } |
| 923 | 923 | ||
| @@ -935,7 +935,7 @@ static int pp_dpm_switch_power_profile(void *handle, | |||
| 935 | return -EINVAL; | 935 | return -EINVAL; |
| 936 | 936 | ||
| 937 | if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { | 937 | if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { |
| 938 | pr_info("%s was not implemented.\n", __func__); | 938 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 939 | return -EINVAL; | 939 | return -EINVAL; |
| 940 | } | 940 | } |
| 941 | 941 | ||
| @@ -972,7 +972,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit) | |||
| 972 | return -EINVAL; | 972 | return -EINVAL; |
| 973 | 973 | ||
| 974 | if (hwmgr->hwmgr_func->set_power_limit == NULL) { | 974 | if (hwmgr->hwmgr_func->set_power_limit == NULL) { |
| 975 | pr_info("%s was not implemented.\n", __func__); | 975 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 976 | return -EINVAL; | 976 | return -EINVAL; |
| 977 | } | 977 | } |
| 978 | 978 | ||
| @@ -1212,7 +1212,7 @@ static int pp_dpm_powergate_mmhub(void *handle) | |||
| 1212 | return -EINVAL; | 1212 | return -EINVAL; |
| 1213 | 1213 | ||
| 1214 | if (hwmgr->hwmgr_func->powergate_mmhub == NULL) { | 1214 | if (hwmgr->hwmgr_func->powergate_mmhub == NULL) { |
| 1215 | pr_info("%s was not implemented.\n", __func__); | 1215 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 1216 | return 0; | 1216 | return 0; |
| 1217 | } | 1217 | } |
| 1218 | 1218 | ||
| @@ -1227,7 +1227,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate) | |||
| 1227 | return 0; | 1227 | return 0; |
| 1228 | 1228 | ||
| 1229 | if (hwmgr->hwmgr_func->powergate_gfx == NULL) { | 1229 | if (hwmgr->hwmgr_func->powergate_gfx == NULL) { |
| 1230 | pr_info("%s was not implemented.\n", __func__); | 1230 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 1231 | return 0; | 1231 | return 0; |
| 1232 | } | 1232 | } |
| 1233 | 1233 | ||
| @@ -1242,7 +1242,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate) | |||
| 1242 | return; | 1242 | return; |
| 1243 | 1243 | ||
| 1244 | if (hwmgr->hwmgr_func->powergate_acp == NULL) { | 1244 | if (hwmgr->hwmgr_func->powergate_acp == NULL) { |
| 1245 | pr_info("%s was not implemented.\n", __func__); | 1245 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 1246 | return; | 1246 | return; |
| 1247 | } | 1247 | } |
| 1248 | 1248 | ||
| @@ -1257,7 +1257,7 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate) | |||
| 1257 | return; | 1257 | return; |
| 1258 | 1258 | ||
| 1259 | if (hwmgr->hwmgr_func->powergate_sdma == NULL) { | 1259 | if (hwmgr->hwmgr_func->powergate_sdma == NULL) { |
| 1260 | pr_info("%s was not implemented.\n", __func__); | 1260 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 1261 | return; | 1261 | return; |
| 1262 | } | 1262 | } |
| 1263 | 1263 | ||
| @@ -1303,7 +1303,7 @@ static int pp_notify_smu_enable_pwe(void *handle) | |||
| 1303 | return -EINVAL; | 1303 | return -EINVAL; |
| 1304 | 1304 | ||
| 1305 | if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) { | 1305 | if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) { |
| 1306 | pr_info("%s was not implemented.\n", __func__); | 1306 | pr_info_ratelimited("%s was not implemented.\n", __func__); |
| 1307 | return -EINVAL;; | 1307 | return -EINVAL;; |
| 1308 | } | 1308 | } |
| 1309 | 1309 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 88f6b35ea6fe..5dcd21d29dbf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
| @@ -269,7 +269,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) | |||
| 269 | hwmgr->dyn_state.mvdd_dependency_on_mclk); | 269 | hwmgr->dyn_state.mvdd_dependency_on_mclk); |
| 270 | 270 | ||
| 271 | PP_ASSERT_WITH_CODE((0 == result), | 271 | PP_ASSERT_WITH_CODE((0 == result), |
| 272 | "Failed to retrieve SVI2 MVDD table from dependancy table.", | 272 | "Failed to retrieve SVI2 MVDD table from dependency table.", |
| 273 | return result;); | 273 | return result;); |
| 274 | } | 274 | } |
| 275 | 275 | ||
| @@ -288,7 +288,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) | |||
| 288 | result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table), | 288 | result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table), |
| 289 | hwmgr->dyn_state.vddci_dependency_on_mclk); | 289 | hwmgr->dyn_state.vddci_dependency_on_mclk); |
| 290 | PP_ASSERT_WITH_CODE((0 == result), | 290 | PP_ASSERT_WITH_CODE((0 == result), |
| 291 | "Failed to retrieve SVI2 VDDCI table from dependancy table.", | 291 | "Failed to retrieve SVI2 VDDCI table from dependency table.", |
| 292 | return result); | 292 | return result); |
| 293 | } | 293 | } |
| 294 | 294 | ||
| @@ -317,7 +317,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) | |||
| 317 | table_info->vddc_lookup_table); | 317 | table_info->vddc_lookup_table); |
| 318 | 318 | ||
| 319 | PP_ASSERT_WITH_CODE((0 == result), | 319 | PP_ASSERT_WITH_CODE((0 == result), |
| 320 | "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;); | 320 | "Failed to retrieve SVI2 VDDC table from dependency table.", return result;); |
| 321 | } | 321 | } |
| 322 | 322 | ||
| 323 | tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC); | 323 | tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC); |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index fef111ddb736..553a203ac47c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | |||
| @@ -1228,17 +1228,14 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, | |||
| 1228 | 1228 | ||
| 1229 | static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) | 1229 | static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) |
| 1230 | { | 1230 | { |
| 1231 | if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { | 1231 | if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) |
| 1232 | smu8_nbdpm_pstate_enable_disable(hwmgr, true, true); | ||
| 1233 | return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); | 1232 | return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); |
| 1234 | } | ||
| 1235 | return 0; | 1233 | return 0; |
| 1236 | } | 1234 | } |
| 1237 | 1235 | ||
| 1238 | static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) | 1236 | static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) |
| 1239 | { | 1237 | { |
| 1240 | if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { | 1238 | if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { |
| 1241 | smu8_nbdpm_pstate_enable_disable(hwmgr, false, true); | ||
| 1242 | return smum_send_msg_to_smc_with_parameter( | 1239 | return smum_send_msg_to_smc_with_parameter( |
| 1243 | hwmgr, | 1240 | hwmgr, |
| 1244 | PPSMC_MSG_UVDPowerON, | 1241 | PPSMC_MSG_UVDPowerON, |
| @@ -1995,6 +1992,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = { | |||
| 1995 | .power_state_set = smu8_set_power_state_tasks, | 1992 | .power_state_set = smu8_set_power_state_tasks, |
| 1996 | .dynamic_state_management_disable = smu8_disable_dpm_tasks, | 1993 | .dynamic_state_management_disable = smu8_disable_dpm_tasks, |
| 1997 | .notify_cac_buffer_info = smu8_notify_cac_buffer_info, | 1994 | .notify_cac_buffer_info = smu8_notify_cac_buffer_info, |
| 1995 | .update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable, | ||
| 1998 | .get_thermal_temperature_range = smu8_get_thermal_temperature_range, | 1996 | .get_thermal_temperature_range = smu8_get_thermal_temperature_range, |
| 1999 | }; | 1997 | }; |
| 2000 | 1998 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 07d180ce4d18..fb0f96f7cdbc 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |||
| @@ -317,6 +317,9 @@ struct pp_hwmgr_func { | |||
| 317 | uint32_t mc_addr_low, | 317 | uint32_t mc_addr_low, |
| 318 | uint32_t mc_addr_hi, | 318 | uint32_t mc_addr_hi, |
| 319 | uint32_t size); | 319 | uint32_t size); |
| 320 | int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr, | ||
| 321 | bool enable, | ||
| 322 | bool lock); | ||
| 320 | int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr, | 323 | int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr, |
| 321 | struct PP_TemperatureRange *range); | 324 | struct PP_TemperatureRange *range); |
| 322 | int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf); | 325 | int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf); |
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 86ac33922b09..30a6d1edd5fe 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c | |||
| @@ -433,6 +433,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | |||
| 433 | ret = drm_atomic_set_mode_prop_for_crtc(state, mode); | 433 | ret = drm_atomic_set_mode_prop_for_crtc(state, mode); |
| 434 | drm_property_blob_put(mode); | 434 | drm_property_blob_put(mode); |
| 435 | return ret; | 435 | return ret; |
| 436 | } else if (property == config->prop_vrr_enabled) { | ||
| 437 | state->vrr_enabled = val; | ||
| 436 | } else if (property == config->degamma_lut_property) { | 438 | } else if (property == config->degamma_lut_property) { |
| 437 | ret = drm_atomic_replace_property_blob_from_id(dev, | 439 | ret = drm_atomic_replace_property_blob_from_id(dev, |
| 438 | &state->degamma_lut, | 440 | &state->degamma_lut, |
| @@ -491,6 +493,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc, | |||
| 491 | *val = state->active; | 493 | *val = state->active; |
| 492 | else if (property == config->prop_mode_id) | 494 | else if (property == config->prop_mode_id) |
| 493 | *val = (state->mode_blob) ? state->mode_blob->base.id : 0; | 495 | *val = (state->mode_blob) ? state->mode_blob->base.id : 0; |
| 496 | else if (property == config->prop_vrr_enabled) | ||
| 497 | *val = state->vrr_enabled; | ||
| 494 | else if (property == config->degamma_lut_property) | 498 | else if (property == config->degamma_lut_property) |
| 495 | *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; | 499 | *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; |
| 496 | else if (property == config->ctm_property) | 500 | else if (property == config->ctm_property) |
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index fa9baacc863b..da8ae80c2750 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c | |||
| @@ -1279,6 +1279,105 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev) | |||
| 1279 | EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); | 1279 | EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); |
| 1280 | 1280 | ||
| 1281 | /** | 1281 | /** |
| 1282 | * DOC: Variable refresh properties | ||
| 1283 | * | ||
| 1284 | * Variable refresh rate capable displays can dynamically adjust their | ||
| 1285 | * refresh rate by extending the duration of their vertical front porch | ||
| 1286 | * until page flip or timeout occurs. This can reduce or remove stuttering | ||
| 1287 | * and latency in scenarios where the page flip does not align with the | ||
| 1288 | * vblank interval. | ||
| 1289 | * | ||
| 1290 | * An example scenario would be an application flipping at a constant rate | ||
| 1291 | * of 48Hz on a 60Hz display. The page flip will frequently miss the vblank | ||
| 1292 | * interval and the same contents will be displayed twice. This can be | ||
| 1293 | * observed as stuttering for content with motion. | ||
| 1294 | * | ||
| 1295 | * If variable refresh rate was active on a display that supported a | ||
| 1296 | * variable refresh range from 35Hz to 60Hz no stuttering would be observable | ||
| 1297 | * for the example scenario. The minimum supported variable refresh rate of | ||
| 1298 | * 35Hz is below the page flip frequency and the vertical front porch can | ||
| 1299 | * be extended until the page flip occurs. The vblank interval will be | ||
| 1300 | * directly aligned to the page flip rate. | ||
| 1301 | * | ||
| 1302 | * Not all userspace content is suitable for use with variable refresh rate. | ||
| 1303 | * Large and frequent changes in vertical front porch duration may worsen | ||
| 1304 | * perceived stuttering for input sensitive applications. | ||
| 1305 | * | ||
| 1306 | * Panel brightness will also vary with vertical front porch duration. Some | ||
| 1307 | * panels may have noticeable differences in brightness between the minimum | ||
| 1308 | * vertical front porch duration and the maximum vertical front porch duration. | ||
| 1309 | * Large and frequent changes in vertical front porch duration may produce | ||
| 1310 | * observable flickering for such panels. | ||
| 1311 | * | ||
| 1312 | * Userspace control for variable refresh rate is supported via properties | ||
| 1313 | * on the &drm_connector and &drm_crtc objects. | ||
| 1314 | * | ||
| 1315 | * "vrr_capable": | ||
| 1316 | * Optional &drm_connector boolean property that drivers should attach | ||
| 1317 | * with drm_connector_attach_vrr_capable_property() on connectors that | ||
| 1318 | * could support variable refresh rates. Drivers should update the | ||
| 1319 | * property value by calling drm_connector_set_vrr_capable_property(). | ||
| 1320 | * | ||
| 1321 | * Absence of the property should indicate absence of support. | ||
| 1322 | * | ||
| 1323 | * "vrr_enabled": | ||
| 1324 | * Default &drm_crtc boolean property that notifies the driver that the | ||
| 1325 | * content on the CRTC is suitable for variable refresh rate presentation. | ||
| 1326 | * The driver will take this property as a hint to enable variable | ||
| 1327 | * refresh rate support if the receiver supports it, ie. if the | ||
| 1328 | * "vrr_capable" property is true on the &drm_connector object. The | ||
| 1329 | * vertical front porch duration will be extended until page-flip or | ||
| 1330 | * timeout when enabled. | ||
| 1331 | * | ||
| 1332 | * The minimum vertical front porch duration is defined as the vertical | ||
| 1333 | * front porch duration for the current mode. | ||
| 1334 | * | ||
| 1335 | * The maximum vertical front porch duration is greater than or equal to | ||
| 1336 | * the minimum vertical front porch duration. The duration is derived | ||
| 1337 | * from the minimum supported variable refresh rate for the connector. | ||
| 1338 | * | ||
| 1339 | * The driver may place further restrictions within these minimum | ||
| 1340 | * and maximum bounds. | ||
| 1341 | * | ||
| 1342 | * The semantics for the vertical blank timestamp differ when | ||
| 1343 | * variable refresh rate is active. The vertical blank timestamp | ||
| 1344 | * is defined to be an estimate using the current mode's fixed | ||
| 1345 | * refresh rate timings. The semantics for the page-flip event | ||
| 1346 | * timestamp remain the same. | ||
| 1347 | */ | ||
| 1348 | |||
| 1349 | /** | ||
| 1350 | * drm_connector_attach_vrr_capable_property - creates the | ||
| 1351 | * vrr_capable property | ||
| 1352 | * @connector: connector to create the vrr_capable property on. | ||
| 1353 | * | ||
| 1354 | * This is used by atomic drivers to add support for querying | ||
| 1355 | * variable refresh rate capability for a connector. | ||
| 1356 | * | ||
| 1357 | * Returns: | ||
| 1358 | * Zero on success, negative errono on failure. | ||
| 1359 | */ | ||
| 1360 | int drm_connector_attach_vrr_capable_property( | ||
| 1361 | struct drm_connector *connector) | ||
| 1362 | { | ||
| 1363 | struct drm_device *dev = connector->dev; | ||
| 1364 | struct drm_property *prop; | ||
| 1365 | |||
| 1366 | if (!connector->vrr_capable_property) { | ||
| 1367 | prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, | ||
| 1368 | "vrr_capable"); | ||
| 1369 | if (!prop) | ||
| 1370 | return -ENOMEM; | ||
| 1371 | |||
| 1372 | connector->vrr_capable_property = prop; | ||
| 1373 | drm_object_attach_property(&connector->base, prop, 0); | ||
| 1374 | } | ||
| 1375 | |||
| 1376 | return 0; | ||
| 1377 | } | ||
| 1378 | EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property); | ||
| 1379 | |||
| 1380 | /** | ||
| 1282 | * drm_connector_attach_scaling_mode_property - attach atomic scaling mode property | 1381 | * drm_connector_attach_scaling_mode_property - attach atomic scaling mode property |
| 1283 | * @connector: connector to attach scaling mode property on. | 1382 | * @connector: connector to attach scaling mode property on. |
| 1284 | * @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*). | 1383 | * @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*). |
| @@ -1641,6 +1740,24 @@ int drm_connector_attach_max_bpc_property(struct drm_connector *connector, | |||
| 1641 | EXPORT_SYMBOL(drm_connector_attach_max_bpc_property); | 1740 | EXPORT_SYMBOL(drm_connector_attach_max_bpc_property); |
| 1642 | 1741 | ||
| 1643 | /** | 1742 | /** |
| 1743 | * drm_connector_set_vrr_capable_property - sets the variable refresh rate | ||
| 1744 | * capable property for a connector | ||
| 1745 | * @connector: drm connector | ||
| 1746 | * @capable: True if the connector is variable refresh rate capable | ||
| 1747 | * | ||
| 1748 | * Should be used by atomic drivers to update the indicated support for | ||
| 1749 | * variable refresh rate over a connector. | ||
| 1750 | */ | ||
| 1751 | void drm_connector_set_vrr_capable_property( | ||
| 1752 | struct drm_connector *connector, bool capable) | ||
| 1753 | { | ||
| 1754 | drm_object_property_set_value(&connector->base, | ||
| 1755 | connector->vrr_capable_property, | ||
| 1756 | capable); | ||
| 1757 | } | ||
| 1758 | EXPORT_SYMBOL(drm_connector_set_vrr_capable_property); | ||
| 1759 | |||
| 1760 | /** | ||
| 1644 | * drm_connector_init_panel_orientation_property - | 1761 | * drm_connector_init_panel_orientation_property - |
| 1645 | * initialize the connector's panel_orientation property | 1762 | * initialize the connector's panel_orientation property |
| 1646 | * @connector: connector for which to init the panel-orientation property. | 1763 | * @connector: connector for which to init the panel-orientation property. |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 268a182ae189..6f8ddfcfaba5 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -340,6 +340,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, | |||
| 340 | drm_object_attach_property(&crtc->base, config->prop_mode_id, 0); | 340 | drm_object_attach_property(&crtc->base, config->prop_mode_id, 0); |
| 341 | drm_object_attach_property(&crtc->base, | 341 | drm_object_attach_property(&crtc->base, |
| 342 | config->prop_out_fence_ptr, 0); | 342 | config->prop_out_fence_ptr, 0); |
| 343 | drm_object_attach_property(&crtc->base, | ||
| 344 | config->prop_vrr_enabled, 0); | ||
| 343 | } | 345 | } |
| 344 | 346 | ||
| 345 | return 0; | 347 | return 0; |
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index ee80788f2c40..5670c67f28d4 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c | |||
| @@ -310,6 +310,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) | |||
| 310 | return -ENOMEM; | 310 | return -ENOMEM; |
| 311 | dev->mode_config.prop_mode_id = prop; | 311 | dev->mode_config.prop_mode_id = prop; |
| 312 | 312 | ||
| 313 | prop = drm_property_create_bool(dev, 0, | ||
| 314 | "VRR_ENABLED"); | ||
| 315 | if (!prop) | ||
| 316 | return -ENOMEM; | ||
| 317 | dev->mode_config.prop_vrr_enabled = prop; | ||
| 318 | |||
| 313 | prop = drm_property_create(dev, | 319 | prop = drm_property_create(dev, |
| 314 | DRM_MODE_PROP_BLOB, | 320 | DRM_MODE_PROP_BLOB, |
| 315 | "DEGAMMA_LUT", 0); | 321 | "DEGAMMA_LUT", 0); |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index fed11ece0de6..a3d2ca07a058 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
| @@ -946,7 +946,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, | |||
| 946 | bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; | 946 | bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; |
| 947 | 947 | ||
| 948 | if (mem) { | 948 | if (mem) { |
| 949 | addr = mem->start << PAGE_SHIFT; | 949 | addr = (u64)mem->start << PAGE_SHIFT; |
| 950 | if (mem->mem_type != TTM_PL_SYSTEM) { | 950 | if (mem->mem_type != TTM_PL_SYSTEM) { |
| 951 | bo_va->flags |= RADEON_VM_PAGE_VALID; | 951 | bo_va->flags |= RADEON_VM_PAGE_VALID; |
| 952 | } | 952 | } |
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 18ebbb05762e..9d4cd196037a 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c | |||
| @@ -60,6 +60,8 @@ | |||
| 60 | 60 | ||
| 61 | static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); | 61 | static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); |
| 62 | 62 | ||
| 63 | static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job); | ||
| 64 | |||
| 63 | /** | 65 | /** |
| 64 | * drm_sched_rq_init - initialize a given run queue struct | 66 | * drm_sched_rq_init - initialize a given run queue struct |
| 65 | * | 67 | * |
| @@ -228,7 +230,7 @@ static void drm_sched_job_finish(struct work_struct *work) | |||
| 228 | 230 | ||
| 229 | spin_lock(&sched->job_list_lock); | 231 | spin_lock(&sched->job_list_lock); |
| 230 | /* remove job from ring_mirror_list */ | 232 | /* remove job from ring_mirror_list */ |
| 231 | list_del(&s_job->node); | 233 | list_del_init(&s_job->node); |
| 232 | /* queue TDR for next job */ | 234 | /* queue TDR for next job */ |
| 233 | drm_sched_start_timeout(sched); | 235 | drm_sched_start_timeout(sched); |
| 234 | spin_unlock(&sched->job_list_lock); | 236 | spin_unlock(&sched->job_list_lock); |
| @@ -261,40 +263,15 @@ static void drm_sched_job_timedout(struct work_struct *work) | |||
| 261 | { | 263 | { |
| 262 | struct drm_gpu_scheduler *sched; | 264 | struct drm_gpu_scheduler *sched; |
| 263 | struct drm_sched_job *job; | 265 | struct drm_sched_job *job; |
| 264 | int r; | ||
| 265 | 266 | ||
| 266 | sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); | 267 | sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); |
| 267 | |||
| 268 | spin_lock(&sched->job_list_lock); | ||
| 269 | list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) { | ||
| 270 | struct drm_sched_fence *fence = job->s_fence; | ||
| 271 | |||
| 272 | if (!dma_fence_remove_callback(fence->parent, &fence->cb)) | ||
| 273 | goto already_signaled; | ||
| 274 | } | ||
| 275 | |||
| 276 | job = list_first_entry_or_null(&sched->ring_mirror_list, | 268 | job = list_first_entry_or_null(&sched->ring_mirror_list, |
| 277 | struct drm_sched_job, node); | 269 | struct drm_sched_job, node); |
| 278 | spin_unlock(&sched->job_list_lock); | ||
| 279 | 270 | ||
| 280 | if (job) | 271 | if (job) |
| 281 | sched->ops->timedout_job(job); | 272 | job->sched->ops->timedout_job(job); |
| 282 | 273 | ||
| 283 | spin_lock(&sched->job_list_lock); | 274 | spin_lock(&sched->job_list_lock); |
| 284 | list_for_each_entry(job, &sched->ring_mirror_list, node) { | ||
| 285 | struct drm_sched_fence *fence = job->s_fence; | ||
| 286 | |||
| 287 | if (!fence->parent || !list_empty(&fence->cb.node)) | ||
| 288 | continue; | ||
| 289 | |||
| 290 | r = dma_fence_add_callback(fence->parent, &fence->cb, | ||
| 291 | drm_sched_process_job); | ||
| 292 | if (r) | ||
| 293 | drm_sched_process_job(fence->parent, &fence->cb); | ||
| 294 | |||
| 295 | already_signaled: | ||
| 296 | ; | ||
| 297 | } | ||
| 298 | drm_sched_start_timeout(sched); | 275 | drm_sched_start_timeout(sched); |
| 299 | spin_unlock(&sched->job_list_lock); | 276 | spin_unlock(&sched->job_list_lock); |
| 300 | } | 277 | } |
| @@ -391,6 +368,8 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched) | |||
| 391 | r); | 368 | r); |
| 392 | dma_fence_put(fence); | 369 | dma_fence_put(fence); |
| 393 | } else { | 370 | } else { |
| 371 | if (s_fence->finished.error < 0) | ||
| 372 | drm_sched_expel_job_unlocked(s_job); | ||
| 394 | drm_sched_process_job(NULL, &s_fence->cb); | 373 | drm_sched_process_job(NULL, &s_fence->cb); |
| 395 | } | 374 | } |
| 396 | spin_lock(&sched->job_list_lock); | 375 | spin_lock(&sched->job_list_lock); |
| @@ -595,6 +574,8 @@ static int drm_sched_main(void *param) | |||
| 595 | r); | 574 | r); |
| 596 | dma_fence_put(fence); | 575 | dma_fence_put(fence); |
| 597 | } else { | 576 | } else { |
| 577 | if (s_fence->finished.error < 0) | ||
| 578 | drm_sched_expel_job_unlocked(sched_job); | ||
| 598 | drm_sched_process_job(NULL, &s_fence->cb); | 579 | drm_sched_process_job(NULL, &s_fence->cb); |
| 599 | } | 580 | } |
| 600 | 581 | ||
| @@ -603,6 +584,15 @@ static int drm_sched_main(void *param) | |||
| 603 | return 0; | 584 | return 0; |
| 604 | } | 585 | } |
| 605 | 586 | ||
| 587 | static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job) | ||
| 588 | { | ||
| 589 | struct drm_gpu_scheduler *sched = s_job->sched; | ||
| 590 | |||
| 591 | spin_lock(&sched->job_list_lock); | ||
| 592 | list_del_init(&s_job->node); | ||
| 593 | spin_unlock(&sched->job_list_lock); | ||
| 594 | } | ||
| 595 | |||
| 606 | /** | 596 | /** |
| 607 | * drm_sched_init - Init a gpu scheduler instance | 597 | * drm_sched_init - Init a gpu scheduler instance |
| 608 | * | 598 | * |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index ba80150d1052..895d77d799e4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
| @@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
| 492 | if (!fbo) | 492 | if (!fbo) |
| 493 | return -ENOMEM; | 493 | return -ENOMEM; |
| 494 | 494 | ||
| 495 | ttm_bo_get(bo); | ||
| 496 | fbo->base = *bo; | 495 | fbo->base = *bo; |
| 496 | fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT; | ||
| 497 | |||
| 498 | ttm_bo_get(bo); | ||
| 497 | fbo->bo = bo; | 499 | fbo->bo = bo; |
| 498 | 500 | ||
| 499 | /** | 501 | /** |
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 665b9cae7f43..9be2181b3ed7 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h | |||
| @@ -972,6 +972,17 @@ struct drm_connector { | |||
| 972 | struct drm_property *scaling_mode_property; | 972 | struct drm_property *scaling_mode_property; |
| 973 | 973 | ||
| 974 | /** | 974 | /** |
| 975 | * @vrr_capable_property: Optional property to help userspace | ||
| 976 | * query hardware support for variable refresh rate on a connector. | ||
| 977 | * Drivers can add the property to a connector by | ||
| 978 | * calling drm_connector_attach_vrr_capable_property(). | ||
| 979 | * | ||
| 980 | * This should be updated only by calling | ||
| 981 | * drm_connector_set_vrr_capable_property(). | ||
| 982 | */ | ||
| 983 | struct drm_property *vrr_capable_property; | ||
| 984 | |||
| 985 | /** | ||
| 975 | * @content_protection_property: DRM ENUM property for content | 986 | * @content_protection_property: DRM ENUM property for content |
| 976 | * protection. See drm_connector_attach_content_protection_property(). | 987 | * protection. See drm_connector_attach_content_protection_property(). |
| 977 | */ | 988 | */ |
| @@ -1245,6 +1256,8 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev); | |||
| 1245 | int drm_connector_attach_content_type_property(struct drm_connector *dev); | 1256 | int drm_connector_attach_content_type_property(struct drm_connector *dev); |
| 1246 | int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, | 1257 | int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, |
| 1247 | u32 scaling_mode_mask); | 1258 | u32 scaling_mode_mask); |
| 1259 | int drm_connector_attach_vrr_capable_property( | ||
| 1260 | struct drm_connector *connector); | ||
| 1248 | int drm_connector_attach_content_protection_property( | 1261 | int drm_connector_attach_content_protection_property( |
| 1249 | struct drm_connector *connector); | 1262 | struct drm_connector *connector); |
| 1250 | int drm_mode_create_aspect_ratio_property(struct drm_device *dev); | 1263 | int drm_mode_create_aspect_ratio_property(struct drm_device *dev); |
| @@ -1261,6 +1274,8 @@ int drm_connector_update_edid_property(struct drm_connector *connector, | |||
| 1261 | const struct edid *edid); | 1274 | const struct edid *edid); |
| 1262 | void drm_connector_set_link_status_property(struct drm_connector *connector, | 1275 | void drm_connector_set_link_status_property(struct drm_connector *connector, |
| 1263 | uint64_t link_status); | 1276 | uint64_t link_status); |
| 1277 | void drm_connector_set_vrr_capable_property( | ||
| 1278 | struct drm_connector *connector, bool capable); | ||
| 1264 | int drm_connector_init_panel_orientation_property( | 1279 | int drm_connector_init_panel_orientation_property( |
| 1265 | struct drm_connector *connector, int width, int height); | 1280 | struct drm_connector *connector, int width, int height); |
| 1266 | int drm_connector_attach_max_bpc_property(struct drm_connector *connector, | 1281 | int drm_connector_attach_max_bpc_property(struct drm_connector *connector, |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index b21437bc95bf..39c3900aab3c 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
| @@ -291,6 +291,15 @@ struct drm_crtc_state { | |||
| 291 | u32 pageflip_flags; | 291 | u32 pageflip_flags; |
| 292 | 292 | ||
| 293 | /** | 293 | /** |
| 294 | * @vrr_enabled: | ||
| 295 | * | ||
| 296 | * Indicates if variable refresh rate should be enabled for the CRTC. | ||
| 297 | * Support for the requested vrr state will depend on driver and | ||
| 298 | * hardware capability - lacking support is not treated as failure. | ||
| 299 | */ | ||
| 300 | bool vrr_enabled; | ||
| 301 | |||
| 302 | /** | ||
| 294 | * @event: | 303 | * @event: |
| 295 | * | 304 | * |
| 296 | * Optional pointer to a DRM event to signal upon completion of the | 305 | * Optional pointer to a DRM event to signal upon completion of the |
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 5dbeabdbaf91..9db59a1caf5b 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h | |||
| @@ -645,6 +645,11 @@ struct drm_mode_config { | |||
| 645 | * connectors must be off and active must be set to disabled, too. | 645 | * connectors must be off and active must be set to disabled, too. |
| 646 | */ | 646 | */ |
| 647 | struct drm_property *prop_mode_id; | 647 | struct drm_property *prop_mode_id; |
| 648 | /** | ||
| 649 | * @prop_vrr_enabled: Default atomic CRTC property to indicate | ||
| 650 | * whether variable refresh rate should be enabled on the CRTC. | ||
| 651 | */ | ||
| 652 | struct drm_property *prop_vrr_enabled; | ||
| 648 | 653 | ||
| 649 | /** | 654 | /** |
| 650 | * @dvi_i_subconnector_property: Optional DVI-I property to | 655 | * @dvi_i_subconnector_property: Optional DVI-I property to |
