Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_device.c	16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9fb20a53d5b2..290e279abf0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -86,6 +86,7 @@ static const char *amdgpu_asic_name[] = {
86 "VEGAM", 86 "VEGAM",
87 "VEGA10", 87 "VEGA10",
88 "VEGA12", 88 "VEGA12",
89 "VEGA20",
89 "RAVEN", 90 "RAVEN",
90 "LAST", 91 "LAST",
91}; 92};
@@ -1387,6 +1388,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 #endif
+	case CHIP_VEGA20:
 	default:
 		return 0;
 	case CHIP_VEGA10:
@@ -1521,6 +1523,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 #endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		if (adev->asic_type == CHIP_RAVEN)
 			adev->family = AMDGPU_FAMILY_RV;
@@ -1715,6 +1718,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 		/* skip CG for VCE/UVD, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 			/* enable clockgating to save power */
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1814,6 +1818,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -2155,6 +2160,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_FIJI:
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case CHIP_RAVEN:
 #endif
@@ -3172,7 +3178,6 @@ error:
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job *job, bool force)
 {
-	struct drm_atomic_state *state = NULL;
 	int i, r, resched;
 
 	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
@@ -3195,10 +3200,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-	/* store modesetting */
-	if (amdgpu_device_has_dc_support(adev))
-		state = drm_atomic_helper_suspend(adev->ddev);
-
 	/* block all schedulers and reset given job's ring */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -3238,10 +3239,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		kthread_unpark(ring->sched.thread);
 	}
 
-	if (amdgpu_device_has_dc_support(adev)) {
-		if (drm_atomic_helper_resume(adev->ddev, state))
-			dev_info(adev->dev, "drm resume failed:%d\n", r);
-	} else {
+	if (!amdgpu_device_has_dc_support(adev)) {
 		drm_helper_resume_force_mode(adev->ddev);
 	}
 