Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index bcf68f80bbf0..3ff08e326838 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	unsigned version_major, version_minor, family_id;
 	int i, j, r;
 
-	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	void *ptr;
 	int i, j;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
 
-		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
 		/* only valid for physical mode */
 		if (adev->asic_type < CHIP_POLARIS10) {
 			for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+		container_of(work, struct amdgpu_device, uvd.idle_work.work);
 	unsigned fences = 0, i, j;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 						       AMD_CG_STATE_GATE);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 	}
 }
 
@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
 	if (!amdgpu_sriov_vf(ring->adev))
-		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
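
The patch above moves the UVD idle handling from a per-instance delayed work item (uvd.inst->idle_work / uvd.inst[j].idle_work) to a single device-wide one (uvd.idle_work). The following is a minimal sketch of the Linux delayed-work lifecycle it relies on; struct my_uvd_device, my_idle_handler, my_sw_init, my_ring_end_use, my_suspend and MY_IDLE_TIMEOUT are hypothetical names for illustration only, not part of the amdgpu driver.

/*
 * Minimal sketch (assumed names, not the amdgpu driver's code) of the
 * delayed-work pattern used above: one device-wide work item that is
 * armed when the ring stops being used and cancelled synchronously
 * before suspend, so the handler cannot race with teardown.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

#define MY_IDLE_TIMEOUT msecs_to_jiffies(1000)	/* illustrative timeout */

struct my_uvd_device {
	struct delayed_work idle_work;	/* device-wide, not per instance */
};

static void my_idle_handler(struct work_struct *work)
{
	/* Recover the enclosing device from the embedded work item. */
	struct my_uvd_device *dev =
		container_of(work, struct my_uvd_device, idle_work.work);

	/* ... gate clocks / power down the block, or re-arm if busy ... */
	(void)dev;
}

static void my_sw_init(struct my_uvd_device *dev)
{
	INIT_DELAYED_WORK(&dev->idle_work, my_idle_handler);
}

static void my_ring_end_use(struct my_uvd_device *dev)
{
	/* Arm the idle timer once the ring goes quiet. */
	schedule_delayed_work(&dev->idle_work, MY_IDLE_TIMEOUT);
}

static void my_suspend(struct my_uvd_device *dev)
{
	/* Wait for a running handler to finish before saving state. */
	cancel_delayed_work_sync(&dev->idle_work);
}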