about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/drm/amd/amdgpu
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  6
2 files changed, 12 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 913ce1cb0ea3..0b92dd0c1d70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1138,6 +1138,9 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 		container_of(work, struct amdgpu_device, uvd.idle_work.work);
 	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	if (fences == 0) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, false);
@@ -1159,6 +1162,9 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 199bc89a4d01..a76175a9f878 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -320,6 +320,9 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
 		container_of(work, struct amdgpu_device, vce.idle_work.work);
 	unsigned i, count = 0;
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	for (i = 0; i < adev->vce.num_rings; i++)
 		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
 
@@ -350,6 +353,9 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 	bool set_clocks;
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	mutex_lock(&adev->vce.idle_mutex);
 	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
 	if (set_clocks) {