author     James Zhu <James.Zhu@amd.com>             2018-06-18 13:46:16 -0400
committer  Alex Deucher <alexander.deucher@amd.com>  2018-06-19 14:34:27 -0400
commit     5c53d19b76dccbaf340b11acb837d40c0789049d
tree       efd0e1409a71e314179fd8048394114e0fd12135
parent     d9fda248046ac035f18a6e663f2f9245b4bf9470
drm/amdgpu: All UVD instances share one idle_work handle

All UVD instances are governed by a single DPM control, so it is
better for them to share one idle_work handle as well.
Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Tested-by: Stefan Agner <stefan@agner.ch>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
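
For context, here is a minimal sketch (not part of the patch) of the pattern
the commit adopts: a single delayed_work owned by the device rather than by
each instance, with container_of() recovering the device in the handler. The
demo_* names are hypothetical; only the workqueue calls mirror the real code.

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

#define DEMO_MAX_INST      2
#define DEMO_IDLE_TIMEOUT  msecs_to_jiffies(1000)

struct demo_inst {
	unsigned busy_fences;              /* per-instance state only */
};

struct demo_device {
	struct demo_inst inst[DEMO_MAX_INST];
	unsigned num_inst;
	struct delayed_work idle_work;     /* one handle shared by all instances */
};

static void demo_idle_work_handler(struct work_struct *work)
{
	/* a delayed_work embeds a work_struct, hence ".work" in the offset */
	struct demo_device *dev =
		container_of(work, struct demo_device, idle_work.work);
	unsigned fences = 0, i;

	/* power management is per-device, so count fences on every instance */
	for (i = 0; i < dev->num_inst; ++i)
		fences += dev->inst[i].busy_fences;

	if (fences)
		schedule_delayed_work(&dev->idle_work, DEMO_IDLE_TIMEOUT);
	/* else: all instances idle, safe to gate clocks/power once */
}

static void demo_device_init(struct demo_device *dev)
{
	dev->num_inst = DEMO_MAX_INST;
	INIT_DELAYED_WORK(&dev->idle_work, demo_idle_work_handler);
}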
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 14 +++++++-------
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h |  2 +-
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index bcf68f80bbf0..3ff08e326838 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	unsigned version_major, version_minor, family_id;
 	int i, j, r;
 
-	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	void *ptr;
 	int i, j;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
 
-		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
 		/* only valid for physical mode */
 		if (adev->asic_type < CHIP_POLARIS10) {
 			for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+		container_of(work, struct amdgpu_device, uvd.idle_work.work);
 	unsigned fences = 0, i, j;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 					       AMD_CG_STATE_GATE);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 	}
 }
 
@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
 	if (!amdgpu_sriov_vf(ring->adev))
-		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
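
The begin_use/end_use changes above rely on the return value of
cancel_delayed_work_sync(): it returns true only if the work was still
pending, so a false result means the idle handler may already have run and
gated the clocks. A sketch of that handshake, continuing the hypothetical
demo_device from above:

static void demo_ring_begin_use(struct demo_device *dev)
{
	/* false => idle work already ran (or was never queued), so clocks
	 * may be gated and must be raised before using the ring */
	bool set_clocks = !cancel_delayed_work_sync(&dev->idle_work);

	if (set_clocks)
		pr_info("demo: raising clocks for first user\n");
}

static void demo_ring_end_use(struct demo_device *dev)
{
	/* re-arm the shared idle timer once the ring goes quiet */
	schedule_delayed_work(&dev->idle_work, DEMO_IDLE_TIMEOUT);
}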
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index b1579fba134c..8b23a1b00c76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
 	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
-	struct delayed_work	idle_work;
 	struct amdgpu_ring	ring;
 	struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src	irq;
@@ -62,6 +61,7 @@ struct amdgpu_uvd {
 	bool			address_64_bit;
 	bool			use_ctx_buf;
 	struct amdgpu_uvd_inst	inst[AMDGPU_MAX_UVD_INSTANCES];
+	struct delayed_work	idle_work;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
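
One subtlety in the suspend hunk is worth spelling out: with a shared handle,
cancel_delayed_work_sync() moves out of the per-instance loop and must run
before it, so the handler cannot run, or re-arm itself, while per-instance
state is torn down. A sketch of that ordering, again using the hypothetical
demo_device:

static int demo_suspend(struct demo_device *dev)
{
	unsigned i;

	/* cancel once, up front: after this returns, the shared handler
	 * is guaranteed to be neither queued nor running */
	cancel_delayed_work_sync(&dev->idle_work);

	for (i = 0; i < dev->num_inst; ++i)
		dev->inst[i].busy_fences = 0;   /* safe: no concurrent handler */

	return 0;
}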