aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJames Zhu <James.Zhu@amd.com>2018-09-21 14:43:18 -0400
committerAlex Deucher <alexander.deucher@amd.com>2018-09-26 22:09:25 -0400
commitbd5d5180db3226f0ce4f132e789c71a8efba3555 (patch)
tree804e984e59853f03aa9c6c4ec5e80355a1965285
parent0b8690b7a84c04533ae65a5ab9deae8950ca408e (diff)
drm/amdgpu: Add DPG pause mode support
Add functions to support VCN DPG pause mode.

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c161
1 file changed, 159 insertions, 2 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 27262a81cfa1..c6dd8403414f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -36,6 +36,7 @@
36#include "soc15_common.h" 36#include "soc15_common.h"
37 37
38#include "vcn/vcn_1_0_offset.h" 38#include "vcn/vcn_1_0_offset.h"
39#include "vcn/vcn_1_0_sh_mask.h"
39 40
40/* 1 second timeout */ 41/* 1 second timeout */
41#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000) 42#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -212,18 +213,158 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
212 return 0; 213 return 0;
213} 214}
214 215
/*
 * amdgpu_vcn_pause_dpg_mode - transition the VCN block between DPG pause
 * states.
 *
 * NOTE(review): this is a scraped diff view of upstream commit bd5d5180db32;
 * the leading "216"-style numbers are the viewer's new-file line numbers,
 * not part of the code.
 *
 * For each sub-engine (non-JPEG "fw_based" and JPEG) whose requested state
 * differs from the cached adev->vcn.pause_state: on PAUSE it sets the
 * corresponding *_PAUSE_DPG_REQ bit in mmUVD_DPG_PAUSE, polls for the
 * matching *_PAUSE_DPG_ACK bit, then re-programs that engine's ring
 * registers; on UNPAUSE it only clears the REQ bit (no wait).  The cached
 * pause_state is updated unconditionally at the end of each sub-block.
 * Always returns 0 (the wait results in ret_code are used only to gate the
 * restore sequence, not propagated).
 */
216static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
217 struct dpg_pause_state *new_state)
218{
219 int ret_code;
220 uint32_t reg_data = 0;
221 uint32_t reg_data2 = 0;
222 struct amdgpu_ring *ring;
223
224 /* pause/unpause if state is changed */
225 if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
226 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
227 adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
228 new_state->fw_based, new_state->jpeg);
229
 /* snapshot the pause register with the stale non-JPEG ACK bit cleared */
230 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
231 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
232
233 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
234 ret_code = 0;
235
 /* only wait for tiles-off when JPEG is not already holding a pause ACK */
236 if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
237 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
238 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
239 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
240
241 if (!ret_code) {
242 /* pause DPG non-jpeg */
243 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
244 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
245 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
246 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
247 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
248
 /* Restore: re-program both encode rings, then kick the decode WPTR */
249 /* Restore */
250 ring = &adev->vcn.ring_enc[0];
251 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
252 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
253 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
254 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
255 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
256
257 ring = &adev->vcn.ring_enc[1];
258 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
259 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
260 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
261 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
262 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
263
264 ring = &adev->vcn.ring_dec;
 /* NOTE(review): 0x80000000 is presumably a WPTR commit/update bit —
  * confirm against the VCN 1.0 register spec */
265 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
266 lower_32_bits(ring->wptr) | 0x80000000);
 /* NOTE(review): expected value here is a UVD_PGFSM_CONFIG field mask
  * while polling mmUVD_POWER_STATUS; this matches the upstream commit
  * but the mask/register pairing looks odd — worth confirming */
267 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
268 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
269 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
270 }
271 } else {
272 /* unpause dpg non-jpeg, no need to wait */
273 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
274 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
275 }
276 adev->vcn.pause_state.fw_based = new_state->fw_based;
277 }
278
279 /* pause/unpause if state is changed */
280 if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
281 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
282 adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
283 new_state->fw_based, new_state->jpeg);
284
 /* snapshot the pause register with the stale JPEG ACK bit cleared */
285 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
286 (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
287
288 if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
289 ret_code = 0;
290
 /* only wait for tiles-off when non-JPEG is not already holding a pause ACK */
291 if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
292 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
293 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
294 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
295
296 if (!ret_code) {
297 /* Make sure JPRG Snoop is disabled before sending the pause */
298 reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
299 reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
300 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
301
302 /* pause DPG jpeg */
303 reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
304 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
305 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
306 UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
307 UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
308
 /* Restore: re-program the JPEG ring, then kick the decode WPTR.
  * NOTE(review): 0x1/0x2 written to mmUVD_JRBC_RB_CNTL are magic
  * bits here — presumably reset/enable controls; confirm against
  * the register spec */
309 /* Restore */
310 ring = &adev->vcn.ring_jpeg;
311 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
312 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000001L | 0x00000002L);
313 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
314 lower_32_bits(ring->gpu_addr));
315 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
316 upper_32_bits(ring->gpu_addr));
317 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
318 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
319 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
320
321 ring = &adev->vcn.ring_dec;
322 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
323 lower_32_bits(ring->wptr) | 0x80000000);
324 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
325 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
326 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
327 }
328 } else {
329 /* unpause dpg jpeg, no need to wait */
330 reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
331 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
332 }
333 adev->vcn.pause_state.jpeg = new_state->jpeg;
334 }
335
336 return 0;
337}
338
/*
 * amdgpu_vcn_idle_work_handler - delayed-work callback deciding VCN power
 * state from outstanding fences.
 *
 * NOTE(review): diff view — context lines show old and new revisions side
 * by side; only the right-hand (new) column is the resulting code, and the
 * tail of this function is elided by the hunk boundary, so this view is
 * partial.
 */
215static void amdgpu_vcn_idle_work_handler(struct work_struct *work) 339static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
216{ 340{
217 struct amdgpu_device *adev = 341 struct amdgpu_device *adev =
218 container_of(work, struct amdgpu_device, vcn.idle_work.work); 342 container_of(work, struct amdgpu_device, vcn.idle_work.work);
219 unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec); 343 unsigned int fences = 0;
220 unsigned i; 344 unsigned int i;
221 345
222 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 346 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
223 fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]); 347 fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
224 } 348 }
225 349
 /* at this point 'fences' counts only the encode rings: pause the
  * non-JPEG (fw_based) path while encode work is pending, and pause
  * JPEG while JPEG fences are pending */
350 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
351 struct dpg_pause_state new_state;
352
353 if (fences)
354 new_state.fw_based = VCN_DPG_STATE__PAUSE;
355 else
356 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
357
358 if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
359 new_state.jpeg = VCN_DPG_STATE__PAUSE;
360 else
361 new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
362
363 amdgpu_vcn_pause_dpg_mode(adev, &new_state);
364 }
365
 /* JPEG and decode fences are folded in only after the DPG decision */
226 fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg); 366 fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
367 fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
227 368
228 if (fences == 0) { 369 if (fences == 0) {
229 amdgpu_gfx_off_ctrl(adev, true); 370 amdgpu_gfx_off_ctrl(adev, true);
@@ -250,6 +391,22 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
/*
 * Tail of amdgpu_vcn_ring_begin_use (the hunk starts mid-function): after
 * ungating VCN, request DPG PAUSE for whichever engine this ring drives
 * (encode ring -> fw_based, JPEG ring -> jpeg) while leaving the other
 * engine's state at its currently cached value.
 */
250 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, 391 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
251 AMD_PG_STATE_UNGATE); 392 AMD_PG_STATE_UNGATE);
252 } 393 }
394
395 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
396 struct dpg_pause_state new_state;
397
398 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
399 new_state.fw_based = VCN_DPG_STATE__PAUSE;
400 else
401 new_state.fw_based = adev->vcn.pause_state.fw_based;
402
403 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
404 new_state.jpeg = VCN_DPG_STATE__PAUSE;
405 else
406 new_state.jpeg = adev->vcn.pause_state.jpeg;
407
408 amdgpu_vcn_pause_dpg_mode(adev, &new_state);
409 }
253} 410}
254 411
255void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring) 412void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)