author		jimqu <Jim.Qu@amd.com>				2016-07-15 04:57:28 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2016-07-15 12:34:18 -0400
commit		567e6e29842af8cdd84722ede94e0d2420ab5e35 (patch)
tree		a002b87baee9ed81c49ee29f236c5cf8205ecd7b
parent		f476852a8cb62f0ebf1631cfb71dda1209b91306 (diff)
drm/amdgpu: S3 resume fail on Polaris10
Sometimes the driver cannot return from fence waiting when doing the VCE ring
IB test. The issue is ASIC-specific and occurs only intermittently, so adjust
the VCE suspend and resume sequence.
Signed-off-by: JimQu <Jim.Qu@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
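
The heart of the fix is easiest to see in vce_v3_0_hw_fini(): before this
patch it simply returned 0, so VCE was never quiesced before suspend. A
condensed view of the new handler (same code as the last hunk below, with
orientation comments added by the editor; not a standalone build unit):

	static int vce_v3_0_hw_fini(void *handle)
	{
		int r;
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;

		/* Let outstanding VCE work drain before touching the block. */
		r = vce_v3_0_wait_for_idle(handle);
		if (r)
			return r;

		/* Gate the VCPU clock and hold the ECPU in soft reset. */
		return vce_v3_0_stop(adev);
	}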
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vce_v3_0.c	143
1 file changed, 97 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 30e8099e94c5..d7b8da433fe2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,6 +43,7 @@
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
 
 #define VCE_V3_0_FW_SIZE	(384 * 1024)
 #define VCE_V3_0_STACK_SIZE	(64 * 1024)
@@ -51,6 +52,7 @@
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
+static int vce_v3_0_wait_for_idle(void *handle);
 
 /**
  * vce_v3_0_ring_get_rptr - get read pointer
@@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 	vce_v3_0_override_vce_clock_gating(adev, false);
 }
 
+static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
+{
+	int i, j;
+	uint32_t status = 0;
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmVCE_STATUS);
+			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+				return 0;
+			mdelay(10);
+		}
+
+		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+		WREG32_P(mmVCE_SOFT_RESET,
+			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+		WREG32_P(mmVCE_SOFT_RESET, 0,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+	}
+
+	return -ETIMEDOUT;
+}
+
 /**
  * vce_v3_0_start - start VCE block
  *
@@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 static int vce_v3_0_start(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	int idx, i, j, r;
+	int idx, r;
+
+	ring = &adev->vce.ring[0];
+	WREG32(mmVCE_RB_RPTR, ring->wptr);
+	WREG32(mmVCE_RB_WPTR, ring->wptr);
+	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+	ring = &adev->vce.ring[1];
+	WREG32(mmVCE_RB_RPTR2, ring->wptr);
+	WREG32(mmVCE_RB_WPTR2, ring->wptr);
+	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (idx = 0; idx < 2; ++idx) {
-
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
@@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 
 		vce_v3_0_mc_resume(adev, idx);
 
-		/* set BUSY flag */
-		WREG32_P(mmVCE_STATUS, 1, ~1);
+		WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
+			~VCE_STATUS__JOB_BUSY_MASK);
+
 		if (adev->asic_type >= CHIP_STONEY)
 			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
 		else
 			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
 				~VCE_VCPU_CNTL__CLK_EN_MASK);
 
-		WREG32_P(mmVCE_SOFT_RESET,
-			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-		mdelay(100);
-
 		WREG32_P(mmVCE_SOFT_RESET, 0,
 			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
 
-		for (i = 0; i < 10; ++i) {
-			uint32_t status;
-			for (j = 0; j < 100; ++j) {
-				status = RREG32(mmVCE_STATUS);
-				if (status & 2)
-					break;
-				mdelay(10);
-			}
-			r = 0;
-			if (status & 2)
-				break;
-
-			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-			WREG32_P(mmVCE_SOFT_RESET,
-				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-			mdelay(10);
-			WREG32_P(mmVCE_SOFT_RESET, 0,
-				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-			mdelay(10);
-			r = -1;
-		}
+		mdelay(100);
+
+		r = vce_v3_0_firmware_loaded(adev);
 
 		/* clear BUSY flag */
-		WREG32_P(mmVCE_STATUS, 0, ~1);
+		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
 
 		/* Set Clock-Gating off */
 		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
-	ring = &adev->vce.ring[0];
-	WREG32(mmVCE_RB_RPTR, ring->wptr);
-	WREG32(mmVCE_RB_WPTR, ring->wptr);
-	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+	return 0;
+}
 
-	ring = &adev->vce.ring[1];
-	WREG32(mmVCE_RB_RPTR2, ring->wptr);
-	WREG32(mmVCE_RB_WPTR2, ring->wptr);
-	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+static int vce_v3_0_stop(struct amdgpu_device *adev)
+{
+	int idx;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			WREG32_P(mmGRBM_GFX_INDEX, 0,
+				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+		else
+			WREG32_P(mmGRBM_GFX_INDEX,
+				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
+				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+
+		if (adev->asic_type >= CHIP_STONEY)
+			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
+		else
+			WREG32_P(mmVCE_VCPU_CNTL, 0,
+				~VCE_VCPU_CNTL__CLK_EN_MASK);
+		/* hold on ECPU */
+		WREG32_P(mmVCE_SOFT_RESET,
+			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+		/* clear BUSY flag */
+		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+
+		/* Set Clock-Gating off */
+		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
+			vce_v3_0_set_vce_sw_clock_gating(adev, false);
+	}
+
+	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
 }
@@ -441,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle)
 
 static int vce_v3_0_hw_fini(void *handle)
 {
-	return 0;
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = vce_v3_0_wait_for_idle(handle);
+	if (r)
+		return r;
+
+	return vce_v3_0_stop(adev);
 }
 
 static int vce_v3_0_suspend(void *handle)