author	Andrey Grodzovsky <andrey.grodzovsky@amd.com>	2018-10-19 16:22:48 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2018-11-05 14:21:23 -0500
commit	c66ed765a0a97b8900f37d4a71f1d75f52f56eeb (patch)
tree	cfed66df4fc6088c761e1d5cce16a300af57c0df /drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
parent	faf6e1a87e07423a729e04fb2e8188742e89ea4c (diff)
drm/amdgpu: Retire amdgpu_ring.ready flag v4
Start using drm_gpu_scheduler.ready instead.

v3: Add helper function to run ring test and set sched.ready flag status
accordingly, clean explicit sched.ready sets from the IP specific files.

v4: Add kerneldoc and rebase.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
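The v3 helper the message describes pairs the ring test with the sched.ready update, so the IP-specific files no longer have to set the flag by hand. A minimal sketch of that shape follows; the exact helper name amdgpu_ring_test_helper and the error-logging call are assumptions here, not something this page confirms:

/* Sketch of the v3 helper: run the ring test and record the result
 * in the GPU scheduler's ready flag, which replaces amdgpu_ring.ready.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);

	/* The scheduler flag now tracks ring usability directly,
	 * which is what amdgpu_hw_ip_info() below reads per ring.
	 */
	ring->sched.ready = !r;

	return r;
}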
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 09fa919d2500..8f6ff9f895c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -336,7 +336,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_GFX:
 		type = AMD_IP_BLOCK_TYPE_GFX;
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			if (adev->gfx.gfx_ring[i].ready)
+			if (adev->gfx.gfx_ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 32;
 		ib_size_alignment = 32;
@@ -344,7 +344,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_COMPUTE:
 		type = AMD_IP_BLOCK_TYPE_GFX;
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			if (adev->gfx.compute_ring[i].ready)
+			if (adev->gfx.compute_ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 32;
 		ib_size_alignment = 32;
@@ -352,7 +352,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_DMA:
 		type = AMD_IP_BLOCK_TYPE_SDMA;
 		for (i = 0; i < adev->sdma.num_instances; i++)
-			if (adev->sdma.instance[i].ring.ready)
+			if (adev->sdma.instance[i].ring.sched.ready)
 				++num_rings;
 		ib_start_alignment = 256;
 		ib_size_alignment = 4;
@@ -363,7 +363,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 			if (adev->uvd.harvest_config & (1 << i))
 				continue;
 
-			if (adev->uvd.inst[i].ring.ready)
+			if (adev->uvd.inst[i].ring.sched.ready)
 				++num_rings;
 		}
 		ib_start_alignment = 64;
@@ -372,7 +372,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_VCE:
 		type = AMD_IP_BLOCK_TYPE_VCE;
 		for (i = 0; i < adev->vce.num_rings; i++)
-			if (adev->vce.ring[i].ready)
+			if (adev->vce.ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 4;
 		ib_size_alignment = 1;
@@ -384,7 +384,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 				continue;
 
 			for (j = 0; j < adev->uvd.num_enc_rings; j++)
-				if (adev->uvd.inst[i].ring_enc[j].ready)
+				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
 					++num_rings;
 		}
 		ib_start_alignment = 64;
@@ -392,7 +392,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.ring_dec.ready)
+		if (adev->vcn.ring_dec.sched.ready)
 			++num_rings;
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
@@ -400,14 +400,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_VCN_ENC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
 		for (i = 0; i < adev->vcn.num_enc_rings; i++)
-			if (adev->vcn.ring_enc[i].ready)
+			if (adev->vcn.ring_enc[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 64;
 		ib_size_alignment = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.ring_jpeg.ready)
+		if (adev->vcn.ring_jpeg.sched.ready)
 			++num_rings;
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;