diff options
author | Christian König <christian.koenig@amd.com> | 2016-01-31 06:29:04 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2016-02-10 14:17:20 -0500 |
commit | b07c60c0652c497af0c42c1278941f7c5a187fe9 (patch) | |
tree | d19e6dfc68fe4b25032c19ceae0e5be0f7e9c25d /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |
parent | 9e5d53094c5366a9a14e0694e45e794902cc2c04 (diff) |
drm/amdgpu: move ring from IBs into job
We can't submit to multiple rings at the same time anyway.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 33 |
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 8f3b72f5c91c..d928165bfc33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -542,26 +542,25 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, | |||
542 | } | 542 | } |
543 | 543 | ||
544 | static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, | 544 | static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, |
545 | struct amdgpu_cs_parser *parser) | 545 | struct amdgpu_cs_parser *p) |
546 | { | 546 | { |
547 | struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; | 547 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
548 | struct amdgpu_vm *vm = &fpriv->vm; | 548 | struct amdgpu_vm *vm = &fpriv->vm; |
549 | struct amdgpu_ring *ring; | 549 | struct amdgpu_ring *ring = p->job->ring; |
550 | int i, r; | 550 | int i, r; |
551 | 551 | ||
552 | /* Only for UVD/VCE VM emulation */ | 552 | /* Only for UVD/VCE VM emulation */ |
553 | for (i = 0; i < parser->job->num_ibs; i++) { | 553 | if (ring->funcs->parse_cs) { |
554 | ring = parser->job->ibs[i].ring; | 554 | for (i = 0; i < p->job->num_ibs; i++) { |
555 | if (ring->funcs->parse_cs) { | 555 | r = amdgpu_ring_parse_cs(ring, p, i); |
556 | r = amdgpu_ring_parse_cs(ring, parser, i); | ||
557 | if (r) | 556 | if (r) |
558 | return r; | 557 | return r; |
559 | } | 558 | } |
560 | } | 559 | } |
561 | 560 | ||
562 | r = amdgpu_bo_vm_update_pte(parser, vm); | 561 | r = amdgpu_bo_vm_update_pte(p, vm); |
563 | if (!r) | 562 | if (!r) |
564 | amdgpu_cs_sync_rings(parser); | 563 | amdgpu_cs_sync_rings(p); |
565 | 564 | ||
566 | return r; | 565 | return r; |
567 | } | 566 | } |
@@ -603,6 +602,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
603 | if (r) | 602 | if (r) |
604 | return r; | 603 | return r; |
605 | 604 | ||
605 | if (parser->job->ring && parser->job->ring != ring) | ||
606 | return -EINVAL; | ||
607 | |||
608 | parser->job->ring = ring; | ||
609 | |||
606 | if (ring->funcs->parse_cs) { | 610 | if (ring->funcs->parse_cs) { |
607 | struct amdgpu_bo_va_mapping *m; | 611 | struct amdgpu_bo_va_mapping *m; |
608 | struct amdgpu_bo *aobj = NULL; | 612 | struct amdgpu_bo *aobj = NULL; |
@@ -631,7 +635,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
631 | offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; | 635 | offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; |
632 | kptr += chunk_ib->va_start - offset; | 636 | kptr += chunk_ib->va_start - offset; |
633 | 637 | ||
634 | r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib); | 638 | r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib); |
635 | if (r) { | 639 | if (r) { |
636 | DRM_ERROR("Failed to get ib !\n"); | 640 | DRM_ERROR("Failed to get ib !\n"); |
637 | return r; | 641 | return r; |
@@ -640,7 +644,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
640 | memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); | 644 | memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); |
641 | amdgpu_bo_kunmap(aobj); | 645 | amdgpu_bo_kunmap(aobj); |
642 | } else { | 646 | } else { |
643 | r = amdgpu_ib_get(ring, vm, 0, ib); | 647 | r = amdgpu_ib_get(adev, vm, 0, ib); |
644 | if (r) { | 648 | if (r) { |
645 | DRM_ERROR("Failed to get ib !\n"); | 649 | DRM_ERROR("Failed to get ib !\n"); |
646 | return r; | 650 | return r; |
@@ -680,8 +684,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
680 | struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1]; | 684 | struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1]; |
681 | 685 | ||
682 | /* UVD & VCE fw doesn't support user fences */ | 686 | /* UVD & VCE fw doesn't support user fences */ |
683 | if (ib->ring->type == AMDGPU_RING_TYPE_UVD || | 687 | if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD || |
684 | ib->ring->type == AMDGPU_RING_TYPE_VCE) | 688 | parser->job->ring->type == AMDGPU_RING_TYPE_VCE) |
685 | return -EINVAL; | 689 | return -EINVAL; |
686 | 690 | ||
687 | ib->user = &parser->job->uf; | 691 | ib->user = &parser->job->uf; |
@@ -757,7 +761,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job) | |||
757 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | 761 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, |
758 | union drm_amdgpu_cs *cs) | 762 | union drm_amdgpu_cs *cs) |
759 | { | 763 | { |
760 | struct amdgpu_ring * ring = p->job->ibs->ring; | 764 | struct amdgpu_ring *ring = p->job->ring; |
761 | struct amd_sched_fence *fence; | 765 | struct amd_sched_fence *fence; |
762 | struct amdgpu_job *job; | 766 | struct amdgpu_job *job; |
763 | 767 | ||
@@ -766,7 +770,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
766 | 770 | ||
767 | job->base.sched = &ring->sched; | 771 | job->base.sched = &ring->sched; |
768 | job->base.s_entity = &p->ctx->rings[ring->idx].entity; | 772 | job->base.s_entity = &p->ctx->rings[ring->idx].entity; |
769 | job->adev = p->adev; | ||
770 | job->owner = p->filp; | 773 | job->owner = p->filp; |
771 | job->free_job = amdgpu_cs_free_job; | 774 | job->free_job = amdgpu_cs_free_job; |
772 | 775 | ||