diff options
author | Christian König <christian.koenig@amd.com> | 2018-07-30 10:44:14 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2018-07-31 17:58:19 -0400 |
commit | 4a102ad4ba0daf886dcf0927ce2a7f6c3b3a615c (patch) | |
tree | c2eb0e23188f5bf6af4b07594036372a6f09aa21 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |
parent | 920990cb080a44203bf6c8eb706e79ad23241ad3 (diff) |
drm/amdgpu: create an empty bo_list if no handle is provided
Instead of having extra handling just create an empty bo_list when no
handle is provided.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 111 |
1 file changed, 46 insertions, 65 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1d7292ab2b62..502b94fb116a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -561,6 +561,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, | |||
561 | union drm_amdgpu_cs *cs) | 561 | union drm_amdgpu_cs *cs) |
562 | { | 562 | { |
563 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | 563 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
564 | struct amdgpu_vm *vm = &fpriv->vm; | ||
564 | struct amdgpu_bo_list_entry *e; | 565 | struct amdgpu_bo_list_entry *e; |
565 | struct list_head duplicates; | 566 | struct list_head duplicates; |
566 | struct amdgpu_bo *gds; | 567 | struct amdgpu_bo *gds; |
@@ -580,13 +581,17 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, | |||
580 | &p->bo_list); | 581 | &p->bo_list); |
581 | if (r) | 582 | if (r) |
582 | return r; | 583 | return r; |
584 | } else if (!p->bo_list) { | ||
585 | /* Create a empty bo_list when no handle is provided */ | ||
586 | r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0, | ||
587 | &p->bo_list); | ||
588 | if (r) | ||
589 | return r; | ||
583 | } | 590 | } |
584 | 591 | ||
585 | if (p->bo_list) { | 592 | amdgpu_bo_list_get_list(p->bo_list, &p->validated); |
586 | amdgpu_bo_list_get_list(p->bo_list, &p->validated); | 593 | if (p->bo_list->first_userptr != p->bo_list->num_entries) |
587 | if (p->bo_list->first_userptr != p->bo_list->num_entries) | 594 | p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX); |
588 | p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX); | ||
589 | } | ||
590 | 595 | ||
591 | INIT_LIST_HEAD(&duplicates); | 596 | INIT_LIST_HEAD(&duplicates); |
592 | amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); | 597 | amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); |
@@ -605,10 +610,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, | |||
605 | goto error_free_pages; | 610 | goto error_free_pages; |
606 | } | 611 | } |
607 | 612 | ||
608 | /* Without a BO list we don't have userptr BOs */ | ||
609 | if (!p->bo_list) | ||
610 | break; | ||
611 | |||
612 | INIT_LIST_HEAD(&need_pages); | 613 | INIT_LIST_HEAD(&need_pages); |
613 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { | 614 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
614 | struct amdgpu_bo *bo = e->robj; | 615 | struct amdgpu_bo *bo = e->robj; |
@@ -703,21 +704,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, | |||
703 | amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, | 704 | amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, |
704 | p->bytes_moved_vis); | 705 | p->bytes_moved_vis); |
705 | 706 | ||
706 | if (p->bo_list) { | 707 | gds = p->bo_list->gds_obj; |
707 | struct amdgpu_vm *vm = &fpriv->vm; | 708 | gws = p->bo_list->gws_obj; |
708 | struct amdgpu_bo_list_entry *e; | 709 | oa = p->bo_list->oa_obj; |
709 | 710 | ||
710 | gds = p->bo_list->gds_obj; | 711 | amdgpu_bo_list_for_each_entry(e, p->bo_list) |
711 | gws = p->bo_list->gws_obj; | 712 | e->bo_va = amdgpu_vm_bo_find(vm, e->robj); |
712 | oa = p->bo_list->oa_obj; | ||
713 | |||
714 | amdgpu_bo_list_for_each_entry(e, p->bo_list) | ||
715 | e->bo_va = amdgpu_vm_bo_find(vm, e->robj); | ||
716 | } else { | ||
717 | gds = p->adev->gds.gds_gfx_bo; | ||
718 | gws = p->adev->gds.gws_gfx_bo; | ||
719 | oa = p->adev->gds.oa_gfx_bo; | ||
720 | } | ||
721 | 713 | ||
722 | if (gds) { | 714 | if (gds) { |
723 | p->job->gds_base = amdgpu_bo_gpu_offset(gds); | 715 | p->job->gds_base = amdgpu_bo_gpu_offset(gds); |
@@ -745,15 +737,13 @@ error_validate: | |||
745 | 737 | ||
746 | error_free_pages: | 738 | error_free_pages: |
747 | 739 | ||
748 | if (p->bo_list) { | 740 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
749 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { | 741 | if (!e->user_pages) |
750 | if (!e->user_pages) | 742 | continue; |
751 | continue; | ||
752 | 743 | ||
753 | release_pages(e->user_pages, | 744 | release_pages(e->user_pages, |
754 | e->robj->tbo.ttm->num_pages); | 745 | e->robj->tbo.ttm->num_pages); |
755 | kvfree(e->user_pages); | 746 | kvfree(e->user_pages); |
756 | } | ||
757 | } | 747 | } |
758 | 748 | ||
759 | return r; | 749 | return r; |
@@ -815,9 +805,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, | |||
815 | 805 | ||
816 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) | 806 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) |
817 | { | 807 | { |
818 | struct amdgpu_device *adev = p->adev; | ||
819 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | 808 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
809 | struct amdgpu_device *adev = p->adev; | ||
820 | struct amdgpu_vm *vm = &fpriv->vm; | 810 | struct amdgpu_vm *vm = &fpriv->vm; |
811 | struct amdgpu_bo_list_entry *e; | ||
821 | struct amdgpu_bo_va *bo_va; | 812 | struct amdgpu_bo_va *bo_va; |
822 | struct amdgpu_bo *bo; | 813 | struct amdgpu_bo *bo; |
823 | int r; | 814 | int r; |
@@ -850,31 +841,26 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) | |||
850 | return r; | 841 | return r; |
851 | } | 842 | } |
852 | 843 | ||
853 | if (p->bo_list) { | 844 | amdgpu_bo_list_for_each_entry(e, p->bo_list) { |
854 | struct amdgpu_bo_list_entry *e; | 845 | struct dma_fence *f; |
855 | |||
856 | amdgpu_bo_list_for_each_entry(e, p->bo_list) { | ||
857 | struct dma_fence *f; | ||
858 | |||
859 | /* ignore duplicates */ | ||
860 | bo = e->robj; | ||
861 | if (!bo) | ||
862 | continue; | ||
863 | 846 | ||
864 | bo_va = e->bo_va; | 847 | /* ignore duplicates */ |
865 | if (bo_va == NULL) | 848 | bo = e->robj; |
866 | continue; | 849 | if (!bo) |
850 | continue; | ||
867 | 851 | ||
868 | r = amdgpu_vm_bo_update(adev, bo_va, false); | 852 | bo_va = e->bo_va; |
869 | if (r) | 853 | if (bo_va == NULL) |
870 | return r; | 854 | continue; |
871 | 855 | ||
872 | f = bo_va->last_pt_update; | 856 | r = amdgpu_vm_bo_update(adev, bo_va, false); |
873 | r = amdgpu_sync_fence(adev, &p->job->sync, f, false); | 857 | if (r) |
874 | if (r) | 858 | return r; |
875 | return r; | ||
876 | } | ||
877 | 859 | ||
860 | f = bo_va->last_pt_update; | ||
861 | r = amdgpu_sync_fence(adev, &p->job->sync, f, false); | ||
862 | if (r) | ||
863 | return r; | ||
878 | } | 864 | } |
879 | 865 | ||
880 | r = amdgpu_vm_handle_moved(adev, vm); | 866 | r = amdgpu_vm_handle_moved(adev, vm); |
@@ -889,9 +875,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) | |||
889 | if (r) | 875 | if (r) |
890 | return r; | 876 | return r; |
891 | 877 | ||
892 | if (amdgpu_vm_debug && p->bo_list) { | 878 | if (amdgpu_vm_debug) { |
893 | struct amdgpu_bo_list_entry *e; | ||
894 | |||
895 | /* Invalidate all BOs to test for userspace bugs */ | 879 | /* Invalidate all BOs to test for userspace bugs */ |
896 | amdgpu_bo_list_for_each_entry(e, p->bo_list) { | 880 | amdgpu_bo_list_for_each_entry(e, p->bo_list) { |
897 | /* ignore duplicates */ | 881 | /* ignore duplicates */ |
@@ -1217,22 +1201,19 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1217 | struct amdgpu_ring *ring = p->ring; | 1201 | struct amdgpu_ring *ring = p->ring; |
1218 | struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity; | 1202 | struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity; |
1219 | enum drm_sched_priority priority; | 1203 | enum drm_sched_priority priority; |
1204 | struct amdgpu_bo_list_entry *e; | ||
1220 | struct amdgpu_job *job; | 1205 | struct amdgpu_job *job; |
1221 | uint64_t seq; | 1206 | uint64_t seq; |
1222 | 1207 | ||
1223 | int r; | 1208 | int r; |
1224 | 1209 | ||
1225 | amdgpu_mn_lock(p->mn); | 1210 | amdgpu_mn_lock(p->mn); |
1226 | if (p->bo_list) { | 1211 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
1227 | struct amdgpu_bo_list_entry *e; | 1212 | struct amdgpu_bo *bo = e->robj; |
1228 | 1213 | ||
1229 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { | 1214 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { |
1230 | struct amdgpu_bo *bo = e->robj; | 1215 | amdgpu_mn_unlock(p->mn); |
1231 | 1216 | return -ERESTARTSYS; | |
1232 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { | ||
1233 | amdgpu_mn_unlock(p->mn); | ||
1234 | return -ERESTARTSYS; | ||
1235 | } | ||
1236 | } | 1217 | } |
1237 | } | 1218 | } |
1238 | 1219 | ||