Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  51
1 file changed, 22 insertions(+), 29 deletions(-)
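The change below reworks amdgpu_cs.c to walk buffer-object list entries through the amdgpu_bo_list_for_each_entry() and amdgpu_bo_list_for_each_userptr_entry() iterators instead of open-coded index loops over p->bo_list->array[], which lets the per-function loop counters be dropped. A hedged sketch of what these iterators presumably expand to follows the diff.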
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index f7154f3ed807..1d7292ab2b62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -563,10 +563,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_bo_list_entry *e;
 	struct list_head duplicates;
-	unsigned i, tries = 10;
 	struct amdgpu_bo *gds;
 	struct amdgpu_bo *gws;
 	struct amdgpu_bo *oa;
+	unsigned tries = 10;
 	int r;
 
 	INIT_LIST_HEAD(&p->validated);
@@ -596,7 +596,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	while (1) {
 		struct list_head need_pages;
-		unsigned i;
 
 		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 					   &duplicates);
@@ -611,12 +610,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			break;
 
 		INIT_LIST_HEAD(&need_pages);
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			struct amdgpu_bo *bo;
-
-			e = &p->bo_list->array[i];
-			bo = e->robj;
+		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+			struct amdgpu_bo *bo = e->robj;
 
 			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
 				 &e->user_invalidated) && e->user_pages) {
@@ -710,16 +705,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	if (p->bo_list) {
 		struct amdgpu_vm *vm = &fpriv->vm;
-		unsigned i;
+		struct amdgpu_bo_list_entry *e;
 
 		gds = p->bo_list->gds_obj;
 		gws = p->bo_list->gws_obj;
 		oa = p->bo_list->oa_obj;
-		for (i = 0; i < p->bo_list->num_entries; i++) {
-			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
 
-			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
-		}
+		amdgpu_bo_list_for_each_entry(e, p->bo_list)
+			e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
 	} else {
 		gds = p->adev->gds.gds_gfx_bo;
 		gws = p->adev->gds.gws_gfx_bo;
@@ -753,10 +746,7 @@ error_validate:
 error_free_pages:
 
 	if (p->bo_list) {
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			e = &p->bo_list->array[i];
-
+		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 			if (!e->user_pages)
 				continue;
 
@@ -830,7 +820,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	struct amdgpu_bo *bo;
-	int i, r;
+	int r;
 
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
@@ -861,15 +851,17 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	}
 
 	if (p->bo_list) {
-		for (i = 0; i < p->bo_list->num_entries; i++) {
+		struct amdgpu_bo_list_entry *e;
+
+		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 			struct dma_fence *f;
 
 			/* ignore duplicates */
-			bo = p->bo_list->array[i].robj;
+			bo = e->robj;
 			if (!bo)
 				continue;
 
-			bo_va = p->bo_list->array[i].bo_va;
+			bo_va = e->bo_va;
 			if (bo_va == NULL)
 				continue;
 
@@ -898,14 +890,15 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 		return r;
 
 	if (amdgpu_vm_debug && p->bo_list) {
+		struct amdgpu_bo_list_entry *e;
+
 		/* Invalidate all BOs to test for userspace bugs */
-		for (i = 0; i < p->bo_list->num_entries; i++) {
+		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 			/* ignore duplicates */
-			bo = p->bo_list->array[i].robj;
-			if (!bo)
+			if (!e->robj)
 				continue;
 
-			amdgpu_vm_bo_invalidate(adev, bo, false);
+			amdgpu_vm_bo_invalidate(adev, e->robj, false);
 		}
 	}
 
@@ -1225,16 +1218,16 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
 	enum drm_sched_priority priority;
 	struct amdgpu_job *job;
-	unsigned i;
 	uint64_t seq;
 
 	int r;
 
 	amdgpu_mn_lock(p->mn);
 	if (p->bo_list) {
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+		struct amdgpu_bo_list_entry *e;
+
+		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+			struct amdgpu_bo *bo = e->robj;
 
 			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
 				amdgpu_mn_unlock(p->mn);
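
For reference, the two iterator macros used throughout the hunks above come from amdgpu_bo_list.h, which is not part of this diff. The following is only a minimal sketch of how they presumably walk the entry array, reconstructed from the loop bounds they replace (first_userptr, num_entries); the real definitions may differ.

/*
 * Sketch only -- assumed shape of the iterators referenced in the diff.
 * The authoritative definitions live in
 * drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h.
 */

/* Visit every struct amdgpu_bo_list_entry in the list. */
#define amdgpu_bo_list_for_each_entry(e, list)				\
	for ((e) = &(list)->array[0];					\
	     (e) != &(list)->array[(list)->num_entries]; ++(e))

/*
 * Visit only the userptr entries, which are grouped at the tail of the
 * array starting at first_userptr (matching the removed loop bounds).
 */
#define amdgpu_bo_list_for_each_userptr_entry(e, list)			\
	for ((e) = &(list)->array[(list)->first_userptr];		\
	     (e) != &(list)->array[(list)->num_entries]; ++(e))

With the cursor variable e declared at each call site, the macros advance directly over the array, so the unsigned i counters removed above no longer have any user.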