author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2019-07-11 02:24:10 -0400
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2019-07-11 02:24:10 -0400
commit     597473720f4dc69749542bfcfed4a927a43d935e (patch)
tree       711bf773910fb93d1dd9120c633adc807685e0d8 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent     f0dd687815f9546860fc3ac4379d55da045942c9 (diff)
parent     593fdd4fb44ef2cbf4ec53ec2c6eb60eb079bb4c (diff)
Merge branch 'next' into for-linus
Prepare input updates for 5.3 merge window.
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')

-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   46

1 file changed, 33 insertions, 13 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0acc8dee2cb8..52a5e4fdc95b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -50,7 +50,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 	p->uf_entry.priority = 0;
 	p->uf_entry.tv.bo = &bo->tbo;
-	p->uf_entry.tv.shared = true;
+	/* One for TTM and one for the CS job */
+	p->uf_entry.tv.num_shared = 2;
 	p->uf_entry.user_pages = NULL;
 
 	drm_gem_object_put_unlocked(gobj);
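
Note: the old boolean tv.shared becomes a shared-slot count here. As a rough
sketch of why the count matters, this is approximately how the TTM execbuf
helper of this kernel generation consumes num_shared (reconstructed from
ttm_eu_reserve_buffers(); simplified and not part of this diff):

	/* Sketch: for each validated entry, TTM pre-reserves the requested
	 * number of shared fence slots in the BO's reservation object;
	 * num_shared == 0 means the caller will use the exclusive slot.
	 */
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && entry->num_shared)
			ret = reservation_object_reserve_shared(bo->resv,
								entry->num_shared);
		/* error handling elided */
	}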
@@ -213,6 +214,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
+		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 			break;
 
 		default:
@@ -598,6 +600,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		return r;
 	}
 
+	/* One for TTM and one for the CS job */
+	amdgpu_bo_list_for_each_entry(e, p->bo_list)
+		e->tv.num_shared = 2;
+
 	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 	if (p->bo_list->first_userptr != p->bo_list->num_entries)
 		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
@@ -717,8 +723,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	gws = p->bo_list->gws_obj;
 	oa = p->bo_list->oa_obj;
 
-	amdgpu_bo_list_for_each_entry(e, p->bo_list)
-		e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
+	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+		/* Make sure we use the exclusive slot for shared BOs */
+		if (bo->prime_shared_count)
+			e->tv.num_shared = 0;
+		e->bo_va = amdgpu_vm_bo_find(vm, bo);
+	}
 
 	if (gds) {
 		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
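
Note: resetting num_shared to 0 for prime-shared BOs means no shared slots
are reserved, so the CS fence later lands in the exclusive slot. As a
hypothetical illustration of the distinction, using the reservation-object
API of this era (the helper amdgpu actually calls is amdgpu_bo_fence()):

	/* Illustration only: a shared fence lets other readers run
	 * concurrently, while an exclusive fence forces foreign drivers
	 * that imported the BO to wait for this CS before touching it.
	 */
	if (e->tv.num_shared)
		reservation_object_add_shared_fence(bo->tbo.resv, fence);
	else
		reservation_object_add_excl_fence(bo->tbo.resv, fence);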
@@ -955,10 +967,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
-	if (r)
-		return r;
-
 	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
 
 	if (amdgpu_vm_debug) {
@@ -1083,6 +1091,15 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 
 		fence = amdgpu_ctx_get_fence(ctx, entity,
 					     deps[i].handle);
+
+		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+			struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+			struct dma_fence *old = fence;
+
+			fence = dma_fence_get(&s_fence->scheduled);
+			dma_fence_put(old);
+		}
+
 		if (IS_ERR(fence)) {
 			r = PTR_ERR(fence);
 			amdgpu_ctx_put(ctx);
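
Note: a scheduled dependency waits only for the dependent job to be picked
up by the hardware, not for it to complete. The distinction comes from the
two fences embedded in every scheduler fence (layout abridged from
include/drm/gpu_scheduler.h of this generation; trailing members elided):

	struct drm_sched_fence {
		struct dma_fence	scheduled;	/* signals when the job is
							 * handed to the ring */
		struct dma_fence	finished;	/* signals when the job has
							 * completed execution */
		/* ... */
	};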
@@ -1104,7 +1121,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
 {
 	int r;
 	struct dma_fence *fence;
-	r = drm_syncobj_find_fence(p->filp, handle, 0, &fence);
+	r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
 	if (r)
 		return r;
 
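
Note: the extra 0 follows the syncobj timeline work that landed in the same
window; the two zeros are the timeline point and the lookup flags. Prototype
as of this merge, for reference:

	int drm_syncobj_find_fence(struct drm_file *file_private,
				   u32 handle, u64 point, u64 flags,
				   struct dma_fence **fence);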
@@ -1170,7 +1187,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 
 		chunk = &p->chunks[i];
 
-		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
+		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
+		    chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
 			r = amdgpu_cs_process_fence_dep(p, chunk);
 			if (r)
 				return r;
@@ -1193,7 +1211,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 	int i;
 
 	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
-		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
+		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
 }
 
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
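
Note: drm_syncobj_replace_fence() went the other way and dropped its point
argument, since a binary syncobj has no timeline point to name. Prototype
after this merge, for reference:

	void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
				       struct dma_fence *fence);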
@@ -1260,8 +1278,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	return 0;
 
 error_abort:
-	dma_fence_put(&job->base.s_fence->finished);
-	job->base.s_fence = NULL;
+	drm_sched_job_cleanup(&job->base);
 	amdgpu_mn_unlock(p->mn);
 
 error_unlock:
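
Note: the open-coded scheduler-fence teardown is replaced by the common
helper the scheduler provides for this abort-after-init case:

	/* Prototype for reference; releases the job's scheduler fence and
	 * clears job->s_fence, which is what the removed lines did by hand.
	 */
	void drm_sched_job_cleanup(struct drm_sched_job *job);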
@@ -1285,7 +1302,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	r = amdgpu_cs_parser_init(&parser, data);
 	if (r) {
-		DRM_ERROR("Failed to initialize parser !\n");
+		DRM_ERROR("Failed to initialize parser %d!\n", r);
 		goto out;
 	}
 
@@ -1422,6 +1439,9 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(fence))
 		return PTR_ERR(fence);
 
+	if (!fence)
+		fence = dma_fence_get_stub();
+
 	switch (info->in.what) {
 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
 		r = drm_syncobj_create(&syncobj, 0, fence);
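
Note: drm_syncobj_create() and sync_file_create() cannot represent a NULL
fence, so a fence that has already retired is substituted with a global,
permanently signaled stub; the lookup can then still be exported as a
syncobj or sync_file handle. Prototype for reference:

	struct dma_fence *dma_fence_get_stub(void);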