-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  |  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   | 53
6 files changed, 51 insertions(+), 22 deletions(-)
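
In short: amdgpu_job_submit() gains an explicit scheduler entity parameter, and each amdgpu_vm gets its own amd_sched_entity through which all of its page table updates are submitted. The before/after prototypes, as a sketch (types forward-declared for brevity; the old signature is renamed here only for side-by-side comparison):

struct amdgpu_job;
struct amdgpu_ring;
struct amd_sched_entity;
struct fence;

/* old signature, renamed for comparison */
int amdgpu_job_submit_old(struct amdgpu_job *job, struct amdgpu_ring *ring,
                          void *owner, struct fence **f);

/* new signature: entity == NULL falls back to the kernel context's
 * per-ring entity, preserving the old behaviour for existing callers */
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f);
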
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 99e660fec190..5947a95ac853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -800,7 +800,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                              struct amdgpu_job **job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                      void *owner, struct fence **f);
+                      struct amd_sched_entity *entity, void *owner,
+                      struct fence **f);
 
 struct amdgpu_ring {
         struct amdgpu_device            *adev;
@@ -917,6 +918,9 @@ struct amdgpu_vm {
 
         /* protecting freed */
         spinlock_t              freed_lock;
+
+        /* Scheduler entity for page table updates */
+        struct amd_sched_entity entity;
 };
 
 struct amdgpu_vm_manager_id {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0f6719e0ace0..97db6beeca13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -80,13 +80,17 @@ void amdgpu_job_free(struct amdgpu_job *job)
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                      void *owner, struct fence **f)
+                      struct amd_sched_entity *entity, void *owner,
+                      struct fence **f)
 {
         struct amdgpu_device *adev = job->adev;
 
+        if (!entity)
+                entity = &adev->kernel_ctx.rings[ring->idx].entity;
+
         job->ring = ring;
         job->base.sched = &ring->sched;
-        job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+        job->base.s_entity = entity;
         job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
         if (!job->base.s_fence)
                 return -ENOMEM;
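
The NULL fallback above keeps the change minimal for existing callers: the TTM, UVD and VCE call sites below only gain a NULL argument, while the VM code opts into its own entity. A hypothetical wrapper (amdgpu_submit_sketch is not part of the patch) contrasting the two calling conventions:

/* Hypothetical helper, illustration only. */
static int amdgpu_submit_sketch(struct amdgpu_job *job,
                                struct amdgpu_ring *ring,
                                struct amdgpu_vm *vm, struct fence **f)
{
        if (!vm)
                /* one-off kernel work (TTM copies, UVD/VCE messages):
                 * NULL selects the kernel context's ring entity */
                return amdgpu_job_submit(job, ring, NULL,
                                         AMDGPU_FENCE_OWNER_UNDEFINED, f);

        /* page table updates: the VM's own entity keeps updates of one
         * VM ordered while different VMs no longer funnel through the
         * shared kernel entity */
        return amdgpu_job_submit(job, ring, &vm->entity,
                                 AMDGPU_FENCE_OWNER_VM, f);
}
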
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e47d5188c886..3deb7d3b218a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1053,7 +1053,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 
         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
         WARN_ON(job->ibs[0].length_dw > num_dw);
-        r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+        r = amdgpu_job_submit(job, ring, NULL, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
         if (r)
                 goto error_free;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index c536630580f8..f4283432bf4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -880,7 +880,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
                 amdgpu_job_free(job);
         } else {
-                r = amdgpu_job_submit(job, ring,
+                r = amdgpu_job_submit(job, ring, NULL,
                                       AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                 if (r)
                         goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index fb2ce3ed9aab..8a3119379cd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -481,7 +481,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
                 amdgpu_job_free(job);
         } else {
-                r = amdgpu_job_submit(job, ring,
+                r = amdgpu_job_submit(job, ring, NULL,
                                       AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                 if (r)
                         goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b291b1a4611a..5e38b344d56b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -322,6 +322,7 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
  * need to reserve bo first before calling it.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+                              struct amdgpu_vm *vm,
                               struct amdgpu_bo *bo)
 {
         struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
@@ -351,7 +352,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
         WARN_ON(job->ibs[0].length_dw > 64);
-        r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
+        r = amdgpu_job_submit(job, ring, &vm->entity,
+                              AMDGPU_FENCE_OWNER_VM, &fence);
         if (r)
                 goto error_free;
 
@@ -476,7 +478,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
         amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
                          AMDGPU_FENCE_OWNER_VM);
         WARN_ON(ib->length_dw > ndw);
-        r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
+        r = amdgpu_job_submit(job, ring, &vm->entity,
+                              AMDGPU_FENCE_OWNER_VM, &fence);
         if (r)
                 goto error_free;
 
@@ -729,7 +732,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
         amdgpu_ring_pad_ib(ring, ib);
         WARN_ON(ib->length_dw > ndw);
-        r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &f);
+        r = amdgpu_job_submit(job, ring, &vm->entity,
+                              AMDGPU_FENCE_OWNER_VM, &f);
         if (r)
                 goto error_free;
 
@@ -1104,7 +1108,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
          */
         pt->parent = amdgpu_bo_ref(vm->page_directory);
 
-        r = amdgpu_vm_clear_bo(adev, pt);
+        r = amdgpu_vm_clear_bo(adev, vm, pt);
         if (r) {
                 amdgpu_bo_unref(&pt);
                 goto error_free;
@@ -1265,9 +1269,11 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
+        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                 AMDGPU_VM_PTE_COUNT * 8);
         unsigned pd_size, pd_entries;
+        struct amd_sched_rq *rq;
         int i, r;
 
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1291,6 +1297,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                 return -ENOMEM;
         }
 
+        /* create scheduler entity for page table updates */
+        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+        r = amd_sched_entity_init(&ring->sched, &vm->entity,
+                                  rq, amdgpu_sched_jobs);
+        if (r)
+                return r;
+
         vm->page_directory_fence = NULL;
 
         r = amdgpu_bo_create(adev, pd_size, align, true,
@@ -1298,22 +1311,27 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                              AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                              NULL, NULL, &vm->page_directory);
         if (r)
-                return r;
+                goto error_free_sched_entity;
+
         r = amdgpu_bo_reserve(vm->page_directory, false);
-        if (r) {
-                amdgpu_bo_unref(&vm->page_directory);
-                vm->page_directory = NULL;
-                return r;
-        }
-        r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+        if (r)
+                goto error_free_page_directory;
+
+        r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
         amdgpu_bo_unreserve(vm->page_directory);
-        if (r) {
-                amdgpu_bo_unref(&vm->page_directory);
-                vm->page_directory = NULL;
-                return r;
-        }
+        if (r)
+                goto error_free_page_directory;
 
         return 0;
+
+error_free_page_directory:
+        amdgpu_bo_unref(&vm->page_directory);
+        vm->page_directory = NULL;
+
+error_free_sched_entity:
+        amd_sched_entity_fini(&ring->sched, &vm->entity);
+
+        return r;
 }
 
 /**
@@ -1327,9 +1345,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  */
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
+        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
         struct amdgpu_bo_va_mapping *mapping, *tmp;
         int i;
 
+        amd_sched_entity_fini(&ring->sched, &vm->entity);
+
         if (!RB_EMPTY_ROOT(&vm->va)) {
                 dev_err(adev->dev, "still active bo inside vm\n");
         }
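
Taken together, the amdgpu_vm.c hunks give the per-VM entity a clear lifecycle: amd_sched_entity_init() in amdgpu_vm_init() before the page directory is created, every page table submission routed through &vm->entity, and amd_sched_entity_fini() both on the init error paths and in amdgpu_vm_fini(). A condensed sketch of that ordering (locking, BO management and most error handling elided; not the literal kernel flow):

/* Condensed lifecycle sketch, not the literal kernel code. */
static int vm_entity_lifecycle(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm)
{
        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
        struct amd_sched_rq *rq;
        int r;

        /* amdgpu_vm_init(): bind the entity to the PTE ring's
         * kernel-priority run queue */
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
        r = amd_sched_entity_init(&ring->sched, &vm->entity, rq,
                                  amdgpu_sched_jobs);
        if (r)
                return r;

        /* over the VM's lifetime: amdgpu_vm_clear_bo(),
         * amdgpu_vm_update_page_directory() and
         * amdgpu_vm_bo_update_mapping() all submit via &vm->entity */

        /* amdgpu_vm_fini() (and the init error paths): tear the
         * entity down before the VM itself goes away */
        amd_sched_entity_fini(&ring->sched, &vm->entity);
        return 0;
}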