author:    Chunming Zhou <david1.zhou@amd.com>    2015-07-21 04:52:10 -0400
committer: Alex Deucher <alexander.deucher@amd.com>    2015-08-17 16:50:37 -0400
commit:    d5fc5e82a3127b8839c4a4457f3b950a009737a7 (patch)
tree:      4fa16b5508f599e63da7e41a331252fd6f623237 /drivers/gpu/drm/amd
parent:    23ca0e4e478836dcb93a54aa68cb48fbc66fb0ed (diff)
drm/amdgpu: dispatch job for vm

Use the kernel context to submit commands for the VM.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 245
2 files changed, 217 insertions(+), 48 deletions(-)
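The change is mechanical but repeats one submission pattern three times, in amdgpu_vm_clear_bo(), amdgpu_vm_update_page_directory() and amdgpu_vm_bo_update_mapping(): the IB is now heap-allocated, wrapped in an amdgpu_cs_parser job bound to adev->kernel_ctx, tagged with the entity's next virtual sequence number, pushed to the GPU scheduler, and the caller waits for it to be emitted. The sketch below factors that pattern into a single helper purely for illustration; the helper name and parameter list are hypothetical, while every call it makes is taken from this patch and assumes the amdgpu/scheduler definitions introduced by this series.

/*
 * Illustrative sketch only -- not part of the patch. Assumes the
 * amdgpu.h and GPU scheduler definitions added by this series; the
 * helper name and signature are hypothetical.
 */
static int amdgpu_vm_submit_kernel_job(struct amdgpu_device *adev,
				       struct amdgpu_ring *ring,
				       struct amdgpu_ib *ib,
				       int (*run_job)(struct amdgpu_cs_parser *),
				       union amdgpu_sched_job_param *param)
{
	struct amdgpu_cs_parser *sched_job;
	uint64_t v_seq;
	int r;

	/* Wrap the IB in a job owned by the kernel context. */
	sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
					    adev->kernel_ctx, ib, 1);
	if (!sched_job)
		return -ENOMEM;

	sched_job->job_param = *param;	/* per-job callback data */
	sched_job->run_job = run_job;	/* e.g. amdgpu_vm_run_job */
	sched_job->free_job = amdgpu_vm_free_job;

	/* Tag the job with the entity's next virtual sequence number. */
	v_seq = atomic64_inc_return(
		&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
	sched_job->uf.sequence = v_seq;

	/* Queue it on the kernel entity and wait until it is emitted. */
	amd_sched_push_job(ring->scheduler,
			   &adev->kernel_ctx->rings[ring->idx].c_entity,
			   sched_job);
	r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
				v_seq, true, -1);
	if (r)
		DRM_ERROR("emit timeout\n");

	return 0;
}

When amdgpu_enable_scheduler is disabled, each function instead falls back to the old direct amdgpu_ib_schedule() path, as the hunks below show.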
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 47e4809c6e71..1b8d05ff88e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1221,6 +1221,19 @@ struct amdgpu_cs_chunk {
 	void __user		*user_ptr;
 };
 
+union amdgpu_sched_job_param {
+	struct {
+		struct amdgpu_vm *vm;
+		uint64_t start;
+		uint64_t last;
+		struct amdgpu_fence **fence;
+
+	} vm_mapping;
+	struct {
+		struct amdgpu_bo *bo;
+	} vm;
+};
+
 struct amdgpu_cs_parser {
 	struct amdgpu_device	*adev;
 	struct drm_file		*filp;
@@ -1245,6 +1258,7 @@ struct amdgpu_cs_parser {
 	struct mutex		job_lock;
 	struct work_struct	job_work;
 	int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
+	union amdgpu_sched_job_param job_param;
 	int (*run_job)(struct amdgpu_cs_parser *sched_job);
 	int (*free_job)(struct amdgpu_cs_parser *sched_job);
 };
@@ -2255,6 +2269,12 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs);
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index fd8395f25723..34938d2417a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -306,6 +306,24 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 	}
 }
 
+static int amdgpu_vm_free_job(
+	struct amdgpu_cs_parser *sched_job)
+{
+	int i;
+	for (i = 0; i < sched_job->num_ibs; i++)
+		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
+static int amdgpu_vm_run_job(
+	struct amdgpu_cs_parser *sched_job)
+{
+	amdgpu_bo_fence(sched_job->job_param.vm.bo,
+			sched_job->ibs[sched_job->num_ibs -1].fence, true);
+	return 0;
+}
+
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -316,7 +334,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_bo *bo)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
-	struct amdgpu_ib ib;
+	struct amdgpu_cs_parser *sched_job = NULL;
+	struct amdgpu_ib *ib;
 	unsigned entries;
 	uint64_t addr;
 	int r;
@@ -336,24 +355,54 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
-	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
 		goto error_unreserve;
 
-	ib.length_dw = 0;
-
-	amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
-	amdgpu_vm_pad_ib(adev, &ib);
-	WARN_ON(ib.length_dw > 64);
-
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
 	if (r)
 		goto error_free;
 
-	amdgpu_bo_fence(bo, ib.fence, true);
+	ib->length_dw = 0;
+
+	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > 64);
+
+	if (amdgpu_enable_scheduler) {
+		int r;
+		uint64_t v_seq;
+		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
+						    adev->kernel_ctx, ib, 1);
+		if(!sched_job)
+			goto error_free;
+		sched_job->job_param.vm.bo = bo;
+		sched_job->run_job = amdgpu_vm_run_job;
+		sched_job->free_job = amdgpu_vm_free_job;
+		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+		sched_job->uf.sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
+				   &adev->kernel_ctx->rings[ring->idx].c_entity,
+				   sched_job);
+		r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
+					v_seq,
+					true,
+					-1);
+		if (r)
+			DRM_ERROR("emit timeout\n");
+
+		amdgpu_bo_unreserve(bo);
+		return 0;
+	} else {
+		r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
+		if (r)
+			goto error_free;
+		amdgpu_bo_fence(bo, ib->fence, true);
+	}
 
 error_free:
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 
 error_unreserve:
 	amdgpu_bo_unreserve(bo);
@@ -406,7 +455,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib;
+	struct amdgpu_cs_parser *sched_job = NULL;
+
 	int r;
 
 	/* padding, etc. */
@@ -419,10 +470,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
 	if (r)
 		return r;
-	ib.length_dw = 0;
+	ib->length_dw = 0;
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -442,7 +497,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, &ib, last_pde,
+				amdgpu_vm_update_pages(adev, ib, last_pde,
 						       last_pt, count, incr,
 						       AMDGPU_PTE_VALID, 0);
 			}
@@ -456,23 +511,59 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
+		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
 				       incr, AMDGPU_PTE_VALID, 0);
 
-	if (ib.length_dw != 0) {
-		amdgpu_vm_pad_ib(adev, &ib);
-		amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(ib.length_dw > ndw);
-		r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
-		if (r) {
-			amdgpu_ib_free(adev, &ib);
-			return r;
+	if (ib->length_dw != 0) {
+		amdgpu_vm_pad_ib(adev, ib);
+		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+		WARN_ON(ib->length_dw > ndw);
+
+		if (amdgpu_enable_scheduler) {
+			int r;
+			uint64_t v_seq;
+			sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
+							    adev->kernel_ctx,
+							    ib, 1);
+			if(!sched_job)
+				goto error_free;
+			sched_job->job_param.vm.bo = pd;
+			sched_job->run_job = amdgpu_vm_run_job;
+			sched_job->free_job = amdgpu_vm_free_job;
+			v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+			sched_job->uf.sequence = v_seq;
+			amd_sched_push_job(ring->scheduler,
+					   &adev->kernel_ctx->rings[ring->idx].c_entity,
+					   sched_job);
+			r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
+						v_seq,
+						true,
+						-1);
+			if (r)
+				DRM_ERROR("emit timeout\n");
+		} else {
+			r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
+			if (r) {
+				amdgpu_ib_free(adev, ib);
+				return r;
+			}
+			amdgpu_bo_fence(pd, ib->fence, true);
 		}
-		amdgpu_bo_fence(pd, ib.fence, true);
 	}
-	amdgpu_ib_free(adev, &ib);
+
+	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
+	}
 
 	return 0;
+
+error_free:
+	if (sched_job)
+		kfree(sched_job);
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return -ENOMEM;
 }
 
 /**
@@ -657,6 +748,20 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
+static int amdgpu_vm_bo_update_mapping_run_job(
+	struct amdgpu_cs_parser *sched_job)
+{
+	struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence;
+	amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
+			    sched_job->job_param.vm_mapping.start,
+			    sched_job->job_param.vm_mapping.last + 1,
+			    sched_job->ibs[sched_job->num_ibs -1].fence);
+	if (fence) {
+		amdgpu_fence_unref(fence);
+		*fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence);
+	}
+	return 0;
+}
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
@@ -681,7 +786,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	unsigned nptes, ncmds, ndw;
 	uint32_t flags = gtt_flags;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib;
+	struct amdgpu_cs_parser *sched_job = NULL;
 	int r;
 
 	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -728,48 +834,91 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
+	if (r) {
+		kfree(ib);
 		return r;
-	ib.length_dw = 0;
+	}
+
+	ib->length_dw = 0;
 
 	if (!(flags & AMDGPU_PTE_VALID)) {
 		unsigned i;
 
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_fence *f = vm->ids[i].last_id_use;
-			r = amdgpu_sync_fence(adev, &ib.sync, &f->base);
+			r = amdgpu_sync_fence(adev, &ib->sync, &f->base);
 			if (r)
 				return r;
 		}
 	}
 
-	r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
+	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
 				  mapping->it.last + 1, addr + mapping->offset,
 				  flags, gtt_flags);
 
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
 		return r;
 	}
 
-	amdgpu_vm_pad_ib(adev, &ib);
-	WARN_ON(ib.length_dw > ndw);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > ndw);
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		return r;
-	}
-	amdgpu_vm_fence_pts(vm, mapping->it.start,
-			    mapping->it.last + 1, ib.fence);
-	if (fence) {
-		amdgpu_fence_unref(fence);
-		*fence = amdgpu_fence_ref(ib.fence);
-	}
-	amdgpu_ib_free(adev, &ib);
+	if (amdgpu_enable_scheduler) {
+		int r;
+		uint64_t v_seq;
+		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
+						    adev->kernel_ctx, ib, 1);
+		if(!sched_job)
+			goto error_free;
+		sched_job->job_param.vm_mapping.vm = vm;
+		sched_job->job_param.vm_mapping.start = mapping->it.start;
+		sched_job->job_param.vm_mapping.last = mapping->it.last;
+		sched_job->job_param.vm_mapping.fence = fence;
+		sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
+		sched_job->free_job = amdgpu_vm_free_job;
+		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+		sched_job->uf.sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
+				   &adev->kernel_ctx->rings[ring->idx].c_entity,
+				   sched_job);
+		r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
+					v_seq,
+					true,
+					-1);
+		if (r)
+			DRM_ERROR("emit timeout\n");
+	} else {
+		r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
+		if (r) {
+			amdgpu_ib_free(adev, ib);
+			return r;
+		}
+
+		amdgpu_vm_fence_pts(vm, mapping->it.start,
+				    mapping->it.last + 1, ib->fence);
+		if (fence) {
+			amdgpu_fence_unref(fence);
+			*fence = amdgpu_fence_ref(ib->fence);
+		}
 
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
+	}
 	return 0;
+
+error_free:
+	if (sched_job)
+		kfree(sched_job);
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return -ENOMEM;
 }
 
 /**