author		Christian König <christian.koenig@amd.com>	2016-02-01 06:20:25 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2016-02-10 14:17:22 -0500
commit		d71518b5aa7c9c298ffbd12ddd23297e3373a37b (patch)
tree		bb548c89c8f21770cdf7086fee8c1c92aea1fa64 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent		a0332b56f6e03e15946fb9d8813cfe44aeeb0e6c (diff)
drm/amdgpu: cleanup in kernel job submission
Add a job_alloc_with_ib helper and proper job submission.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
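
In short: instead of allocating an amdgpu_ib by hand and passing a per-submission free callback to amdgpu_sched_ib_submit_kernel_helper(), callers now allocate a job that already carries its IB and hand it to the scheduler, which reclaims it after execution. A minimal sketch of the new flow, condensed from the amdgpu_vm_clear_bo() hunk below (the 64-dword IB size, the label names and every function shown are taken from the patch; the command-building step is elided):

	struct amdgpu_job *job;
	struct fence *fence = NULL;
	int r;

	/* allocate the job together with its first IB, job->ibs[0] */
	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

	/* ... write commands into job->ibs[0] ... */
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	/* push to the scheduler; on success the scheduler owns the job
	 * and frees it once it has run, so no amdgpu_vm_free_job()-style
	 * callback is needed any more */
	r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
	if (r)
		goto error_free;

	fence_put(fence);
	return 0;

error_free:
	amdgpu_job_free(job);	/* only for a job that was never submitted */
error:
	return r;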
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	87
1 file changed, 27 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b68642b47b7b..fb003089f73c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -313,15 +313,6 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 	}
 }
 
-int amdgpu_vm_free_job(struct amdgpu_job *job)
-{
-	int i;
-	for (i = 0; i < job->num_ibs; i++)
-		amdgpu_ib_free(job->adev, &job->ibs[i]);
-	kfree(job->ibs);
-	return 0;
-}
-
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -335,7 +326,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	struct fence *fence = NULL;
-	struct amdgpu_ib *ib;
+	struct amdgpu_job *job;
 	unsigned entries;
 	uint64_t addr;
 	int r;
@@ -351,32 +342,25 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
-	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!ib)
+	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+	if (r)
 		goto error;
 
-	r = amdgpu_ib_get(adev, NULL, 64, ib);
+	amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
+			       0, 0);
+	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+
+	WARN_ON(job->ibs[0].length_dw > 64);
+	r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
 	if (r)
 		goto error_free;
 
-	ib->length_dw = 0;
-
-	amdgpu_vm_update_pages(adev, NULL, 0, ib, addr, 0, entries, 0, 0);
-	amdgpu_ring_pad_ib(ring, ib);
-
-	WARN_ON(ib->length_dw > 64);
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-						 &amdgpu_vm_free_job,
-						 AMDGPU_FENCE_OWNER_VM,
-						 &fence);
-	if (!r)
-		amdgpu_bo_fence(bo, fence, true);
+	amdgpu_bo_fence(bo, fence, true);
 	fence_put(fence);
 	return 0;
 
 error_free:
-	amdgpu_ib_free(adev, ib);
-	kfree(ib);
+	amdgpu_job_free(job);
 
 error:
 	return r;
@@ -433,6 +417,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
+	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct fence *fence = NULL;
 
@@ -444,16 +429,11 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	/* assume the worst case */
 	ndw += vm->max_pde_used * 6;
 
-	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!ib)
-		return -ENOMEM;
-
-	r = amdgpu_ib_get(adev, NULL, ndw * 4, ib);
-	if (r) {
-		kfree(ib);
+	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+	if (r)
 		return r;
-	}
-	ib->length_dw = 0;
+
+	ib = &job->ibs[0];
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -495,10 +475,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		amdgpu_ring_pad_ib(ring, ib);
 		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
 		WARN_ON(ib->length_dw > ndw);
-		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-							 &amdgpu_vm_free_job,
-							 AMDGPU_FENCE_OWNER_VM,
-							 &fence);
+		r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
 		if (r)
 			goto error_free;
 
@@ -506,18 +483,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		fence_put(vm->page_directory_fence);
 		vm->page_directory_fence = fence_get(fence);
 		fence_put(fence);
-	}
 
-	if (ib->length_dw == 0) {
-		amdgpu_ib_free(adev, ib);
-		kfree(ib);
+	} else {
+		amdgpu_job_free(job);
 	}
 
 	return 0;
 
 error_free:
-	amdgpu_ib_free(adev, ib);
-	kfree(ib);
+	amdgpu_job_free(job);
 	return r;
 }
 
@@ -695,6 +669,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned nptes, ncmds, ndw;
+	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct fence *f = NULL;
 	int r;
@@ -733,15 +708,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		ndw += 2 * 10;
 	}
 
-	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!ib)
-		return -ENOMEM;
-
-	r = amdgpu_ib_get(adev, NULL, ndw * 4, ib);
-	if (r) {
-		kfree(ib);
+	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+	if (r)
 		return r;
-	}
+
+	ib = &job->ibs[0];
 
 	r = amdgpu_sync_resv(adev, &ib->sync, vm->page_directory->tbo.resv,
 			     owner);
@@ -757,10 +728,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	amdgpu_ring_pad_ib(ring, ib);
 	WARN_ON(ib->length_dw > ndw);
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-						 &amdgpu_vm_free_job,
-						 AMDGPU_FENCE_OWNER_VM,
-						 &f);
+	r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
 		goto error_free;
 
@@ -773,8 +741,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	return 0;
 
 error_free:
-	amdgpu_ib_free(adev, ib);
-	kfree(ib);
+	amdgpu_job_free(job);
 	return r;
 }
 
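One consequence of the new ownership rule is visible in the amdgpu_vm_update_page_directory() hunks above: a job that is never submitted must still be freed by the caller. A sketch of that convention (the `ib->length_dw != 0` guard is inferred from the removed `if (ib->length_dw == 0)` branch and its new `} else` form; everything else is from the patch):

	if (ib->length_dw != 0) {
		/* commands were generated: submit and hand off the job */
		r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM,
				      &fence);
		if (r)
			goto error_free;	/* failed submit: still ours */
	} else {
		/* nothing to do: the scheduler never saw this job */
		amdgpu_job_free(job);
	}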