author     Christian König <christian.koenig@amd.com>	2014-07-30 15:05:17 -0400
committer  Alex Deucher <alexander.deucher@amd.com>	2014-08-05 08:53:55 -0400
commit     03f62abd112d5150b6ce8957fa85d4f6e85e357f (patch)
tree       f0e6beb6d82b225f936bf33f1756c19bdc671208 /drivers/gpu/drm/radeon/radeon_vm.c
parent     5a341be27fe23c31e4849b0a0506a4469bcbc283 (diff)
drm/radeon: split PT setup in more functions
Move the decision of which method to use into the common VM code.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_vm.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_vm.c	67
1 file changed, 53 insertions(+), 14 deletions(-)
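The helper introduced below centralizes a three-way choice that each set_page implementation previously had to make on its own: copy ready-made PTEs straight out of the GART table when the mapping is a plain GART mapping, write entries one by one when they point at scattered system pages or the run is very short, and otherwise use the linear set path. The stand-alone sketch below models that dispatch in user-space C; the flag values and the copy_pages/write_pages/set_pages helpers are illustrative stand-ins for the per-ASIC callbacks, not kernel API.

#include <stdint.h>
#include <stdio.h>

/* illustrative PTE flag bits, mirroring the role of the kernel's
 * R600_PTE_* defines (the values here are made up for the model) */
#define PTE_VALID      (1u << 0)
#define PTE_SYSTEM     (1u << 1)
#define PTE_READABLE   (1u << 5)
#define PTE_WRITEABLE  (1u << 6)
#define PTE_GART_MASK  (PTE_VALID | PTE_SYSTEM | PTE_READABLE | PTE_WRITEABLE)

/* stand-ins for the per-ASIC copy/write/set callbacks */
static void copy_pages(uint64_t pe, uint64_t src, unsigned count)
{
	printf("copy : %u PTEs from GART table @0x%llx -> 0x%llx\n",
	       count, (unsigned long long)src, (unsigned long long)pe);
}

static void write_pages(uint64_t pe, uint64_t addr, unsigned count)
{
	printf("write: %u PTEs @0x%llx, first page 0x%llx\n",
	       count, (unsigned long long)pe, (unsigned long long)addr);
}

static void set_pages(uint64_t pe, uint64_t addr, unsigned count)
{
	printf("set  : %u PTEs @0x%llx, first page 0x%llx\n",
	       count, (unsigned long long)pe, (unsigned long long)addr);
}

/* model of the dispatch in radeon_vm_set_pages() */
static void vm_set_pages(uint64_t gart_table_addr, uint64_t pe,
			 uint64_t addr, unsigned count, uint32_t flags)
{
	if ((flags & PTE_GART_MASK) == PTE_GART_MASK) {
		/* plain GART mapping: the GART table already holds the
		 * finished 8-byte entries, so just copy them over */
		uint64_t src = gart_table_addr + (addr >> 12) * 8;
		copy_pages(pe, src, count);
	} else if ((flags & PTE_SYSTEM) || count < 3) {
		/* scattered system pages or a very short run: emit the
		 * entries literally, one by one */
		write_pages(pe, addr, count);
	} else {
		/* long linear run: one command, the DMA engine generates
		 * addr, addr + incr, addr + 2*incr, ... by itself */
		set_pages(pe, addr, count);
	}
}

int main(void)
{
	vm_set_pages(0x100000, 0x2000, 0x40000000, 16, PTE_GART_MASK); /* copy  */
	vm_set_pages(0x100000, 0x2000, 0x40000000, 2,  PTE_VALID);     /* write */
	vm_set_pages(0x100000, 0x2000, 0x40000000, 16, PTE_VALID);     /* set   */
	return 0;
}

The count < 3 cutoff presumably reflects that the fixed overhead of a set command only pays off for longer runs; for one or two entries, writing them literally is cheaper.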
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index d520ab71b748..e97588162030 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -341,6 +341,42 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
 }
 
 /**
+ * radeon_vm_set_pages - helper to call the right asic function
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void radeon_vm_set_pages(struct radeon_device *rdev,
+				struct radeon_ib *ib,
+				uint64_t pe,
+				uint64_t addr, unsigned count,
+				uint32_t incr, uint32_t flags)
+{
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
+
+	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
+		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
+					   count, incr, flags);
+
+	} else {
+		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
+					 count, incr, flags);
+	}
+}
+
+/**
  * radeon_vm_clear_bo - initially clear the page dir/table
  *
  * @rdev: radeon_device pointer
@@ -381,7 +417,8 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 
 	ib.length_dw = 0;
 
-	radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
+	radeon_asic_vm_pad_ib(rdev, &ib);
 
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r)
@@ -634,9 +671,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				radeon_asic_vm_set_page(rdev, &ib, last_pde,
-							last_pt, count, incr,
-							R600_PTE_VALID);
+				radeon_vm_set_pages(rdev, &ib, last_pde,
+						    last_pt, count, incr,
+						    R600_PTE_VALID);
 			}
 
 			count = 1;
@@ -648,10 +685,11 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 	}
 
 	if (count)
-		radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
-					incr, R600_PTE_VALID);
+		radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
+				    incr, R600_PTE_VALID);
 
 	if (ib.length_dw != 0) {
+		radeon_asic_vm_pad_ib(rdev, &ib);
 		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
 		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
 		r = radeon_ib_schedule(rdev, &ib, NULL);
@@ -719,30 +757,30 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
 	    (frag_start >= frag_end)) {
 
 		count = (pe_end - pe_start) / 8;
-		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
-					RADEON_GPU_PAGE_SIZE, flags);
+		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+				    RADEON_GPU_PAGE_SIZE, flags);
 		return;
 	}
 
 	/* handle the 4K area at the beginning */
 	if (pe_start != frag_start) {
 		count = (frag_start - pe_start) / 8;
-		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
-					RADEON_GPU_PAGE_SIZE, flags);
+		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+				    RADEON_GPU_PAGE_SIZE, flags);
 		addr += RADEON_GPU_PAGE_SIZE * count;
 	}
 
 	/* handle the area in the middle */
 	count = (frag_end - frag_start) / 8;
-	radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
-				RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+	radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
+			    RADEON_GPU_PAGE_SIZE, flags | frag_flags);
 
 	/* handle the 4K area at the end */
 	if (frag_end != pe_end) {
 		addr += RADEON_GPU_PAGE_SIZE * count;
 		count = (pe_end - frag_end) / 8;
-		radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
-					RADEON_GPU_PAGE_SIZE, flags);
+		radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
+				    RADEON_GPU_PAGE_SIZE, flags);
 	}
 }
 
@@ -900,6 +938,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 					bo_va->it.last + 1, addr,
 					radeon_vm_page_flags(bo_va->flags));
 
+	radeon_asic_vm_pad_ib(rdev, &ib);
 	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
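With this split, every call site that schedules an IB now finishes with radeon_asic_vm_pad_ib() right before radeon_ib_schedule(), instead of each set_page implementation padding internally. The toy model below shows the pattern; the 8-dword alignment and the zero NOP word are assumptions for illustration, since the real multiple and NOP encoding are per-ASIC.

#include <stdint.h>
#include <stdio.h>

struct ib {
	uint32_t dw[64];    /* command words */
	unsigned length_dw; /* words filled so far */
};

/* pad the command stream with NOPs up to the engine's required multiple;
 * 8 dwords and a zero NOP are assumed here, the kernel's pad callbacks
 * are per-ASIC */
static void pad_ib(struct ib *ib, unsigned align_dw)
{
	while (ib->length_dw % align_dw)
		ib->dw[ib->length_dw++] = 0; /* NOP */
}

int main(void)
{
	struct ib ib = { .length_dw = 13 };

	pad_ib(&ib, 8);
	printf("IB padded to %u dwords\n", ib.length_dw); /* prints 16 */
	return 0;
}

Pulling the padding out of the per-entry helpers means an IB that mixes copy, write, and set commands is padded exactly once, just before it is handed to the scheduler.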