author		Christian König <christian.koenig@amd.com>	2016-08-04 08:52:50 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2016-08-10 14:05:39 -0400
commit		29efc4f5dfe47e992b04f92c4a4d990d03816e78 (patch)
tree		8f6e5e34332a471560492fbb3e9c65b868eabe84 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent		1303c73c61fea8cc5509e5b0e3cbe5253e260ca1 (diff)
drm/amdgpu: rename amdgpu_vm_update_params

Well, those are actually page table entry parameters. This also
makes the variable names a bit shorter.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
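For reference, the structure reads as follows after the rename. This is a sketch assembled from the hunks below; the src member and its comment are verbatim from the diff, while the pages_addr and ib member types are inferred from their call sites (params.ib = &job->ibs[0]; amdgpu_vm_write_pte(..., params->pages_addr, ...)) and may differ in detail:

	struct amdgpu_pte_update_params {
		/* address where to copy page table entries from */
		uint64_t src;
		/* DMA addresses to use for mapping (type inferred from usage) */
		dma_addr_t *pages_addr;
		/* indirect buffer holding the update commands (type inferred) */
		struct amdgpu_ib *ib;
	};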
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	91
1 file changed, 44 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ac209a51772b..577abfd3879e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -57,7 +57,7 @@
 /* Local structure. Encapsulate some VM table update parameters to reduce
  * the number of function parameters
  */
-struct amdgpu_vm_update_params {
+struct amdgpu_pte_update_params {
 	/* address where to copy page table entries from */
 	uint64_t src;
 	/* DMA addresses to use for mapping */
@@ -470,7 +470,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * amdgpu_vm_update_pages - helper to call the right asic function
  *
  * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -481,29 +481,28 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * to setup the page table using the DMA.
  */
 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
-				   struct amdgpu_vm_update_params
-					*vm_update_params,
+				   struct amdgpu_pte_update_params *params,
 				   uint64_t pe, uint64_t addr,
 				   unsigned count, uint32_t incr,
 				   uint32_t flags)
 {
 	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 
-	if (vm_update_params->src) {
-		amdgpu_vm_copy_pte(adev, vm_update_params->ib,
-			pe, (vm_update_params->src + (addr >> 12) * 8), count);
+	if (params->src) {
+		amdgpu_vm_copy_pte(adev, params->ib,
+			pe, (params->src + (addr >> 12) * 8), count);
 
-	} else if (vm_update_params->pages_addr) {
-		amdgpu_vm_write_pte(adev, vm_update_params->ib,
-			vm_update_params->pages_addr,
+	} else if (params->pages_addr) {
+		amdgpu_vm_write_pte(adev, params->ib,
+			params->pages_addr,
 			pe, addr, count, incr, flags);
 
 	} else if (count < 3) {
-		amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
+		amdgpu_vm_write_pte(adev, params->ib, NULL, pe, addr,
 				    count, incr, flags);
 
 	} else {
-		amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
+		amdgpu_vm_set_pte_pde(adev, params->ib, pe, addr,
 				      count, incr, flags);
 	}
 }
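Stripped of the diff markers, the helper above now reads as a four-way dispatch on the shortened parameter names. A condensed sketch follows; the rationale comments are an editorial reading of the call sites, not taken from the source:

	if (params->src)
		/* PTEs staged at params->src: have the GPU copy them */
		amdgpu_vm_copy_pte(adev, params->ib,
			pe, (params->src + (addr >> 12) * 8), count);
	else if (params->pages_addr)
		/* scattered system pages: resolve each DMA address */
		amdgpu_vm_write_pte(adev, params->ib, params->pages_addr,
			pe, addr, count, incr, flags);
	else if (count < 3)
		/* presumably too few entries to pay for a PTE/PDE packet */
		amdgpu_vm_write_pte(adev, params->ib, NULL, pe, addr,
			count, incr, flags);
	else
		amdgpu_vm_set_pte_pde(adev, params->ib, pe, addr,
			count, incr, flags);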
@@ -523,12 +522,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	struct amdgpu_ring *ring;
 	struct fence *fence = NULL;
 	struct amdgpu_job *job;
-	struct amdgpu_vm_update_params vm_update_params;
+	struct amdgpu_pte_update_params params;
 	unsigned entries;
 	uint64_t addr;
 	int r;
 
-	memset(&vm_update_params, 0, sizeof(vm_update_params));
+	memset(&params, 0, sizeof(params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -546,8 +545,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
-	vm_update_params.ib = &job->ibs[0];
-	amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
+	params.ib = &job->ibs[0];
+	amdgpu_vm_update_pages(adev, &params, addr, 0, entries,
 			       0, 0);
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
@@ -620,12 +619,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_vm_update_params vm_update_params;
+	struct amdgpu_pte_update_params params;
 	struct fence *fence = NULL;
 
 	int r;
 
-	memset(&vm_update_params, 0, sizeof(vm_update_params));
+	memset(&params, 0, sizeof(params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	/* padding, etc. */
@@ -638,7 +637,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	vm_update_params.ib = &job->ibs[0];
+	params.ib = &job->ibs[0];
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -658,7 +657,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	    ((last_pt + incr * count) != pt)) {
 
 		if (count) {
-			amdgpu_vm_update_pages(adev, &vm_update_params,
+			amdgpu_vm_update_pages(adev, &params,
 					       last_pde, last_pt,
 					       count, incr,
 					       AMDGPU_PTE_VALID);
@@ -673,15 +672,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, &vm_update_params,
+		amdgpu_vm_update_pages(adev, &params,
 				       last_pde, last_pt,
 				       count, incr, AMDGPU_PTE_VALID);
 
-	if (vm_update_params.ib->length_dw != 0) {
-		amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+	if (params.ib->length_dw != 0) {
+		amdgpu_ring_pad_ib(ring, params.ib);
 		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
 				 AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(vm_update_params.ib->length_dw > ndw);
+		WARN_ON(params.ib->length_dw > ndw);
 		r = amdgpu_job_submit(job, ring, &vm->entity,
 				      AMDGPU_FENCE_OWNER_VM, &fence);
 		if (r)
@@ -707,15 +706,14 @@ error_free:
  * amdgpu_vm_frag_ptes - add fragment information to PTEs
  *
  * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
  * @pe_start: first PTE to handle
  * @pe_end: last PTE to handle
  * @addr: addr those PTEs should point to
  * @flags: hw mapping flags
  */
 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
-				struct amdgpu_vm_update_params
-					*vm_update_params,
+				struct amdgpu_pte_update_params *params,
 				uint64_t pe_start, uint64_t pe_end,
 				uint64_t addr, uint32_t flags)
 {
@@ -752,11 +750,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 		return;
 
 	/* system pages are non continuously */
-	if (vm_update_params->src || vm_update_params->pages_addr ||
+	if (params->src || params->pages_addr ||
 	    !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
 
 		count = (pe_end - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
+		amdgpu_vm_update_pages(adev, params, pe_start,
 				       addr, count, AMDGPU_GPU_PAGE_SIZE,
 				       flags);
 		return;
@@ -765,21 +763,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 	/* handle the 4K area at the beginning */
 	if (pe_start != frag_start) {
 		count = (frag_start - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
+		amdgpu_vm_update_pages(adev, params, pe_start, addr,
 				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 	}
 
 	/* handle the area in the middle */
 	count = (frag_end - frag_start) / 8;
-	amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
+	amdgpu_vm_update_pages(adev, params, frag_start, addr, count,
 			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
 
 	/* handle the 4K area at the end */
 	if (frag_end != pe_end) {
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 		count = (pe_end - frag_end) / 8;
-		amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
+		amdgpu_vm_update_pages(adev, params, frag_end, addr,
 				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 	}
 }
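The function above splits a PTE range into an unaligned head, a fragment-aligned middle written with flags | frag_flags, and an unaligned tail; the divisions by 8 reflect the 8-byte size of one PTE. A worked example with made-up, suitably aligned values:

	/* hypothetical: pe_start = 0x1008, frag_start = 0x1040,
	 *               frag_end = 0x1200, pe_end     = 0x1230
	 *
	 * head:   (0x1040 - 0x1008) / 8 =  7 PTEs, plain flags
	 * middle: (0x1200 - 0x1040) / 8 = 56 PTEs, flags | frag_flags
	 * tail:   (0x1230 - 0x1200) / 8 =  6 PTEs, plain flags
	 *
	 * addr advances by count * AMDGPU_GPU_PAGE_SIZE between stages.
	 */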
@@ -788,7 +786,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
  * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
  * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
@@ -798,8 +796,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  * Update the page tables in the range @start - @end.
  */
 static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-				  struct amdgpu_vm_update_params
-					*vm_update_params,
+				  struct amdgpu_pte_update_params *params,
 				  struct amdgpu_vm *vm,
 				  uint64_t start, uint64_t end,
 				  uint64_t dst, uint32_t flags)
@@ -852,7 +849,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 		 */
 		cur_pe_end += 8 * nptes;
 	} else {
-		amdgpu_vm_frag_ptes(adev, vm_update_params,
+		amdgpu_vm_frag_ptes(adev, params,
 				    cur_pe_start, cur_pe_end,
 				    cur_dst, flags);
 
@@ -866,7 +863,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 	}
 
-	amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
+	amdgpu_vm_frag_ptes(adev, params, cur_pe_start,
 			    cur_pe_end, cur_dst, flags);
 }
 
@@ -900,14 +897,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned nptes, ncmds, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_vm_update_params vm_update_params;
+	struct amdgpu_pte_update_params params;
 	struct fence *f = NULL;
 	int r;
 
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-	memset(&vm_update_params, 0, sizeof(vm_update_params));
-	vm_update_params.src = src;
-	vm_update_params.pages_addr = pages_addr;
+	memset(&params, 0, sizeof(params));
+	params.src = src;
+	params.pages_addr = pages_addr;
 
 	/* sync to everything on unmapping */
 	if (!(flags & AMDGPU_PTE_VALID))
@@ -924,11 +921,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	/* padding, etc. */
 	ndw = 64;
 
-	if (vm_update_params.src) {
+	if (params.src) {
 		/* only copy commands needed */
 		ndw += ncmds * 7;
 
-	} else if (vm_update_params.pages_addr) {
+	} else if (params.pages_addr) {
 		/* header for write data commands */
 		ndw += ncmds * 4;
 
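The hunk above budgets the indirect buffer before the job is allocated; ncmds is computed earlier in the function, outside this diff. A worked instance under assumed values:

	/* hypothetical: ncmds = 3 and params.src set (copy path):
	 *   ndw = 64 (padding, etc.) + 3 * 7 = 85 dwords budgeted
	 * the WARN_ON(params.ib->length_dw > ndw) near the end of the
	 * function checks the built IB against this budget.
	 */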
@@ -947,7 +944,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	vm_update_params.ib = &job->ibs[0];
+	params.ib = &job->ibs[0];
 
 	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
 	if (r)
@@ -962,11 +959,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
+	amdgpu_vm_update_ptes(adev, &params, vm, start,
 			      last + 1, addr, flags);
 
-	amdgpu_ring_pad_ib(ring, vm_update_params.ib);
-	WARN_ON(vm_update_params.ib->length_dw > ndw);
+	amdgpu_ring_pad_ib(ring, params.ib);
+	WARN_ON(params.ib->length_dw > ndw);
 	r = amdgpu_job_submit(job, ring, &vm->entity,
 			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)