author		Christian König <christian.koenig@amd.com>	2016-08-04 09:02:49 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2016-08-10 14:05:49 -0400
commit		27c5f36fe138e29d63eea7d1445bda1ca64921d9 (patch)
tree		8c44e9ff0d9f636baabb12f8b1b828f8b85f7410 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent		29efc4f5dfe47e992b04f92c4a4d990d03816e78 (diff)
drm/amdgpu: add adev to the pte_update_params
No need to carry that forward as a separate parameter.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
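
The pattern applied in this patch is to stash per-update state, including the device pointer, in the amdgpu_pte_update_params struct so that the helpers in the call chain take one argument instead of two. A minimal, self-contained C sketch of that refactoring follows; the device, pte_update_params and update_pages names are illustrative stand-ins, not the real amdgpu types.

#include <stdio.h>

/* Hypothetical stand-in for the real device type. */
struct device {
	const char *name;
};

/*
 * Context struct mirroring the role of amdgpu_pte_update_params: the device
 * pointer is stored once here, so it no longer has to be threaded through
 * every helper as a separate argument.
 */
struct pte_update_params {
	struct device *dev;	/* set once when the update is prepared */
	unsigned long src;	/* other per-update state */
};

/* The helper now takes only the params struct. */
static void update_pages(struct pte_update_params *p, unsigned int count)
{
	printf("%s: updating %u page table entries from 0x%lx\n",
	       p->dev->name, count, p->src);
}

int main(void)
{
	struct device dev = { .name = "gpu0" };
	struct pte_update_params params = { .dev = &dev, .src = 0x1000 };

	update_pages(&params, 8);	/* no separate device argument */
	return 0;
}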
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	65
1 file changed, 29 insertions, 36 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 577abfd3879e..fd7901c1320f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -58,6 +58,8 @@
  * the number of function parameters
  */
 struct amdgpu_pte_update_params {
+	/* amdgpu device we do this update for */
+	struct amdgpu_device *adev;
 	/* address where to copy page table entries from */
 	uint64_t src;
 	/* DMA addresses to use for mapping */
@@ -469,7 +471,6 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 /**
  * amdgpu_vm_update_pages - helper to call the right asic function
  *
- * @adev: amdgpu_device pointer
  * @params: see amdgpu_pte_update_params definition
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
@@ -480,8 +481,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * Traces the parameters and calls the right asic functions
  * to setup the page table using the DMA.
  */
-static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
-				   struct amdgpu_pte_update_params *params,
+static void amdgpu_vm_update_pages(struct amdgpu_pte_update_params *params,
 				   uint64_t pe, uint64_t addr,
 				   unsigned count, uint32_t incr,
 				   uint32_t flags)
@@ -489,20 +489,20 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 
 	if (params->src) {
-		amdgpu_vm_copy_pte(adev, params->ib,
+		amdgpu_vm_copy_pte(params->adev, params->ib,
 			pe, (params->src + (addr >> 12) * 8), count);
 
 	} else if (params->pages_addr) {
-		amdgpu_vm_write_pte(adev, params->ib,
+		amdgpu_vm_write_pte(params->adev, params->ib,
 			params->pages_addr,
 			pe, addr, count, incr, flags);
 
 	} else if (count < 3) {
-		amdgpu_vm_write_pte(adev, params->ib, NULL, pe, addr,
+		amdgpu_vm_write_pte(params->adev, params->ib, NULL, pe, addr,
 			count, incr, flags);
 
 	} else {
-		amdgpu_vm_set_pte_pde(adev, params->ib, pe, addr,
+		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
 			count, incr, flags);
 	}
 }
@@ -527,7 +527,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	uint64_t addr;
 	int r;
 
-	memset(&params, 0, sizeof(params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -545,9 +544,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
 	params.ib = &job->ibs[0];
-	amdgpu_vm_update_pages(adev, &params, addr, 0, entries,
-			       0, 0);
+	amdgpu_vm_update_pages(&params, addr, 0, entries, 0, 0);
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
 	WARN_ON(job->ibs[0].length_dw > 64);
@@ -624,7 +624,6 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
 	int r;
 
-	memset(&params, 0, sizeof(params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	/* padding, etc. */
@@ -637,6 +636,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
 	params.ib = &job->ibs[0];
 
 	/* walk over the address space and update the page directory */
@@ -657,9 +658,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, &params,
-						       last_pde, last_pt,
-						       count, incr,
+				amdgpu_vm_update_pages(&params, last_pde,
+						       last_pt, count, incr,
 						       AMDGPU_PTE_VALID);
 			}
 
@@ -672,8 +672,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, &params,
-				       last_pde, last_pt,
+		amdgpu_vm_update_pages(&params, last_pde, last_pt,
 				       count, incr, AMDGPU_PTE_VALID);
 
 	if (params.ib->length_dw != 0) {
@@ -705,15 +704,13 @@ error_free:
 /**
  * amdgpu_vm_frag_ptes - add fragment information to PTEs
  *
- * @adev: amdgpu_device pointer
  * @params: see amdgpu_pte_update_params definition
  * @pe_start: first PTE to handle
  * @pe_end: last PTE to handle
  * @addr: addr those PTEs should point to
  * @flags: hw mapping flags
  */
-static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
-				struct amdgpu_pte_update_params *params,
+static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
 				uint64_t pe_start, uint64_t pe_end,
 				uint64_t addr, uint32_t flags)
 {
@@ -754,38 +751,36 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 	    !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
 
 		count = (pe_end - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, params, pe_start,
-				       addr, count, AMDGPU_GPU_PAGE_SIZE,
-				       flags);
+		amdgpu_vm_update_pages(params, pe_start, addr, count,
+				       AMDGPU_GPU_PAGE_SIZE, flags);
 		return;
 	}
 
 	/* handle the 4K area at the beginning */
 	if (pe_start != frag_start) {
 		count = (frag_start - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, params, pe_start, addr,
-				       count, AMDGPU_GPU_PAGE_SIZE, flags);
+		amdgpu_vm_update_pages(params, pe_start, addr, count,
+				       AMDGPU_GPU_PAGE_SIZE, flags);
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 	}
 
 	/* handle the area in the middle */
 	count = (frag_end - frag_start) / 8;
-	amdgpu_vm_update_pages(adev, params, frag_start, addr, count,
+	amdgpu_vm_update_pages(params, frag_start, addr, count,
 			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
 
 	/* handle the 4K area at the end */
 	if (frag_end != pe_end) {
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 		count = (pe_end - frag_end) / 8;
-		amdgpu_vm_update_pages(adev, params, frag_end, addr,
-				       count, AMDGPU_GPU_PAGE_SIZE, flags);
+		amdgpu_vm_update_pages(params, frag_end, addr, count,
+				       AMDGPU_GPU_PAGE_SIZE, flags);
 	}
 }
 
 /**
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
- * @adev: amdgpu_device pointer
  * @params: see amdgpu_pte_update_params definition
  * @vm: requested vm
  * @start: start of GPU address range
@@ -795,8 +790,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  *
  * Update the page tables in the range @start - @end.
  */
-static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-				  struct amdgpu_pte_update_params *params,
+static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 				  struct amdgpu_vm *vm,
 				  uint64_t start, uint64_t end,
 				  uint64_t dst, uint32_t flags)
@@ -849,8 +843,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 			 */
 			cur_pe_end += 8 * nptes;
 		} else {
-			amdgpu_vm_frag_ptes(adev, params,
-					    cur_pe_start, cur_pe_end,
+			amdgpu_vm_frag_ptes(params, cur_pe_start, cur_pe_end,
 					    cur_dst, flags);
 
 			cur_pe_start = next_pe_start;
@@ -863,8 +856,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 	}
 
-	amdgpu_vm_frag_ptes(adev, params, cur_pe_start,
-			    cur_pe_end, cur_dst, flags);
+	amdgpu_vm_frag_ptes(params, cur_pe_start, cur_pe_end, cur_dst, flags);
 }
 
 /**
@@ -902,7 +894,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	int r;
 
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+
 	memset(&params, 0, sizeof(params));
+	params.adev = adev;
 	params.src = src;
 	params.pages_addr = pages_addr;
 
@@ -959,8 +953,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	amdgpu_vm_update_ptes(adev, &params, vm, start,
-			      last + 1, addr, flags);
+	amdgpu_vm_update_ptes(&params, vm, start, last + 1, addr, flags);
 
 	amdgpu_ring_pad_ib(ring, params.ib);
 	WARN_ON(params.ib->length_dw > ndw);