author    Dmitry Cherkasov <dcherkassov@gmail.com>  2012-09-17 13:36:19 -0400
committer Alex Deucher <alexander.deucher@amd.com>  2012-09-27 10:22:44 -0400
commit    fa87e62d357ccf73831cd52ed316593d0968020d
tree      c80c7a2c1bae57ce3da99683681eae1509b4b46c
parent    dce34bfd633d23ebddb196af8a4fa1c93c90ed07
drm/radeon: add 2-level VM pagetables support v9
PDE/PTE update code uses the CP ring for memory writes.
All page table entries are preallocated for now in alloc_pt().

It is made as a whole because it's hard to divide it into several
patches that compile and don't break anything when applied separately.

Tested on a cayman card.

v2: rebased on top of "refactor set_page chipset interface v3",
    code cleanups

v3: switched offset calc macros to inline funcs where possible,
    removed pd_addr from radeon_vm, switched the RADEON_BLOCK_SIZE
    define to 9 (and PTE_COUNT to 1 << BLOCK_SIZE)

v4 (ck): moved "incr" documentation to the previous patch, cleaned up
    and documented the RADEON_VM_* constants, changed the commit
    message to our usual format, simplified the patch a lot by
    removing everything not currently necessary, disabled the SI
    workaround

v5 (agd5f): fixed a typo in the tables_size calculation in
    radeon_vm_alloc_pt(); the second line should have been '+='
    rather than '='

v6: fixed the npdes calculation. In the scenario where the pfns to
    be mapped overlap two PDE spans:

    +-----------+-------------+
    | PDE span  | PDE span    |
    +-----------+-------------+
            |         |
            +---------+
            |  pfns   |
            +---------+

    the following npdes calculation gives an incorrect result:

        npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 1;

    For the case pictured above it should give npdes = 2, but it
    gives 1. This patch corrects that by rounding the last pfn up to
    a 512 boundary, the first pfn down to a 512 boundary, and then
    subtracting and dividing by 512.

v7: made the npde calculation clearer, fixed the ndw calculation

v8 (agd5f): reserve enough space for 2 full VM PTs, add some
    additional comments

v9: fixed a typo in the npde calculation

Signed-off-by: Dmitry Cherkasov <Dmitrii.Cherkasov@amd.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
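To make the v6 fix concrete, here is a minimal standalone sketch of the corrected npdes arithmetic. It is not part of the patch: it assumes RADEON_VM_BLOCK_SIZE = 9 and PTE_COUNT = 1 << 9 = 512 as set in v3, redefines the kernel's ALIGN() locally, and uses made-up test values.

#include <stdio.h>
#include <stdint.h>

#define RADEON_VM_BLOCK_SIZE 9                           /* v3: block size is 9 */
#define RADEON_VM_PTE_COUNT  (1 << RADEON_VM_BLOCK_SIZE) /* 512 PTEs per PDE */

/* local stand-in for the kernel's ALIGN() */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* corrected v6 calculation: round the last pfn up and the first pfn
 * down to a 512 boundary, then subtract and divide by 512 */
static unsigned npdes(uint64_t pfn, unsigned nptes)
{
	return (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
		(pfn & ~((uint64_t)RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
}

int main(void)
{
	/* pfns 500..523 straddle the boundary between two PDE spans:
	 * the old formula (nptes >> 9) + 1 gives 1, the fix gives 2 */
	printf("straddling: %u\n", npdes(500, 24));
	/* pfns 0..23 sit inside a single span: 1 */
	printf("contained:  %u\n", npdes(0, 24));
	return 0;
}

The rounding makes a mapping that crosses a 512-page boundary count both PDE spans, which the old nptes-only formula missed.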
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gart.c')

 drivers/gpu/drm/radeon/radeon_gart.c | 91 +++++++++++++++++++++++++-----------
 1 file changed, 67 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index bb9fc594779c..753b7ca3c807 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -423,6 +423,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
  */
 
 /**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+	return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
+}
+
+/**
  * radeon_vm_manager_init - init the vm manager
  *
  * @rdev: radeon_device pointer
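As a quick sanity check of the new helper's math: each 8-byte PDE covers RADEON_VM_PTE_COUNT (512) pages, so a directory over max_pfn pages needs (max_pfn >> 9) * 8 bytes. A standalone sketch, not part of the patch; the helper name and the max_pfn value are illustrative only.

#include <stdio.h>

#define RADEON_VM_BLOCK_SIZE 9	/* 512 pages covered per PDE */

/* same math as radeon_vm_directory_size(): one 8-byte PDE per
 * (1 << RADEON_VM_BLOCK_SIZE) pages of address space */
static unsigned directory_size(unsigned max_pfn)
{
	return (max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
}

int main(void)
{
	/* example: 1 GB of VM space = 262144 4 KB pages
	 * -> 512 PDEs -> a 4 KB page directory */
	printf("%u bytes\n", directory_size(262144));
	return 0;
}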
@@ -435,11 +447,15 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 	struct radeon_vm *vm;
 	struct radeon_bo_va *bo_va;
 	int r;
+	unsigned size;
 
 	if (!rdev->vm_manager.enabled) {
 		/* allocate enough for 2 full VM pts */
+		size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+		size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
+		size *= 2;
 		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-					      rdev->vm_manager.max_pfn * 8 * 2,
+					      size,
 					      RADEON_GEM_DOMAIN_VRAM);
 		if (r) {
 			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
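The suballocator is now sized for two full VMs, each needing a page-aligned directory plus page-aligned tables for every pfn, rather than the old flat max_pfn * 8 * 2. A standalone sketch of that arithmetic, not from the patch; it assumes RADEON_GPU_PAGE_SIZE = 4096 as in the driver, and the max_pfn value is illustrative. radeon_vm_alloc_pt() further down does the same per-VM sum with vm->last_pfn in place of max_pfn.

#include <stdio.h>

#define RADEON_VM_BLOCK_SIZE	9
#define RADEON_GPU_PAGE_SIZE	4096
#define RADEON_GPU_PAGE_ALIGN(a) \
	(((a) + RADEON_GPU_PAGE_SIZE - 1) & ~(RADEON_GPU_PAGE_SIZE - 1))

int main(void)
{
	unsigned max_pfn = 262144;	/* example: 1 GB of VM space */
	unsigned size;

	/* page directory... */
	size = RADEON_GPU_PAGE_ALIGN((max_pfn >> RADEON_VM_BLOCK_SIZE) * 8);
	/* ...plus page tables covering every pfn... */
	size += RADEON_GPU_PAGE_ALIGN(max_pfn * 8);
	/* ...doubled so two full VMs fit */
	size *= 2;

	printf("%u KB\n", size / 1024);	/* 4104 KB for this example */
	return 0;
}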
@@ -490,7 +506,6 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
 
 	list_del_init(&vm->list);
 	radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
-	vm->pt = NULL;
 
 	list_for_each_entry(bo_va, &vm->va, vm_list) {
 		bo_va->valid = false;
@@ -546,11 +561,17 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
 	struct radeon_vm *vm_evict;
 	int r;
+	u64 *pd_addr;
+	int tables_size;
 
 	if (vm == NULL) {
 		return -EINVAL;
 	}
 
+	/* allocate enough to cover the current VM size */
+	tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+	tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
+
 	if (vm->sa_bo != NULL) {
 		/* update lru */
 		list_del_init(&vm->list);
@@ -560,8 +581,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 
 retry:
 	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
-			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
-			     RADEON_GPU_PAGE_SIZE, false);
+			     tables_size, RADEON_GPU_PAGE_SIZE, false);
 	if (r == -ENOMEM) {
 		if (list_empty(&rdev->vm_manager.lru_vm)) {
 			return r;
@@ -576,9 +596,9 @@ retry:
 		return r;
 	}
 
-	vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
-	vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
-	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
+	pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
+	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
+	memset(pd_addr, 0, tables_size);
 
 	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
 	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
@@ -866,8 +886,9 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	struct radeon_ring *ring = &rdev->ring[ridx];
 	struct radeon_semaphore *sem = NULL;
 	struct radeon_bo_va *bo_va;
-	unsigned ngpu_pages, ndw;
-	uint64_t pfn, addr;
+	unsigned nptes, npdes, ndw;
+	uint64_t pe, addr;
+	uint64_t pfn;
 	int r;
 
 	/* nothing to do if vm isn't bound */
@@ -889,10 +910,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
 		return 0;
 
-	ngpu_pages = radeon_bo_ngpu_pages(bo);
 	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
 	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
-	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
 	if (mem) {
 		addr = mem->start << PAGE_SHIFT;
 		if (mem->mem_type != TTM_PL_SYSTEM) {
@@ -921,9 +940,26 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	}
 
 	/* estimate number of dw needed */
+	/* reserve space for 32-bit padding */
 	ndw = 32;
-	ndw += (ngpu_pages >> 12) * 3;
-	ndw += ngpu_pages * 2;
+
+	nptes = radeon_bo_ngpu_pages(bo);
+
+	pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
+
+	/* handle cases where a bo spans several pdes */
+	npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
+		 (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
+
+	/* reserve space for one header for every 2k dwords */
+	ndw += (nptes >> 11) * 3;
+	/* reserve space for pte addresses */
+	ndw += nptes * 2;
+
+	/* reserve space for one header for every 2k dwords */
+	ndw += (npdes >> 11) * 3;
+	/* reserve space for pde addresses */
+	ndw += npdes * 2;
 
 	r = radeon_ring_lock(rdev, ring, ndw);
 	if (r) {
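For a feel of the reserved dword count: the estimate takes 32 dwords of padding, a 3-dword header per 2k dwords, and 2 dwords per 64-bit entry. A standalone sketch for a hypothetical 8 MB BO mapped at a 2 MB offset, not from the patch; constants as above, values illustrative.

#include <stdio.h>
#include <stdint.h>

#define RADEON_VM_BLOCK_SIZE 9
#define RADEON_VM_PTE_COUNT  (1 << RADEON_VM_BLOCK_SIZE)
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t pfn = 512;	/* BO mapped at 2 MB -> first pfn 512 */
	unsigned nptes = 2048;	/* 8 MB BO -> 2048 4 KB pages */
	unsigned npdes, ndw;

	npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
		 (pfn & ~((uint64_t)RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;

	ndw = 32;			/* padding */
	ndw += (nptes >> 11) * 3;	/* one header per 2k dwords of ptes */
	ndw += nptes * 2;		/* two dwords per pte */
	ndw += (npdes >> 11) * 3;	/* one header per 2k dwords of pdes */
	ndw += npdes * 2;		/* two dwords per pde */

	printf("npdes=%u ndw=%u\n", npdes, ndw);	/* npdes=4 ndw=4139 */
	return 0;
}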
@@ -935,8 +971,22 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		radeon_fence_note_sync(vm->fence, ridx);
 	}
 
-	radeon_asic_vm_set_page(rdev, vm->pt_gpu_addr + pfn * 8, addr,
-				ngpu_pages, RADEON_GPU_PAGE_SIZE, bo_va->flags);
+	/* update page table entries */
+	pe = vm->pd_gpu_addr;
+	pe += radeon_vm_directory_size(rdev);
+	pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
+
+	radeon_asic_vm_set_page(rdev, pe, addr, nptes,
+				RADEON_GPU_PAGE_SIZE, bo_va->flags);
+
+	/* update page directory entries */
+	addr = pe;
+
+	pe = vm->pd_gpu_addr;
+	pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
+
+	radeon_asic_vm_set_page(rdev, pe, addr, npdes,
+				RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
 
 	radeon_fence_unref(&vm->fence);
 	r = radeon_fence_emit(rdev, &vm->fence, ridx);
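The offset math behind those two writes: the page directory sits at the start of the suballocation with all page tables packed contiguously after it, so the PTE for a given pfn lives at pd_gpu_addr + directory_size + pfn * 8 and its PDE at pd_gpu_addr + (pfn >> 9) * 8. A standalone sketch, not from the patch; the base address, max_pfn, and soffset are made-up example values.

#include <stdio.h>
#include <stdint.h>

#define RADEON_VM_BLOCK_SIZE	9
#define RADEON_GPU_PAGE_SIZE	4096

int main(void)
{
	uint64_t pd_gpu_addr = 0x100000;	/* made-up allocation base */
	unsigned max_pfn = 262144;		/* example: 1 GB of VM space */
	uint64_t soffset = 8 << 20;		/* BO mapped at 8 MB */
	uint64_t pfn = soffset / RADEON_GPU_PAGE_SIZE;
	uint64_t dir_size = ((uint64_t)max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;

	/* ptes are packed right after the directory, 8 bytes per pfn */
	uint64_t pte_addr = pd_gpu_addr + dir_size + pfn * 8;
	/* the pde for this range, 8 bytes per 512-page span */
	uint64_t pde_addr = pd_gpu_addr + (pfn >> RADEON_VM_BLOCK_SIZE) * 8;

	printf("pte at 0x%llx, pde at 0x%llx\n",
	       (unsigned long long)pte_addr, (unsigned long long)pde_addr);
	return 0;
}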
@@ -1018,18 +1068,11 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 
 	vm->id = 0;
 	vm->fence = NULL;
+	vm->last_pfn = 0;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
-	/* SI requires equal sized PTs for all VMs, so always set
-	 * last_pfn to max_pfn.  cayman allows variable sized
-	 * pts so we can grow then as needed.  Once we switch
-	 * to two level pts we can unify this again.
-	 */
-	if (rdev->family >= CHIP_TAHITI)
-		vm->last_pfn = rdev->vm_manager.max_pfn;
-	else
-		vm->last_pfn = 0;
+
 	/* map the ib pool buffer at 0 in virtual address space, set
 	 * read only
 	 */