diff options
Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/radeon/radeon_vm.c | 43 ++++++++++++++++++++++++++-------------
 1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e97588162030..ccae4d9dc3de 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -410,8 +410,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 	addr = radeon_bo_gpu_offset(bo);
 	entries = radeon_bo_size(bo) / 8;
 
-	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
-			  NULL, entries * 2 + 64);
+	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
 	if (r)
 		goto error;
 
@@ -419,6 +418,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 
 	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
 	radeon_asic_vm_pad_ib(rdev, &ib);
+	WARN_ON(ib.length_dw > 64);
 
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r)
@@ -642,7 +642,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 	ndw = 64;
 
 	/* assume the worst case */
-	ndw += vm->max_pde_used * 16;
+	ndw += vm->max_pde_used * 6;
 
 	/* update too big for an IB */
 	if (ndw > 0xfffff)
@@ -692,6 +692,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 	radeon_asic_vm_pad_ib(rdev, &ib);
 	radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
 	radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+	WARN_ON(ib.length_dw > ndw);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
@@ -871,8 +872,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 {
 	struct radeon_vm *vm = bo_va->vm;
 	struct radeon_ib ib;
-	unsigned nptes, ndw;
+	unsigned nptes, ncmds, ndw;
 	uint64_t addr;
+	uint32_t flags;
 	int r;
 
 	if (!bo_va->it.start) {
@@ -911,19 +913,32 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 
 	nptes = bo_va->it.last - bo_va->it.start + 1;
 
+	/* reserve space for one command every (1 << BLOCK_SIZE) entries
+	   or 2k dwords (whatever is smaller) */
+	ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
+
 	/* padding, etc. */
 	ndw = 64;
 
-	if (radeon_vm_block_size > 11)
-		/* reserve space for one header for every 2k dwords */
-		ndw += (nptes >> 11) * 4;
-	else
-		/* reserve space for one header for
-		    every (1 << BLOCK_SIZE) entries */
-		ndw += (nptes >> radeon_vm_block_size) * 4;
+	flags = radeon_vm_page_flags(bo_va->flags);
+	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+		/* only copy commands needed */
+		ndw += ncmds * 7;
 
-	/* reserve space for pte addresses */
-	ndw += nptes * 2;
+	} else if (flags & R600_PTE_SYSTEM) {
+		/* header for write data commands */
+		ndw += ncmds * 4;
+
+		/* body of write data command */
+		ndw += nptes * 2;
+
+	} else {
+		/* set page commands needed */
+		ndw += ncmds * 10;
+
+		/* two extra commands for begin/end of fragment */
+		ndw += 2 * 10;
+	}
 
 	/* update too big for an IB */
 	if (ndw > 0xfffff)
@@ -939,6 +954,8 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 			radeon_vm_page_flags(bo_va->flags));
 
 	radeon_asic_vm_pad_ib(rdev, &ib);
+	WARN_ON(ib.length_dw > ndw);
+
 	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {