diff options
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_vm.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_vm.c | 30 |
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ccae4d9dc3de..671ee566aa51 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -399,7 +399,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, | |||
399 | INIT_LIST_HEAD(&head); | 399 | INIT_LIST_HEAD(&head); |
400 | list_add(&tv.head, &head); | 400 | list_add(&tv.head, &head); |
401 | 401 | ||
402 | r = ttm_eu_reserve_buffers(&ticket, &head); | 402 | r = ttm_eu_reserve_buffers(&ticket, &head, true); |
403 | if (r) | 403 | if (r) |
404 | return r; | 404 | return r; |
405 | 405 | ||
@@ -420,11 +420,11 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, | |||
420 | radeon_asic_vm_pad_ib(rdev, &ib); | 420 | radeon_asic_vm_pad_ib(rdev, &ib); |
421 | WARN_ON(ib.length_dw > 64); | 421 | WARN_ON(ib.length_dw > 64); |
422 | 422 | ||
423 | r = radeon_ib_schedule(rdev, &ib, NULL); | 423 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
424 | if (r) | 424 | if (r) |
425 | goto error; | 425 | goto error; |
426 | 426 | ||
427 | ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); | 427 | ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base); |
428 | radeon_ib_free(rdev, &ib); | 428 | radeon_ib_free(rdev, &ib); |
429 | 429 | ||
430 | return 0; | 430 | return 0; |
@@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, | |||
483 | /* add a clone of the bo_va to clear the old address */ | 483 | /* add a clone of the bo_va to clear the old address */ |
484 | struct radeon_bo_va *tmp; | 484 | struct radeon_bo_va *tmp; |
485 | tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); | 485 | tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); |
486 | if (!tmp) { | ||
487 | mutex_unlock(&vm->mutex); | ||
488 | return -ENOMEM; | ||
489 | } | ||
486 | tmp->it.start = bo_va->it.start; | 490 | tmp->it.start = bo_va->it.start; |
487 | tmp->it.last = bo_va->it.last; | 491 | tmp->it.last = bo_va->it.last; |
488 | tmp->vm = vm; | 492 | tmp->vm = vm; |
@@ -689,11 +693,17 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, | |||
689 | incr, R600_PTE_VALID); | 693 | incr, R600_PTE_VALID); |
690 | 694 | ||
691 | if (ib.length_dw != 0) { | 695 | if (ib.length_dw != 0) { |
696 | struct fence *fence; | ||
697 | |||
692 | radeon_asic_vm_pad_ib(rdev, &ib); | 698 | radeon_asic_vm_pad_ib(rdev, &ib); |
693 | radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); | 699 | |
700 | fence = reservation_object_get_excl(pd->tbo.resv); | ||
701 | radeon_semaphore_sync_to(ib.semaphore, | ||
702 | (struct radeon_fence *)fence); | ||
703 | |||
694 | radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); | 704 | radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); |
695 | WARN_ON(ib.length_dw > ndw); | 705 | WARN_ON(ib.length_dw > ndw); |
696 | r = radeon_ib_schedule(rdev, &ib, NULL); | 706 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
697 | if (r) { | 707 | if (r) { |
698 | radeon_ib_free(rdev, &ib); | 708 | radeon_ib_free(rdev, &ib); |
699 | return r; | 709 | return r; |
@@ -816,8 +826,11 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, | |||
816 | struct radeon_bo *pt = vm->page_tables[pt_idx].bo; | 826 | struct radeon_bo *pt = vm->page_tables[pt_idx].bo; |
817 | unsigned nptes; | 827 | unsigned nptes; |
818 | uint64_t pte; | 828 | uint64_t pte; |
829 | struct fence *fence; | ||
819 | 830 | ||
820 | radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj); | 831 | fence = reservation_object_get_excl(pt->tbo.resv); |
832 | radeon_semaphore_sync_to(ib->semaphore, | ||
833 | (struct radeon_fence *)fence); | ||
821 | 834 | ||
822 | if ((addr & ~mask) == (end & ~mask)) | 835 | if ((addr & ~mask) == (end & ~mask)) |
823 | nptes = end - addr; | 836 | nptes = end - addr; |
@@ -888,6 +901,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev, | |||
888 | bo_va->flags &= ~RADEON_VM_PAGE_VALID; | 901 | bo_va->flags &= ~RADEON_VM_PAGE_VALID; |
889 | bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; | 902 | bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; |
890 | bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; | 903 | bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; |
904 | if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm)) | ||
905 | bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; | ||
906 | |||
891 | if (mem) { | 907 | if (mem) { |
892 | addr = mem->start << PAGE_SHIFT; | 908 | addr = mem->start << PAGE_SHIFT; |
893 | if (mem->mem_type != TTM_PL_SYSTEM) { | 909 | if (mem->mem_type != TTM_PL_SYSTEM) { |
@@ -957,7 +973,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, | |||
957 | WARN_ON(ib.length_dw > ndw); | 973 | WARN_ON(ib.length_dw > ndw); |
958 | 974 | ||
959 | radeon_semaphore_sync_to(ib.semaphore, vm->fence); | 975 | radeon_semaphore_sync_to(ib.semaphore, vm->fence); |
960 | r = radeon_ib_schedule(rdev, &ib, NULL); | 976 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
961 | if (r) { | 977 | if (r) { |
962 | radeon_ib_free(rdev, &ib); | 978 | radeon_ib_free(rdev, &ib); |
963 | return r; | 979 | return r; |