Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gart.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 26
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 456a77cf4b7f..79db56e6c2ac 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -80,7 +80,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
 	if (rdev->gart.robj == NULL) {
 		r = radeon_bo_create(rdev, rdev->gart.table_size,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     &rdev->gart.robj);
+				     NULL, &rdev->gart.robj);
 		if (r) {
 			return r;
 		}
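radeon_bo_create() now takes one more argument, passed as NULL here, just before the returned buffer-object pointer. A minimal call-site sketch, assuming the new slot is an optional struct sg_table * that is only meaningful for imported (PRIME) buffers and therefore NULL for an ordinary VRAM allocation:

	/* Sketch: allocate the GART table in VRAM. The NULL fills the new
	 * argument slot (assumed to be an optional sg_table) added in front
	 * of the struct radeon_bo ** out parameter. */
	r = radeon_bo_create(rdev, rdev->gart.table_size, PAGE_SIZE,
			     true, RADEON_GEM_DOMAIN_VRAM,
			     NULL, &rdev->gart.robj);
	if (r)
		return r;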
@@ -326,7 +326,7 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
 	rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
 	list_del_init(&vm->list);
 	vm->id = -1;
-	radeon_sa_bo_free(rdev, &vm->sa_bo);
+	radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
 	vm->pt = NULL;
 
 	list_for_each_entry(bo_va, &vm->va, vm_list) {
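radeon_sa_bo_free() likewise gains a third parameter, NULL in this caller. Presumably it is an optional fence that lets the sub-allocator defer reuse of the block until the GPU has finished with it; passing NULL releases the allocation immediately. A hedged sketch of the call as used in the unbind path:

	/* Sketch: no fence is passed, so the sub-allocation backing the page
	 * table is assumed to become reusable right away. */
	radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);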
@@ -395,7 +395,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
 retry:
 	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
 			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
-			     RADEON_GPU_PAGE_SIZE);
+			     RADEON_GPU_PAGE_SIZE, false);
 	if (r) {
 		if (list_empty(&rdev->vm_manager.lru_vm)) {
 			return r;
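radeon_sa_bo_new() grows a trailing bool, false here. The flag presumably selects whether the allocator may block waiting for space to free up; this caller prefers to fail, evict the least-recently-used VM and retry instead. A sketch of that pattern (vm_evict stands for the LRU victim picked by code not shown in this hunk):

retry:
	/* Sketch: request the page-table block from the sub-allocator; the
	 * trailing 'false' is assumed to mean "do not block for space". */
	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
			     RADEON_GPU_PAGE_SIZE, false);
	if (r) {
		if (list_empty(&rdev->vm_manager.lru_vm))
			return r;	/* nothing left to evict */
		/* evict the least recently used VM and try again */
		radeon_vm_unbind(rdev, vm_evict);
		goto retry;
	}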
@@ -404,10 +404,8 @@ retry:
 		radeon_vm_unbind(rdev, vm_evict);
 		goto retry;
 	}
-	vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
-	vm->pt += (vm->sa_bo.offset >> 3);
-	vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
-	vm->pt_gpu_addr += vm->sa_bo.offset;
+	vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
+	vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
 	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
 
 retry_id:
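The page-table CPU pointer and GPU address are now obtained through accessors rather than recomputed by hand from the sa_manager base plus the sub-allocation offset; the helpers are assumed to encapsulate exactly that arithmetic. A sketch of the accessor-based version:

	/* Sketch: read back the sub-allocation addresses via the accessors
	 * instead of poking at sa_manager internals. */
	vm->pt          = radeon_sa_bo_cpu_addr(vm->sa_bo);	/* CPU mapping of the page table */
	vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);	/* GPU address of the page table  */
	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));	/* clear the fresh page table */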
@@ -428,14 +426,14 @@ retry_id:
 	/* do hw bind */
 	r = rdev->vm_manager.funcs->bind(rdev, vm, id);
 	if (r) {
-		radeon_sa_bo_free(rdev, &vm->sa_bo);
+		radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
 		return r;
 	}
 	rdev->vm_manager.use_bitmap |= 1 << id;
 	vm->id = id;
 	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-	return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
-				       &rdev->ib_pool.sa_manager.bo->tbo.mem);
+	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
+				       &rdev->ring_tmp_bo.bo->tbo.mem);
 }
 
 /* object have to be reserved */
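This hunk and the two that follow are a rename: the buffer object previously reached as rdev->ib_pool.sa_manager.bo is now rdev->ring_tmp_bo.bo, assumed to be the same backing object under its new name. The bind path still updates its PTEs so it stays mapped at virtual address 0:

	/* Sketch: refresh the PTEs for the renamed temporary ring BO. */
	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
				       &rdev->ring_tmp_bo.bo->tbo.mem);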
@@ -633,7 +631,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	/* map the ib pool buffer at 0 in virtual address space, set
 	 * read only
 	 */
-	r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+	r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
 			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
 	return r;
 }
@@ -650,12 +648,12 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_mutex_unlock(&rdev->cs_mutex);
 
 	/* remove all bo */
-	r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 	if (!r) {
-		bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+		bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
 		list_del_init(&bo_va->bo_list);
 		list_del_init(&bo_va->vm_list);
-		radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 		kfree(bo_va);
 	}
 	if (!list_empty(&vm->va)) {