 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 21 ++++++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 29 +++++++----------------------
 2 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 16dca46314bd..00c5b580f56c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -515,6 +515,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *rbo;
 	struct amdgpu_bo_va *bo_va;
+	struct ttm_validate_buffer tv, tv_pd;
+	struct ww_acquire_ctx ticket;
+	struct list_head list, duplicates;
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
@@ -552,7 +555,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
-	r = amdgpu_bo_reserve(rbo, false);
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
+	tv.bo = &rbo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	if (args->operation == AMDGPU_VA_OP_MAP) {
+		tv_pd.bo = &fpriv->vm.page_directory->tbo;
+		tv_pd.shared = true;
+		list_add(&tv_pd.head, &list);
+	}
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
 		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
@@ -561,7 +575,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
-		amdgpu_bo_unreserve(rbo);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		drm_gem_object_unreference_unlocked(gobj);
 		mutex_unlock(&fpriv->vm.mutex);
 		return -ENOENT;
 	}
@@ -584,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	default:
 		break;
 	}
-
+	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
 	mutex_unlock(&fpriv->vm.mutex);
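
Note on the reservation change above: the ioctl switches from a single
amdgpu_bo_reserve() to ttm_eu_reserve_buffers(), which locks every buffer on
a validation list under one ww_acquire_ctx, so the GEM BO and the VM page
directory can be reserved together without risking a deadlock against a task
that locks them in the opposite order. What follows is a minimal sketch of
that pattern, not kernel source; the helper name example_reserve_pair is
hypothetical, but the TTM execbuf-util calls and struct fields are the ones
used in the patch (kernel ~4.3 era signatures).

	/* Sketch: atomically reserve two TTM BOs via one validation list. */
	static int example_reserve_pair(struct ttm_buffer_object *bo,
					struct ttm_buffer_object *pd)
	{
		struct ttm_validate_buffer tv_bo, tv_pd;
		struct ww_acquire_ctx ticket;
		struct list_head list, duplicates;
		int r;

		INIT_LIST_HEAD(&list);
		INIT_LIST_HEAD(&duplicates);

		tv_bo.bo = bo;
		tv_bo.shared = true;	/* take a shared (read) reservation */
		list_add(&tv_bo.head, &list);

		tv_pd.bo = pd;
		tv_pd.shared = true;
		list_add(&tv_pd.head, &list);

		/* interruptible wait; duplicate entries go to &duplicates */
		r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
		if (r)
			return r;

		/* ... operate on the reserved buffers ... */

		ttm_eu_backoff_reservation(&ticket, &list);
		return 0;
	}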
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e6dc19bc2dd8..159ce54bbd8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -985,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
  * Add a mapping of the BO at the specefied addr into the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		     struct amdgpu_bo_va *bo_va,
@@ -1001,23 +1001,18 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 	/* validate the parameters */
 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-	    size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
-		amdgpu_bo_unreserve(bo_va->bo);
+	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
 		return -EINVAL;
-	}
 
 	/* make sure object fit at this offset */
 	eaddr = saddr + size;
-	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
-		amdgpu_bo_unreserve(bo_va->bo);
+	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
 		return -EINVAL;
-	}
 
 	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
 	if (last_pfn > adev->vm_manager.max_pfn) {
 		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
 			last_pfn, adev->vm_manager.max_pfn);
-		amdgpu_bo_unreserve(bo_va->bo);
 		return -EINVAL;
 	}
 
@@ -1034,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
 			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
 			tmp->it.start, tmp->it.last + 1);
-		amdgpu_bo_unreserve(bo_va->bo);
 		r = -EINVAL;
 		goto error;
 	}
 
 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
 	if (!mapping) {
-		amdgpu_bo_unreserve(bo_va->bo);
 		r = -ENOMEM;
 		goto error;
 	}
@@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	if (eaddr > vm->max_pde_used)
 		vm->max_pde_used = eaddr;
 
-	amdgpu_bo_unreserve(bo_va->bo);
-
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
 		struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,18 +1068,15 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		if (vm->page_tables[pt_idx].bo)
 			continue;
 
-		ww_mutex_lock(&resv->lock, NULL);
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 				     NULL, resv, &pt);
-		if (r) {
-			ww_mutex_unlock(&resv->lock);
+		if (r)
 			goto error_free;
-		}
+
 		r = amdgpu_vm_clear_bo(adev, pt);
-		ww_mutex_unlock(&resv->lock);
 		if (r) {
 			amdgpu_bo_unref(&pt);
 			goto error_free;
@@ -1122,7 +1110,7 @@ error:
  * Remove a mapping of the BO at the specefied addr from the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       struct amdgpu_bo_va *bo_va,
@@ -1147,10 +1135,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 			break;
 		}
 
-		if (&mapping->list == &bo_va->invalids) {
-			amdgpu_bo_unreserve(bo_va->bo);
+		if (&mapping->list == &bo_va->invalids)
 			return -ENOENT;
-		}
 	}
 
 	list_del(&mapping->list);
@@ -1163,7 +1149,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		list_add(&mapping->list, &vm->freed);
 	else
 		kfree(mapping);
-	amdgpu_bo_unreserve(bo_va->bo);
 
 	return 0;
 }
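
Note on the amdgpu_vm.c changes above: amdgpu_vm_bo_map() and
amdgpu_vm_bo_unmap() no longer drop the BO reservation on any path; the doc
comments now read "reserved and unreserved outside", and every internal
amdgpu_bo_unreserve() and ww_mutex_lock() on the reservation is gone. The
caller owns the reservation across the call, and for the map case must also
hold the page directory reservation, since page tables are created against
vm->page_directory->tbo.resv without internal locking (which is why the
ioctl adds tv_pd to the validation list for AMDGPU_VA_OP_MAP). Below is a
minimal caller-side sketch under those assumptions, using the simpler unmap
path; example_unmap is a hypothetical helper, not kernel code.

	/* Sketch: the caller reserves, calls, and always unreserves. */
	static int example_unmap(struct amdgpu_device *adev,
				 struct amdgpu_bo_va *bo_va, uint64_t saddr)
	{
		int r;

		r = amdgpu_bo_reserve(bo_va->bo, false);
		if (r)
			return r;

		/* error paths inside no longer unreserve on our behalf */
		r = amdgpu_vm_bo_unmap(adev, bo_va, saddr);

		amdgpu_bo_unreserve(bo_va->bo);	/* always the caller's job now */
		return r;
	}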