author		Thomas Gleixner <tglx@linutronix.de>	2017-11-23 10:29:05 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2017-11-23 10:29:05 -0500
commit		866c9b94ef968445c52214b3748ecc52a8491bca (patch)
tree		1fd073acb9be8e89e77b35c41e2964ac6feabee6 /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
parent		aea3706cfc4d952ed6d32b6d5845b5ecd99ed7f5 (diff)
parent		841b86f3289dbe858daeceec36423d4ea286fac2 (diff)
Merge tag 'for-linus-timers-conversion-final-v4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into timers/urgent
Pull the last batch of manual timer conversions from Kees Cook:

 - final batch of "non trivial" timer conversions (multi-tree
   dependencies, things Coccinelle couldn't handle, etc).

 - treewide conversions via Coccinelle, in 4 steps:

   - DEFINE_TIMER() functions converted to struct timer_list * argument

   - init_timer() -> setup_timer()

   - setup_timer() -> timer_setup()

   - setup_timer() -> timer_setup() (with a single embedded structure)

 - deprecated timer API removals (init_timer(), setup_*timer())

 - finalization of new API (remove global casts)
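To illustrate the conversion pattern this series completes, here is a minimal
sketch of the old setup_timer() style next to the new timer_setup() style.
struct my_dev, its fields, and the function names are hypothetical examples,
not code from this merge:

	#include <linux/timer.h>

	struct my_dev {
		struct timer_list watchdog;	/* timer embedded in the owning struct */
		unsigned int missed_ticks;
	};

	/*
	 * New-style callback: receives the timer_list pointer itself rather
	 * than an opaque unsigned long, so no cast is needed.
	 */
	static void my_dev_watchdog_fn(struct timer_list *t)
	{
		/* from_timer() is container_of() for timers: recover the owner. */
		struct my_dev *dev = from_timer(dev, t, watchdog);

		dev->missed_ticks++;
	}

	static void my_dev_init(struct my_dev *dev)
	{
		/*
		 * Before this series:
		 *   setup_timer(&dev->watchdog, my_dev_watchdog_fn,
		 *		 (unsigned long)dev);
		 * where the callback took the context as an unsigned long.
		 * After this series:
		 */
		timer_setup(&dev->watchdog, my_dev_watchdog_fn, 0);
	}

Dropping the unsigned long cookie is what allows the "remove global casts"
finalization step: the callback prototype is now type-checked.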
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 130
1 file changed, 64 insertions(+), 66 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 6149a47fe63d..a418df1b9422 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -44,11 +44,12 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 }
 
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, bool kernel,
+			     struct reservation_object *resv,
 			     struct drm_gem_object **obj)
 {
-	struct amdgpu_bo *robj;
+	struct amdgpu_bo *bo;
 	int r;
 
 	*obj = NULL;
@@ -59,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 
 retry:
 	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
-			     flags, NULL, NULL, 0, &robj);
+			     flags, NULL, resv, 0, &bo);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -71,7 +72,7 @@ retry:
 		}
 		return r;
 	}
-	*obj = &robj->gem_base;
+	*obj = &bo->gem_base;
 
 	return 0;
 }
@@ -112,7 +113,17 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
+	struct mm_struct *mm;
 	int r;
+
+	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
+	if (mm && mm != current->mm)
+		return -EPERM;
+
+	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
+	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
+		return -EPERM;
+
 	r = amdgpu_bo_reserve(abo, false);
 	if (r)
 		return r;
@@ -127,35 +138,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
 	return 0;
 }
 
-static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
-{
-	/* if anything is swapped out don't swap it in here,
-	   just abort and wait for the next CS */
-	if (!amdgpu_bo_gpu_accessible(bo))
-		return -ERESTARTSYS;
-
-	if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
-		return -ERESTARTSYS;
-
-	return 0;
-}
-
-static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
-				struct amdgpu_vm *vm,
-				struct list_head *list)
-{
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
-		struct amdgpu_bo *bo =
-			container_of(entry->bo, struct amdgpu_bo, tbo);
-		if (amdgpu_gem_vm_check(NULL, bo))
-			return false;
-	}
-
-	return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
-}
-
 void amdgpu_gem_object_close(struct drm_gem_object *obj,
 			     struct drm_file *file_priv)
 {
@@ -165,13 +147,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_vm *vm = &fpriv->vm;
 
 	struct amdgpu_bo_list_entry vm_pd;
-	struct list_head list;
+	struct list_head list, duplicates;
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
 	int r;
 
 	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
 
 	tv.bo = &bo->tbo;
 	tv.shared = true;
@@ -179,7 +162,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
@@ -189,7 +172,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	if (bo_va && --bo_va->ref_count == 0) {
 		amdgpu_vm_bo_rmv(adev, bo_va);
 
-		if (amdgpu_gem_vm_ready(adev, vm, &list)) {
+		if (amdgpu_vm_ready(vm)) {
 			struct dma_fence *fence = NULL;
 
 			r = amdgpu_vm_clear_freed(adev, vm, &fence);
@@ -214,18 +197,24 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
 	union drm_amdgpu_gem_create *args = data;
+	uint64_t flags = args->in.domain_flags;
 	uint64_t size = args->in.bo_size;
+	struct reservation_object *resv = NULL;
 	struct drm_gem_object *gobj;
 	uint32_t handle;
-	bool kernel = false;
 	int r;
 
 	/* reject invalid gem flags */
-	if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-				      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-				      AMDGPU_GEM_CREATE_VRAM_CLEARED))
+	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
+		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
+		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+
 		return -EINVAL;
 
 	/* reject invalid gem domains */
@@ -240,7 +229,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
-		kernel = true;
+		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
 			size = size << AMDGPU_GDS_SHIFT;
 		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
@@ -252,10 +241,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	}
 	size = roundup(size, PAGE_SIZE);
 
+	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+		r = amdgpu_bo_reserve(vm->root.base.bo, false);
+		if (r)
+			return r;
+
+		resv = vm->root.base.bo->tbo.resv;
+	}
+
 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
 				     (u32)(0xffffffff & args->in.domains),
-				     args->in.domain_flags,
-				     kernel, &gobj);
+				     flags, false, resv, &gobj);
+	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+		if (!r) {
+			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
+		}
+		amdgpu_bo_unreserve(vm->root.base.bo);
+	}
 	if (r)
 		return r;
 
@@ -297,9 +301,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	}
 
 	/* create a gem object to contain this object in */
-	r = amdgpu_gem_object_create(adev, args->size, 0,
-				     AMDGPU_GEM_DOMAIN_CPU, 0,
-				     0, &gobj);
+	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
+				     0, 0, NULL, &gobj);
 	if (r)
 		return r;
 
@@ -317,8 +320,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-		down_read(&current->mm->mmap_sem);
-
 		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
 						 bo->tbo.ttm->pages);
 		if (r)
@@ -333,8 +334,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		amdgpu_bo_unreserve(bo);
 		if (r)
 			goto free_pages;
-
-		up_read(&current->mm->mmap_sem);
 	}
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
@@ -347,7 +346,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	return 0;
 
 free_pages:
-	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
+	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
 
 unlock_mmap_sem:
 	up_read(&current->mm->mmap_sem);
@@ -511,10 +510,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 				    struct list_head *list,
 				    uint32_t operation)
 {
-	int r = -ERESTARTSYS;
+	int r;
 
-	if (!amdgpu_gem_vm_ready(adev, vm, list))
-		goto error;
+	if (!amdgpu_vm_ready(vm))
+		return;
 
 	r = amdgpu_vm_update_directories(adev, vm);
 	if (r)
@@ -551,7 +550,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_bo_list_entry vm_pd;
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
-	struct list_head list;
+	struct list_head list, duplicates;
 	uint64_t va_flags;
 	int r = 0;
 
@@ -580,13 +579,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			args->operation);
 		return -EINVAL;
 	}
-	if ((args->operation == AMDGPU_VA_OP_MAP) ||
-	    (args->operation == AMDGPU_VA_OP_REPLACE)) {
-		if (amdgpu_kms_vram_lost(adev, fpriv))
-			return -ENODEV;
-	}
 
 	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
 	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
 	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
 		gobj = drm_gem_object_lookup(filp, args->handle);
@@ -603,7 +598,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r)
 		goto error_unref;
 
@@ -669,6 +664,7 @@ error_unref:
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp)
 {
+	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_amdgpu_gem_op *args = data;
 	struct drm_gem_object *gobj;
 	struct amdgpu_bo *robj;
@@ -716,6 +712,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
+		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+			amdgpu_vm_bo_invalidate(adev, robj, true);
+
 		amdgpu_bo_unreserve(robj);
 		break;
 	default:
@@ -745,8 +744,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 	r = amdgpu_gem_object_create(adev, args->size, 0,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-				     ttm_bo_type_device,
-				     &gobj);
+				     false, NULL, &gobj);
 	if (r)
 		return -ENOMEM;
 