author     Christian König <christian.koenig@amd.com>   2016-02-23 06:36:59 -0500
committer  Alex Deucher <alexander.deucher@amd.com>     2016-03-08 11:01:50 -0500
commit     2f568dbd6b944c2e8c0c54b53c2211c23995e6a4 (patch)
tree       385cdb8bc9d264b40e9f83e6e89440b867866c07 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent     d564a06e1c9c285bab1c1579c18c811aa1271884 (diff)
drm/amdgpu: move get_user_pages out of amdgpu_ttm_tt_pin_userptr v6
That avoids lock inversion between the BO reservation lock and the anon_vma lock.

v2:
* Changed amdgpu_bo_list_entry.user_pages to an array of pointers
* Lock mmap_sem only for get_user_pages
* Added invalidation of unbound userpointer BOs
* Fixed memory leak and page reference leak

v3 (chk):
* Revert locking mmap_sem only for get_user_pages
* Revert adding invalidation of unbound userpointer BOs
* Sanitize and fix error handling

v4 (chk):
* Init userpages pointer everywhere.
* Fix error handling when get_user_pages() fails.
* Add invalidation of unbound userpointer BOs again.

v5 (chk):
* Add maximum number of tries.

v6 (chk):
* Fix error handling when we run out of tries.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> (v4)
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 114
1 file changed, 110 insertions(+), 4 deletions(-)
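The key idea of the patch, visible in the hunks below, is to pin the user pages while no BO reservation is held and to re-take the reservation only to verify that the pages were not invalidated in the meantime, retrying a bounded number of times. The following is a minimal, self-contained user-space sketch of that ordering; get_pages_unlocked(), put_pages_unlocked(), pages_still_valid() and bind_pages_locked() are hypothetical stand-ins, not amdgpu or TTM functions.

/*
 * Minimal user-space sketch of the retry pattern introduced by this patch.
 * The helpers below are hypothetical stand-ins, not amdgpu/TTM functions:
 * get_pages_unlocked() plays the role of get_user_pages() done without the
 * reservation held, pages_still_valid() the invalidation check, and
 * bind_pages_locked() the actual binding under the reservation.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t reservation = PTHREAD_MUTEX_INITIALIZER;

static int  get_pages_unlocked(void) { return 0; }    /* may sleep/fault */
static void put_pages_unlocked(void) { }              /* like release_pages() */
static bool pages_still_valid(void)  { return true; } /* invalidation check */
static void bind_pages_locked(void)  { }              /* bind under the lock */

static int prepare_buffers(void)
{
	unsigned tries = 10;	/* same bound the patch uses */
	int r;

	for (;;) {
		/* Acquire the pages before taking the reservation, so the
		 * lock is never held across a call that can fault. */
		r = get_pages_unlocked();
		if (r)
			return r;

		pthread_mutex_lock(&reservation);
		if (pages_still_valid()) {
			bind_pages_locked();
			pthread_mutex_unlock(&reservation);
			return 0;
		}

		/* Somebody invalidated the pages in between: drop the lock,
		 * free the pages and try again, a bounded number of times. */
		pthread_mutex_unlock(&reservation);
		put_pages_unlocked();
		if (!--tries)
			return -EDEADLK;
	}
}

int main(void)
{
	printf("prepare_buffers() = %d\n", prepare_buffers());
	return 0;
}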
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7833dfb1ff6e..4f5ef4149e87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -111,6 +111,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	p->uf_entry.priority = 0;
 	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
 	p->uf_entry.tv.shared = true;
+	p->uf_entry.user_pages = NULL;
 
 	drm_gem_object_unreference_unlocked(gobj);
 	return 0;
@@ -297,6 +298,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 
 	list_for_each_entry(lobj, validated, tv.head) {
 		struct amdgpu_bo *bo = lobj->robj;
+		bool binding_userptr = false;
 		struct mm_struct *usermm;
 		uint32_t domain;
 
@@ -304,6 +306,15 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		if (usermm && usermm != current->mm)
 			return -EPERM;
 
+		/* Check if we have user pages and nobody bound the BO already */
+		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
+			size_t size = sizeof(struct page *);
+
+			size *= bo->tbo.ttm->num_pages;
+			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+			binding_userptr = true;
+		}
+
 		if (bo->pin_count)
 			continue;
 
@@ -334,6 +345,11 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			}
 			return r;
 		}
+
+		if (binding_userptr) {
+			drm_free_large(lobj->user_pages);
+			lobj->user_pages = NULL;
+		}
 	}
 	return 0;
 }
@@ -342,8 +358,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 				union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_bo_list_entry *e;
 	struct list_head duplicates;
 	bool need_mmap_lock = false;
+	unsigned i, tries = 10;
 	int r;
 
 	INIT_LIST_HEAD(&p->validated);
@@ -364,9 +382,81 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
 
-	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
-	if (unlikely(r != 0))
-		goto error_reserve;
+	while (1) {
+		struct list_head need_pages;
+		unsigned i;
+
+		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
+					   &duplicates);
+		if (unlikely(r != 0))
+			goto error_free_pages;
+
+		/* Without a BO list we don't have userptr BOs */
+		if (!p->bo_list)
+			break;
+
+		INIT_LIST_HEAD(&need_pages);
+		for (i = p->bo_list->first_userptr;
+		     i < p->bo_list->num_entries; ++i) {
+
+			e = &p->bo_list->array[i];
+
+			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+				 &e->user_invalidated) && e->user_pages) {
+
+				/* We acquired a page array, but somebody
+				 * invalidated it. Free it and try again
+				 */
+				release_pages(e->user_pages,
+					      e->robj->tbo.ttm->num_pages,
+					      false);
+				drm_free_large(e->user_pages);
+				e->user_pages = NULL;
+			}
+
+			if (e->robj->tbo.ttm->state != tt_bound &&
+			    !e->user_pages) {
+				list_del(&e->tv.head);
+				list_add(&e->tv.head, &need_pages);
+
+				amdgpu_bo_unreserve(e->robj);
+			}
+		}
+
+		if (list_empty(&need_pages))
+			break;
+
+		/* Unreserve everything again. */
+		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+
+		/* We tried too often, just abort */
+		if (!--tries) {
+			r = -EDEADLK;
+			goto error_free_pages;
+		}
+
+		/* Fill the page arrays for all userptrs. */
+		list_for_each_entry(e, &need_pages, tv.head) {
+			struct ttm_tt *ttm = e->robj->tbo.ttm;
+
+			e->user_pages = drm_calloc_large(ttm->num_pages,
+							 sizeof(struct page*));
+			if (!e->user_pages) {
+				r = -ENOMEM;
+				goto error_free_pages;
+			}
+
+			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
+			if (r) {
+				drm_free_large(e->user_pages);
+				e->user_pages = NULL;
+				goto error_free_pages;
+			}
+		}
+
+		/* And try again. */
+		list_splice(&need_pages, &p->validated);
+	}
 
 	amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
 
@@ -398,10 +488,26 @@ error_validate:
 		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 	}
 
-error_reserve:
+error_free_pages:
+
 	if (need_mmap_lock)
 		up_read(&current->mm->mmap_sem);
 
+	if (p->bo_list) {
+		for (i = p->bo_list->first_userptr;
+		     i < p->bo_list->num_entries; ++i) {
+			e = &p->bo_list->array[i];
+
+			if (!e->user_pages)
+				continue;
+
+			release_pages(e->user_pages,
+				      e->robj->tbo.ttm->num_pages,
+				      false);
+			drm_free_large(e->user_pages);
+		}
+	}
+
 	return r;
 }
 