author    Dmitry Torokhov <dmitry.torokhov@gmail.com>    2017-05-02 12:48:26 -0400
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>    2017-05-02 12:48:26 -0400
commit    0337966d121ebebf73a1c346123e8112796e684e (patch)
tree      c0d4388591e72dc5a26ee976a9cbca70f6bafbbd /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
parent    7c5bb4ac2b76d2a09256aec8a7d584bf3e2b0466 (diff)
parent    8a038b83e012097a7ac6cfb9f6c5fac1da8fad6e (diff)
Merge branch 'next' into for-linus
Prepare input updates for 4.12 merge window.
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  93
1 file changed, 49 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index cd62f6ffde2a..106cf83c2e6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -202,6 +202,27 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	bool kernel = false;
 	int r;
 
+	/* reject invalid gem flags */
+	if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+				      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+				      AMDGPU_GEM_CREATE_VRAM_CLEARED |
+				      AMDGPU_GEM_CREATE_SHADOW |
+				      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+		r = -EINVAL;
+		goto error_unlock;
+	}
+	/* reject invalid gem domains */
+	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
+				 AMDGPU_GEM_DOMAIN_GTT |
+				 AMDGPU_GEM_DOMAIN_VRAM |
+				 AMDGPU_GEM_DOMAIN_GDS |
+				 AMDGPU_GEM_DOMAIN_GWS |
+				 AMDGPU_GEM_DOMAIN_OA)) {
+		r = -EINVAL;
+		goto error_unlock;
+	}
+
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -471,12 +492,15 @@ out:
 
 static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
 {
-	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
 	/* if anything is swapped out don't swap it in here,
 	   just abort and wait for the next CS */
+	if (!amdgpu_bo_gpu_accessible(bo))
+		return -ERESTARTSYS;
+
+	if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
+		return -ERESTARTSYS;
 
-	return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+	return 0;
 }
 
 /**
@@ -484,62 +508,44 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
  *
  * @adev: amdgpu_device pointer
  * @bo_va: bo_va to update
+ * @list: validation list
+ * @operation: map or unmap
  *
- * Update the bo_va directly after setting it's address. Errors are not
+ * Update the bo_va directly after setting its address. Errors are not
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 				    struct amdgpu_bo_va *bo_va,
+				    struct list_head *list,
 				    uint32_t operation)
 {
-	struct ttm_validate_buffer tv, *entry;
-	struct amdgpu_bo_list_entry vm_pd;
-	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
-	unsigned domain;
-	int r;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
-
-	tv.bo = &bo_va->bo->tbo;
-	tv.shared = true;
-	list_add(&tv.head, &list);
-
-	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
-
-	/* Provide duplicates to avoid -EALREADY */
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
-	if (r)
-		goto error_print;
-
-	list_for_each_entry(entry, &list, head) {
-		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
-		/* if anything is swapped out don't swap it in here,
-		   just abort and wait for the next CS */
-		if (domain == AMDGPU_GEM_DOMAIN_CPU)
-			goto error_unreserve;
+	struct ttm_validate_buffer *entry;
+	int r = -ERESTARTSYS;
+
+	list_for_each_entry(entry, list, head) {
+		struct amdgpu_bo *bo =
+			container_of(entry->bo, struct amdgpu_bo, tbo);
+		if (amdgpu_gem_va_check(NULL, bo))
+			goto error;
 	}
+
 	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
 				      NULL);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	if (operation == AMDGPU_VA_OP_MAP)
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 
-error_unreserve:
-	ttm_eu_backoff_reservation(&ticket, &list);
-
-error_print:
+error:
 	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
@@ -556,7 +562,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_bo_list_entry vm_pd;
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
+	struct list_head list;
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
@@ -594,14 +600,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	abo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
 	tv.bo = &abo->tbo;
-	tv.shared = true;
+	tv.shared = false;
 	list_add(&tv.head, &list);
 
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r) {
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
@@ -632,10 +637,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	default:
 		break;
 	}
-	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
 	    !amdgpu_vm_debug)
-		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
+		amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
+	ttm_eu_backoff_reservation(&ticket, &list);
 
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
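
The first hunk adds a common UAPI hardening pattern: mask the user-supplied bitfields against the set of known bits and fail with -EINVAL if anything else is set, so future flag additions stay opt-in. The standalone sketch below only illustrates that pattern; the VALID_* masks and the check_create_args() helper are made-up placeholders, not the real AMDGPU UAPI values or kernel code.

/* Illustrative userspace sketch of the mask-based validation added in
 * amdgpu_gem_create_ioctl() above. Not kernel code: the masks and the
 * helper name are placeholders for the example. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define VALID_CREATE_FLAGS 0x3full	/* placeholder for the known CREATE_* flag bits */
#define VALID_DOMAINS      0x3full	/* placeholder for CPU|GTT|VRAM|GDS|GWS|OA */

static int check_create_args(uint64_t domain_flags, uint64_t domains)
{
	/* reject any flag bit outside the whitelist */
	if (domain_flags & ~VALID_CREATE_FLAGS)
		return -EINVAL;

	/* reject any domain bit outside the whitelist */
	if (domains & ~VALID_DOMAINS)
		return -EINVAL;

	return 0;
}

int main(void)
{
	printf("%d\n", check_create_args(0x04, 0x02));	/* 0: only known bits set */
	printf("%d\n", check_create_args(0x100, 0x02));	/* -EINVAL: unknown flag bit */
	return 0;
}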