diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2017-02-26 15:34:42 -0500 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2017-02-26 15:34:42 -0500 |
commit | 8e22e1b3499a446df48c2b26667ca36c55bf864c (patch) | |
tree | 5329f98b3eb3c95a9dcbab0fa4f9b6e62f0e788d /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |
parent | 00d3c14f14d51babd8aeafd5fa734ccf04f5ca3d (diff) | |
parent | 64a577196d66b44e37384bc5c4d78c61f59d5b2a (diff) |
Merge airlied/drm-next into drm-misc-next
Backmerge the main pull request to sync up with all the newly landed
drivers. Otherwise we'll have chaos even before 4.12 starts in
earnest.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 60 |
1 file changed, 18 insertions, 42 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 9bd1b4eae32e..51d759463384 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -487,67 +487,44 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo) | |||
487 | * | 487 | * |
488 | * @adev: amdgpu_device pointer | 488 | * @adev: amdgpu_device pointer |
489 | * @bo_va: bo_va to update | 489 | * @bo_va: bo_va to update |
490 | * @list: validation list | ||
491 | * @operation: map or unmap | ||
490 | * | 492 | * |
491 | * Update the bo_va directly after setting it's address. Errors are not | 493 | * Update the bo_va directly after setting its address. Errors are not |
492 | * vital here, so they are not reported back to userspace. | 494 | * vital here, so they are not reported back to userspace. |
493 | */ | 495 | */ |
494 | static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | 496 | static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, |
495 | struct amdgpu_bo_va *bo_va, | 497 | struct amdgpu_bo_va *bo_va, |
498 | struct list_head *list, | ||
496 | uint32_t operation) | 499 | uint32_t operation) |
497 | { | 500 | { |
498 | struct ttm_validate_buffer tv, *entry; | 501 | struct ttm_validate_buffer *entry; |
499 | struct amdgpu_bo_list_entry vm_pd; | 502 | int r = -ERESTARTSYS; |
500 | struct ww_acquire_ctx ticket; | ||
501 | struct list_head list, duplicates; | ||
502 | int r; | ||
503 | |||
504 | INIT_LIST_HEAD(&list); | ||
505 | INIT_LIST_HEAD(&duplicates); | ||
506 | |||
507 | tv.bo = &bo_va->bo->tbo; | ||
508 | tv.shared = true; | ||
509 | list_add(&tv.head, &list); | ||
510 | |||
511 | amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd); | ||
512 | 503 | ||
513 | /* Provide duplicates to avoid -EALREADY */ | 504 | list_for_each_entry(entry, list, head) { |
514 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); | ||
515 | if (r) | ||
516 | goto error_print; | ||
517 | |||
518 | list_for_each_entry(entry, &list, head) { | ||
519 | struct amdgpu_bo *bo = | 505 | struct amdgpu_bo *bo = |
520 | container_of(entry->bo, struct amdgpu_bo, tbo); | 506 | container_of(entry->bo, struct amdgpu_bo, tbo); |
521 | 507 | if (amdgpu_gem_va_check(NULL, bo)) | |
522 | /* if anything is swapped out don't swap it in here, | 508 | goto error; |
523 | just abort and wait for the next CS */ | ||
524 | if (!amdgpu_bo_gpu_accessible(bo)) | ||
525 | goto error_unreserve; | ||
526 | |||
527 | if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) | ||
528 | goto error_unreserve; | ||
529 | } | 509 | } |
530 | 510 | ||
531 | r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check, | 511 | r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check, |
532 | NULL); | 512 | NULL); |
533 | if (r) | 513 | if (r) |
534 | goto error_unreserve; | 514 | goto error; |
535 | 515 | ||
536 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); | 516 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); |
537 | if (r) | 517 | if (r) |
538 | goto error_unreserve; | 518 | goto error; |
539 | 519 | ||
540 | r = amdgpu_vm_clear_freed(adev, bo_va->vm); | 520 | r = amdgpu_vm_clear_freed(adev, bo_va->vm); |
541 | if (r) | 521 | if (r) |
542 | goto error_unreserve; | 522 | goto error; |
543 | 523 | ||
544 | if (operation == AMDGPU_VA_OP_MAP) | 524 | if (operation == AMDGPU_VA_OP_MAP) |
545 | r = amdgpu_vm_bo_update(adev, bo_va, false); | 525 | r = amdgpu_vm_bo_update(adev, bo_va, false); |
546 | 526 | ||
547 | error_unreserve: | 527 | error: |
548 | ttm_eu_backoff_reservation(&ticket, &list); | ||
549 | |||
550 | error_print: | ||
551 | if (r && r != -ERESTARTSYS) | 528 | if (r && r != -ERESTARTSYS) |
552 | DRM_ERROR("Couldn't update BO_VA (%d)\n", r); | 529 | DRM_ERROR("Couldn't update BO_VA (%d)\n", r); |
553 | } | 530 | } |
@@ -564,7 +541,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
564 | struct amdgpu_bo_list_entry vm_pd; | 541 | struct amdgpu_bo_list_entry vm_pd; |
565 | struct ttm_validate_buffer tv; | 542 | struct ttm_validate_buffer tv; |
566 | struct ww_acquire_ctx ticket; | 543 | struct ww_acquire_ctx ticket; |
567 | struct list_head list, duplicates; | 544 | struct list_head list; |
568 | uint32_t invalid_flags, va_flags = 0; | 545 | uint32_t invalid_flags, va_flags = 0; |
569 | int r = 0; | 546 | int r = 0; |
570 | 547 | ||
@@ -602,14 +579,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
602 | return -ENOENT; | 579 | return -ENOENT; |
603 | abo = gem_to_amdgpu_bo(gobj); | 580 | abo = gem_to_amdgpu_bo(gobj); |
604 | INIT_LIST_HEAD(&list); | 581 | INIT_LIST_HEAD(&list); |
605 | INIT_LIST_HEAD(&duplicates); | ||
606 | tv.bo = &abo->tbo; | 582 | tv.bo = &abo->tbo; |
607 | tv.shared = true; | 583 | tv.shared = false; |
608 | list_add(&tv.head, &list); | 584 | list_add(&tv.head, &list); |
609 | 585 | ||
610 | amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); | 586 | amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); |
611 | 587 | ||
612 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); | 588 | r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); |
613 | if (r) { | 589 | if (r) { |
614 | drm_gem_object_unreference_unlocked(gobj); | 590 | drm_gem_object_unreference_unlocked(gobj); |
615 | return r; | 591 | return r; |
@@ -640,10 +616,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
640 | default: | 616 | default: |
641 | break; | 617 | break; |
642 | } | 618 | } |
643 | ttm_eu_backoff_reservation(&ticket, &list); | ||
644 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && | 619 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && |
645 | !amdgpu_vm_debug) | 620 | !amdgpu_vm_debug) |
646 | amdgpu_gem_va_update_vm(adev, bo_va, args->operation); | 621 | amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation); |
622 | ttm_eu_backoff_reservation(&ticket, &list); | ||
647 | 623 | ||
648 | drm_gem_object_unreference_unlocked(gobj); | 624 | drm_gem_object_unreference_unlocked(gobj); |
649 | return r; | 625 | return r; |