Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 90
1 file changed, 67 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ea25164e7f4b..dc0a8be98043 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -281,6 +281,44 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
         *cpu_addr = NULL;
 }
 
+/* Validate that the bo size fits within the total memory of the requested domain */
+static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+                                    unsigned long size, u32 domain)
+{
+        struct ttm_mem_type_manager *man = NULL;
+
+        /*
+         * If GTT is part of requested domains the check must succeed to
+         * allow fall back to GTT
+         */
+        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
+                man = &adev->mman.bdev.man[TTM_PL_TT];
+
+                if (size < (man->size << PAGE_SHIFT))
+                        return true;
+                else
+                        goto fail;
+        }
+
+        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+                man = &adev->mman.bdev.man[TTM_PL_VRAM];
+
+                if (size < (man->size << PAGE_SHIFT))
+                        return true;
+                else
+                        goto fail;
+        }
+
+
+        /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
+        return true;
+
+fail:
+        DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+                  man->size << PAGE_SHIFT);
+        return false;
+}
+
 static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                                unsigned long size, int byte_align,
                                bool kernel, u32 domain, u64 flags,
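The check runs before any TTM placement work, so an impossible request now fails fast instead of first evicting everything in the domain. A hedged sketch of the effect, using the amdgpu_bo_create() wrapper whose parameter list mirrors amdgpu_bo_do_create() above; the 8 GiB board and 12 GiB request are hypothetical:

    struct amdgpu_bo *bo;
    int r;

    /* 12 GiB VRAM-only request on a hypothetical 8 GiB card: man->size
     * counts pages, so size >= (man->size << PAGE_SHIFT) and the new
     * amdgpu_bo_validate_size() rejects the create up front. */
    r = amdgpu_bo_create(adev, 12ULL << 30, PAGE_SIZE, false,
                         AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, NULL, 0, &bo);
    /* r == -ENOMEM, with a DRM_DEBUG line noting BO size > domain size */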
@@ -289,16 +327,19 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                                uint64_t init_value,
                                struct amdgpu_bo **bo_ptr)
 {
+        struct ttm_operation_ctx ctx = { !kernel, false };
         struct amdgpu_bo *bo;
         enum ttm_bo_type type;
         unsigned long page_align;
-        u64 initial_bytes_moved, bytes_moved;
         size_t acc_size;
         int r;
 
         page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
         size = ALIGN(size, PAGE_SIZE);
 
+        if (!amdgpu_bo_validate_size(adev, size, domain))
+                return -ENOMEM;
+
         if (kernel) {
                 type = ttm_bo_type_kernel;
         } else if (sg) {
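For context, the ttm_operation_ctx introduced by the TTM side of this series (include/drm/ttm/ttm_bo_api.h) is roughly:

    struct ttm_operation_ctx {
            bool interruptible;   /* sleep interruptibly when waiting for space */
            bool no_wait_gpu;     /* fail rather than wait for GPU work to finish */
            uint64_t bytes_moved; /* filled in by TTM: bytes copied while validating */
    };

So the { !kernel, false } initializer preserves the old behaviour: user BOs validate interruptibly, kernel BOs do not, which is why the "Kernel allocation are uninterruptible" comment removed below became redundant.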
@@ -364,22 +405,19 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
         bo->tbo.bdev = &adev->mman.bdev;
         amdgpu_ttm_placement_from_domain(bo, domain);
 
-        initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-        /* Kernel allocation are uninterruptible */
         r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
-                                 &bo->placement, page_align, !kernel, NULL,
+                                 &bo->placement, page_align, &ctx, NULL,
                                  acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
         if (unlikely(r != 0))
                 return r;
 
-        bytes_moved = atomic64_read(&adev->num_bytes_moved) -
-                      initial_bytes_moved;
         if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
             bo->tbo.mem.mem_type == TTM_PL_VRAM &&
             bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
-                amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
+                                             ctx.bytes_moved);
         else
-                amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
+                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
         if (kernel)
                 bo->tbo.priority = 1;
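Since TTM now tallies move costs per operation in ctx.bytes_moved, the snapshot/delta dance against the global num_bytes_moved counter is gone, and moves performed concurrently by other threads can no longer leak into this BO's accounting. A minimal sketch of the new pattern, assuming an already-reserved BO:

    struct ttm_operation_ctx ctx = { true, false };
    int r;

    r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    if (r)
            return r;
    /* ctx.bytes_moved covers only this validation's moves */
    amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);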
@@ -511,6 +549,7 @@ err:
 
 int amdgpu_bo_validate(struct amdgpu_bo *bo)
 {
+        struct ttm_operation_ctx ctx = { false, false };
         uint32_t domain;
         int r;
 
@@ -521,7 +560,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
 
 retry:
         amdgpu_ttm_placement_from_domain(bo, domain);
-        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
         if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                 domain = bo->allowed_domains;
                 goto retry;
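Every ttm_bo_validate() call site in this file changes the same way, following the new prototype:

    int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_operation_ctx *ctx);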
@@ -632,6 +671,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                              u64 *gpu_addr)
 {
         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+        struct ttm_operation_ctx ctx = { false, false };
         int r, i;
 
         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -647,7 +687,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
         if (bo->pin_count) {
                 uint32_t mem_type = bo->tbo.mem.mem_type;
 
-                if (domain != amdgpu_mem_type_to_domain(mem_type))
+                if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
                         return -EINVAL;
 
                 bo->pin_count++;
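Switching from != to a bitwise test lets a re-pin succeed whenever the BO already resides in any one of the requested domains, not only when the request names exactly the current domain. Illustrative values:

    /* BO resident in VRAM; caller re-pins with GTT | VRAM allowed. */
    u32 domain = AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM;
    u32 cur = amdgpu_mem_type_to_domain(TTM_PL_VRAM);

    /* old test: domain != cur   -> true, returned -EINVAL          */
    /* new test: !(domain & cur) -> false, pin_count++ as intended  */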
@@ -682,21 +722,23 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
         }
 
-        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
         if (unlikely(r)) {
                 dev_err(adev->dev, "%p pin failed\n", bo);
                 goto error;
         }
 
+        r = amdgpu_ttm_alloc_gart(&bo->tbo);
+        if (unlikely(r)) {
+                dev_err(adev->dev, "%p bind failed\n", bo);
+                goto error;
+        }
+
         bo->pin_count = 1;
-        if (gpu_addr != NULL) {
-                r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
-                if (unlikely(r)) {
-                        dev_err(adev->dev, "%p bind failed\n", bo);
-                        goto error;
-                }
+        if (gpu_addr != NULL)
                 *gpu_addr = amdgpu_bo_gpu_offset(bo);
-        }
+
+        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
         if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                 adev->vram_pin_size += amdgpu_bo_size(bo);
                 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
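Two behavioural changes land in this hunk. First, amdgpu_ttm_alloc_gart() now runs on every pin, so a GTT BO gets its GART address even when the caller passes gpu_addr == NULL, and amdgpu_bo_gpu_offset() is valid afterwards either way. Second, domain is recomputed from the actual placement before the pin-size accounting, so a GTT|VRAM request that TTM satisfied from GTT is charged to gart_pin_size (the else branch sits just past this hunk's context) rather than to vram_pin_size. Sketch with a hypothetical placement:

    /* Request was GTT | VRAM; TTM placed the BO in GTT. */
    domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); /* GTT */
    if (domain == AMDGPU_GEM_DOMAIN_VRAM)
            adev->vram_pin_size += amdgpu_bo_size(bo);  /* skipped */
    else if (domain == AMDGPU_GEM_DOMAIN_GTT)
            adev->gart_pin_size += amdgpu_bo_size(bo);  /* charged */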
@@ -717,6 +759,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+        struct ttm_operation_ctx ctx = { false, false };
         int r, i;
 
         if (!bo->pin_count) {
@@ -730,7 +773,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
                 bo->placements[i].lpfn = 0;
                 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
         }
-        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
         if (unlikely(r)) {
                 dev_err(adev->dev, "%p validate failed for unpin\n", bo);
                 goto error;
@@ -779,8 +822,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
         adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
                                               adev->mc.aper_size);
         DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
-                adev->mc.mc_vram_size >> 20,
-                (unsigned long long)adev->mc.aper_size >> 20);
+                 adev->mc.mc_vram_size >> 20,
+                 (unsigned long long)adev->mc.aper_size >> 20);
         DRM_INFO("RAM width %dbits %s\n",
                  adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
         return amdgpu_ttm_init(adev);
@@ -902,6 +945,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+        struct ttm_operation_ctx ctx = { false, false };
         struct amdgpu_bo *abo;
         unsigned long offset, size;
         int r;
@@ -935,7 +979,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
         abo->placement.num_busy_placement = 1;
         abo->placement.busy_placement = &abo->placements[1];
 
-        r = ttm_bo_validate(bo, &abo->placement, false, false);
+        r = ttm_bo_validate(bo, &abo->placement, &ctx);
         if (unlikely(r != 0))
                 return r;
 
@@ -980,7 +1024,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
         WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
         WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
-                     !amdgpu_ttm_is_bound(bo->tbo.ttm));
+                     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
         WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
                      !bo->pin_count);
         WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
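The last WARN change matches the alloc_gart rework: instead of probing the TTM object with amdgpu_ttm_is_bound(), the check asks the GTT manager whether the placement carries a real GART address. Judging from the matching amdgpu_gtt_mgr change in this series, the helper is presumably little more than:

    bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
    {
            struct drm_mm_node *node = mem->mm_node;

            return (node->start != AMDGPU_BO_INVALID_OFFSET);
    }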