about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
diff options
context:
space:
mode:
author    Thomas Gleixner <tglx@linutronix.de> 2018-06-22 15:20:35 -0400
committer Thomas Gleixner <tglx@linutronix.de> 2018-06-22 15:20:35 -0400
commit    7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch)
tree      879f18ccbe274122f2d4f095b43cbc7f953e0ada /drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
parent    48e315618dc4dc8904182cd221e3d395d5d97005 (diff)
parent    9ffc59d57228d74809700be6f7ecb1db10292f05 (diff)
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 127
1 file changed, 79 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6d08cde8443c..5e4e1bd90383 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -191,14 +191,21 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
191 u32 domain, struct amdgpu_bo **bo_ptr, 191 u32 domain, struct amdgpu_bo **bo_ptr,
192 u64 *gpu_addr, void **cpu_addr) 192 u64 *gpu_addr, void **cpu_addr)
193{ 193{
194 struct amdgpu_bo_param bp;
194 bool free = false; 195 bool free = false;
195 int r; 196 int r;
196 197
198 memset(&bp, 0, sizeof(bp));
199 bp.size = size;
200 bp.byte_align = align;
201 bp.domain = domain;
202 bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
203 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
204 bp.type = ttm_bo_type_kernel;
205 bp.resv = NULL;
206
197 if (!*bo_ptr) { 207 if (!*bo_ptr) {
198 r = amdgpu_bo_create(adev, size, align, domain, 208 r = amdgpu_bo_create(adev, &bp, bo_ptr);
199 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
200 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
201 ttm_bo_type_kernel, NULL, bo_ptr);
202 if (r) { 209 if (r) {
203 dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", 210 dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
204 r); 211 r);
@@ -341,27 +348,25 @@ fail:
341 return false; 348 return false;
342} 349}
343 350
344static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, 351static int amdgpu_bo_do_create(struct amdgpu_device *adev,
345 int byte_align, u32 domain, 352 struct amdgpu_bo_param *bp,
346 u64 flags, enum ttm_bo_type type,
347 struct reservation_object *resv,
348 struct amdgpu_bo **bo_ptr) 353 struct amdgpu_bo **bo_ptr)
349{ 354{
350 struct ttm_operation_ctx ctx = { 355 struct ttm_operation_ctx ctx = {
351 .interruptible = (type != ttm_bo_type_kernel), 356 .interruptible = (bp->type != ttm_bo_type_kernel),
352 .no_wait_gpu = false, 357 .no_wait_gpu = false,
353 .resv = resv, 358 .resv = bp->resv,
354 .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT 359 .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
355 }; 360 };
356 struct amdgpu_bo *bo; 361 struct amdgpu_bo *bo;
357 unsigned long page_align; 362 unsigned long page_align, size = bp->size;
358 size_t acc_size; 363 size_t acc_size;
359 int r; 364 int r;
360 365
361 page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; 366 page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
362 size = ALIGN(size, PAGE_SIZE); 367 size = ALIGN(size, PAGE_SIZE);
363 368
364 if (!amdgpu_bo_validate_size(adev, size, domain)) 369 if (!amdgpu_bo_validate_size(adev, size, bp->domain))
365 return -ENOMEM; 370 return -ENOMEM;
366 371
367 *bo_ptr = NULL; 372 *bo_ptr = NULL;
@@ -375,18 +380,14 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
375 drm_gem_private_object_init(adev->ddev, &bo->gem_base, size); 380 drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
376 INIT_LIST_HEAD(&bo->shadow_list); 381 INIT_LIST_HEAD(&bo->shadow_list);
377 INIT_LIST_HEAD(&bo->va); 382 INIT_LIST_HEAD(&bo->va);
378 bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | 383 bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
379 AMDGPU_GEM_DOMAIN_GTT | 384 bp->domain;
380 AMDGPU_GEM_DOMAIN_CPU |
381 AMDGPU_GEM_DOMAIN_GDS |
382 AMDGPU_GEM_DOMAIN_GWS |
383 AMDGPU_GEM_DOMAIN_OA);
384 bo->allowed_domains = bo->preferred_domains; 385 bo->allowed_domains = bo->preferred_domains;
385 if (type != ttm_bo_type_kernel && 386 if (bp->type != ttm_bo_type_kernel &&
386 bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) 387 bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
387 bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; 388 bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
388 389
389 bo->flags = flags; 390 bo->flags = bp->flags;
390 391
391#ifdef CONFIG_X86_32 392#ifdef CONFIG_X86_32
392 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit 393 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
@@ -417,11 +418,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
417#endif 418#endif
418 419
419 bo->tbo.bdev = &adev->mman.bdev; 420 bo->tbo.bdev = &adev->mman.bdev;
420 amdgpu_ttm_placement_from_domain(bo, domain); 421 amdgpu_ttm_placement_from_domain(bo, bp->domain);
422 if (bp->type == ttm_bo_type_kernel)
423 bo->tbo.priority = 1;
421 424
422 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, 425 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
423 &bo->placement, page_align, &ctx, acc_size, 426 &bo->placement, page_align, &ctx, acc_size,
424 NULL, resv, &amdgpu_ttm_bo_destroy); 427 NULL, bp->resv, &amdgpu_ttm_bo_destroy);
425 if (unlikely(r != 0)) 428 if (unlikely(r != 0))
426 return r; 429 return r;
427 430
@@ -433,10 +436,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
433 else 436 else
434 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); 437 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
435 438
436 if (type == ttm_bo_type_kernel) 439 if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
437 bo->tbo.priority = 1;
438
439 if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
440 bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { 440 bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
441 struct dma_fence *fence; 441 struct dma_fence *fence;
442 442
@@ -449,20 +449,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
449 bo->tbo.moving = dma_fence_get(fence); 449 bo->tbo.moving = dma_fence_get(fence);
450 dma_fence_put(fence); 450 dma_fence_put(fence);
451 } 451 }
452 if (!resv) 452 if (!bp->resv)
453 amdgpu_bo_unreserve(bo); 453 amdgpu_bo_unreserve(bo);
454 *bo_ptr = bo; 454 *bo_ptr = bo;
455 455
456 trace_amdgpu_bo_create(bo); 456 trace_amdgpu_bo_create(bo);
457 457
458 /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */ 458 /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
459 if (type == ttm_bo_type_device) 459 if (bp->type == ttm_bo_type_device)
460 bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 460 bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
461 461
462 return 0; 462 return 0;
463 463
464fail_unreserve: 464fail_unreserve:
465 if (!resv) 465 if (!bp->resv)
466 ww_mutex_unlock(&bo->tbo.resv->lock); 466 ww_mutex_unlock(&bo->tbo.resv->lock);
467 amdgpu_bo_unref(&bo); 467 amdgpu_bo_unref(&bo);
468 return r; 468 return r;
@@ -472,16 +472,22 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
472 unsigned long size, int byte_align, 472 unsigned long size, int byte_align,
473 struct amdgpu_bo *bo) 473 struct amdgpu_bo *bo)
474{ 474{
475 struct amdgpu_bo_param bp;
475 int r; 476 int r;
476 477
477 if (bo->shadow) 478 if (bo->shadow)
478 return 0; 479 return 0;
479 480
480 r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT, 481 memset(&bp, 0, sizeof(bp));
481 AMDGPU_GEM_CREATE_CPU_GTT_USWC | 482 bp.size = size;
482 AMDGPU_GEM_CREATE_SHADOW, 483 bp.byte_align = byte_align;
483 ttm_bo_type_kernel, 484 bp.domain = AMDGPU_GEM_DOMAIN_GTT;
484 bo->tbo.resv, &bo->shadow); 485 bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
486 AMDGPU_GEM_CREATE_SHADOW;
487 bp.type = ttm_bo_type_kernel;
488 bp.resv = bo->tbo.resv;
489
490 r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
485 if (!r) { 491 if (!r) {
486 bo->shadow->parent = amdgpu_bo_ref(bo); 492 bo->shadow->parent = amdgpu_bo_ref(bo);
487 mutex_lock(&adev->shadow_list_lock); 493 mutex_lock(&adev->shadow_list_lock);
@@ -492,28 +498,26 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
492 return r; 498 return r;
493} 499}
494 500
495int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, 501int amdgpu_bo_create(struct amdgpu_device *adev,
496 int byte_align, u32 domain, 502 struct amdgpu_bo_param *bp,
497 u64 flags, enum ttm_bo_type type,
498 struct reservation_object *resv,
499 struct amdgpu_bo **bo_ptr) 503 struct amdgpu_bo **bo_ptr)
500{ 504{
501 uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; 505 u64 flags = bp->flags;
502 int r; 506 int r;
503 507
504 r = amdgpu_bo_do_create(adev, size, byte_align, domain, 508 bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
505 parent_flags, type, resv, bo_ptr); 509 r = amdgpu_bo_do_create(adev, bp, bo_ptr);
506 if (r) 510 if (r)
507 return r; 511 return r;
508 512
509 if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) { 513 if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
510 if (!resv) 514 if (!bp->resv)
511 WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv, 515 WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
512 NULL)); 516 NULL));
513 517
514 r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); 518 r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));
515 519
516 if (!resv) 520 if (!bp->resv)
517 reservation_object_unlock((*bo_ptr)->tbo.resv); 521 reservation_object_unlock((*bo_ptr)->tbo.resv);
518 522
519 if (r) 523 if (r)
@@ -689,8 +693,17 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
689 return -EINVAL; 693 return -EINVAL;
690 694
691 /* A shared bo cannot be migrated to VRAM */ 695 /* A shared bo cannot be migrated to VRAM */
692 if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM)) 696 if (bo->prime_shared_count) {
693 return -EINVAL; 697 if (domain & AMDGPU_GEM_DOMAIN_GTT)
698 domain = AMDGPU_GEM_DOMAIN_GTT;
699 else
700 return -EINVAL;
701 }
702
703 /* This assumes only APU display buffers are pinned with (VRAM|GTT).
704 * See function amdgpu_display_supported_domains()
705 */
706 domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
694 707
695 if (bo->pin_count) { 708 if (bo->pin_count) {
696 uint32_t mem_type = bo->tbo.mem.mem_type; 709 uint32_t mem_type = bo->tbo.mem.mem_type;
@@ -838,6 +851,13 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
838 return amdgpu_ttm_init(adev); 851 return amdgpu_ttm_init(adev);
839} 852}
840 853
854int amdgpu_bo_late_init(struct amdgpu_device *adev)
855{
856 amdgpu_ttm_late_init(adev);
857
858 return 0;
859}
860
841void amdgpu_bo_fini(struct amdgpu_device *adev) 861void amdgpu_bo_fini(struct amdgpu_device *adev)
842{ 862{
843 amdgpu_ttm_fini(adev); 863 amdgpu_ttm_fini(adev);
@@ -1042,3 +1062,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
1042 1062
1043 return bo->tbo.offset; 1063 return bo->tbo.offset;
1044} 1064}
1065
1066uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
1067 uint32_t domain)
1068{
1069 if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
1070 domain = AMDGPU_GEM_DOMAIN_VRAM;
1071 if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
1072 domain = AMDGPU_GEM_DOMAIN_GTT;
1073 }
1074 return domain;
1075}