aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2018-02-16 09:47:26 -0500
committerThomas Gleixner <tglx@linutronix.de>2018-02-16 09:47:26 -0500
commit6dee6ae9d62642e81def4d461d71f13a6496ab59 (patch)
tree6c75d416c427a59f190e197ad83fe59b7bebf656 /drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
parent1beaeacdc88b537703d04d5536235d0bbb36db93 (diff)
parent0b24a0bbe2147815d982d9335c41bb10c04f40bc (diff)
Merge tag 'irqchip-4.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/urgent
Pull irqchip updates for 4.16-rc2 from Marc Zyngier - A MIPS GIC fix for spurious, masked interrupts - A fix for a subtle IPI bug in GICv3 - Do not probe GICv3 ITSs that are marked as disabled - Multi-MSI support for GICv2m - Various cleanups
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 107
1 file changed, 84 insertions, 23 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ea25164e7f4b..5c4c3e0d527b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -37,6 +37,18 @@
37#include "amdgpu.h" 37#include "amdgpu.h"
38#include "amdgpu_trace.h" 38#include "amdgpu_trace.h"
39 39
40static bool amdgpu_need_backup(struct amdgpu_device *adev)
41{
42 if (adev->flags & AMD_IS_APU)
43 return false;
44
45 if (amdgpu_gpu_recovery == 0 ||
46 (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
47 return false;
48
49 return true;
50}
51
40static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) 52static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
41{ 53{
42 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 54 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
@@ -281,6 +293,44 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
281 *cpu_addr = NULL; 293 *cpu_addr = NULL;
282} 294}
283 295
296/* Validate that the requested BO size fits within the total memory of the requested domain */
297static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
298 unsigned long size, u32 domain)
299{
300 struct ttm_mem_type_manager *man = NULL;
301
302 /*
303 * If GTT is part of requested domains the check must succeed to
304 * allow fall back to GTT
305 */
306 if (domain & AMDGPU_GEM_DOMAIN_GTT) {
307 man = &adev->mman.bdev.man[TTM_PL_TT];
308
309 if (size < (man->size << PAGE_SHIFT))
310 return true;
311 else
312 goto fail;
313 }
314
315 if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
316 man = &adev->mman.bdev.man[TTM_PL_VRAM];
317
318 if (size < (man->size << PAGE_SHIFT))
319 return true;
320 else
321 goto fail;
322 }
323
324
325 /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
326 return true;
327
328fail:
329 DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
330 man->size << PAGE_SHIFT);
331 return false;
332}
333
284static int amdgpu_bo_do_create(struct amdgpu_device *adev, 334static int amdgpu_bo_do_create(struct amdgpu_device *adev,
285 unsigned long size, int byte_align, 335 unsigned long size, int byte_align,
286 bool kernel, u32 domain, u64 flags, 336 bool kernel, u32 domain, u64 flags,
@@ -289,16 +339,24 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
289 uint64_t init_value, 339 uint64_t init_value,
290 struct amdgpu_bo **bo_ptr) 340 struct amdgpu_bo **bo_ptr)
291{ 341{
342 struct ttm_operation_ctx ctx = {
343 .interruptible = !kernel,
344 .no_wait_gpu = false,
345 .allow_reserved_eviction = true,
346 .resv = resv
347 };
292 struct amdgpu_bo *bo; 348 struct amdgpu_bo *bo;
293 enum ttm_bo_type type; 349 enum ttm_bo_type type;
294 unsigned long page_align; 350 unsigned long page_align;
295 u64 initial_bytes_moved, bytes_moved;
296 size_t acc_size; 351 size_t acc_size;
297 int r; 352 int r;
298 353
299 page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; 354 page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
300 size = ALIGN(size, PAGE_SIZE); 355 size = ALIGN(size, PAGE_SIZE);
301 356
357 if (!amdgpu_bo_validate_size(adev, size, domain))
358 return -ENOMEM;
359
302 if (kernel) { 360 if (kernel) {
303 type = ttm_bo_type_kernel; 361 type = ttm_bo_type_kernel;
304 } else if (sg) { 362 } else if (sg) {
@@ -364,22 +422,19 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
364 bo->tbo.bdev = &adev->mman.bdev; 422 bo->tbo.bdev = &adev->mman.bdev;
365 amdgpu_ttm_placement_from_domain(bo, domain); 423 amdgpu_ttm_placement_from_domain(bo, domain);
366 424
367 initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
368 /* Kernel allocation are uninterruptible */
369 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, 425 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
370 &bo->placement, page_align, !kernel, NULL, 426 &bo->placement, page_align, &ctx, NULL,
371 acc_size, sg, resv, &amdgpu_ttm_bo_destroy); 427 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
372 if (unlikely(r != 0)) 428 if (unlikely(r != 0))
373 return r; 429 return r;
374 430
375 bytes_moved = atomic64_read(&adev->num_bytes_moved) -
376 initial_bytes_moved;
377 if (adev->mc.visible_vram_size < adev->mc.real_vram_size && 431 if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
378 bo->tbo.mem.mem_type == TTM_PL_VRAM && 432 bo->tbo.mem.mem_type == TTM_PL_VRAM &&
379 bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) 433 bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
380 amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved); 434 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
435 ctx.bytes_moved);
381 else 436 else
382 amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0); 437 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
383 438
384 if (kernel) 439 if (kernel)
385 bo->tbo.priority = 1; 440 bo->tbo.priority = 1;
@@ -511,6 +566,7 @@ err:
511 566
512int amdgpu_bo_validate(struct amdgpu_bo *bo) 567int amdgpu_bo_validate(struct amdgpu_bo *bo)
513{ 568{
569 struct ttm_operation_ctx ctx = { false, false };
514 uint32_t domain; 570 uint32_t domain;
515 int r; 571 int r;
516 572
@@ -521,7 +577,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
521 577
522retry: 578retry:
523 amdgpu_ttm_placement_from_domain(bo, domain); 579 amdgpu_ttm_placement_from_domain(bo, domain);
524 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 580 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
525 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { 581 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
526 domain = bo->allowed_domains; 582 domain = bo->allowed_domains;
527 goto retry; 583 goto retry;
@@ -632,6 +688,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
632 u64 *gpu_addr) 688 u64 *gpu_addr)
633{ 689{
634 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 690 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
691 struct ttm_operation_ctx ctx = { false, false };
635 int r, i; 692 int r, i;
636 693
637 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) 694 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -647,7 +704,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
647 if (bo->pin_count) { 704 if (bo->pin_count) {
648 uint32_t mem_type = bo->tbo.mem.mem_type; 705 uint32_t mem_type = bo->tbo.mem.mem_type;
649 706
650 if (domain != amdgpu_mem_type_to_domain(mem_type)) 707 if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
651 return -EINVAL; 708 return -EINVAL;
652 709
653 bo->pin_count++; 710 bo->pin_count++;
@@ -682,21 +739,23 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
682 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 739 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
683 } 740 }
684 741
685 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 742 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
686 if (unlikely(r)) { 743 if (unlikely(r)) {
687 dev_err(adev->dev, "%p pin failed\n", bo); 744 dev_err(adev->dev, "%p pin failed\n", bo);
688 goto error; 745 goto error;
689 } 746 }
690 747
748 r = amdgpu_ttm_alloc_gart(&bo->tbo);
749 if (unlikely(r)) {
750 dev_err(adev->dev, "%p bind failed\n", bo);
751 goto error;
752 }
753
691 bo->pin_count = 1; 754 bo->pin_count = 1;
692 if (gpu_addr != NULL) { 755 if (gpu_addr != NULL)
693 r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
694 if (unlikely(r)) {
695 dev_err(adev->dev, "%p bind failed\n", bo);
696 goto error;
697 }
698 *gpu_addr = amdgpu_bo_gpu_offset(bo); 756 *gpu_addr = amdgpu_bo_gpu_offset(bo);
699 } 757
758 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
700 if (domain == AMDGPU_GEM_DOMAIN_VRAM) { 759 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
701 adev->vram_pin_size += amdgpu_bo_size(bo); 760 adev->vram_pin_size += amdgpu_bo_size(bo);
702 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) 761 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
@@ -717,6 +776,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
717int amdgpu_bo_unpin(struct amdgpu_bo *bo) 776int amdgpu_bo_unpin(struct amdgpu_bo *bo)
718{ 777{
719 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 778 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
779 struct ttm_operation_ctx ctx = { false, false };
720 int r, i; 780 int r, i;
721 781
722 if (!bo->pin_count) { 782 if (!bo->pin_count) {
@@ -730,7 +790,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
730 bo->placements[i].lpfn = 0; 790 bo->placements[i].lpfn = 0;
731 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 791 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
732 } 792 }
733 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 793 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
734 if (unlikely(r)) { 794 if (unlikely(r)) {
735 dev_err(adev->dev, "%p validate failed for unpin\n", bo); 795 dev_err(adev->dev, "%p validate failed for unpin\n", bo);
736 goto error; 796 goto error;
@@ -779,8 +839,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
779 adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base, 839 adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
780 adev->mc.aper_size); 840 adev->mc.aper_size);
781 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", 841 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
782 adev->mc.mc_vram_size >> 20, 842 adev->mc.mc_vram_size >> 20,
783 (unsigned long long)adev->mc.aper_size >> 20); 843 (unsigned long long)adev->mc.aper_size >> 20);
784 DRM_INFO("RAM width %dbits %s\n", 844 DRM_INFO("RAM width %dbits %s\n",
785 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]); 845 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
786 return amdgpu_ttm_init(adev); 846 return amdgpu_ttm_init(adev);
@@ -902,6 +962,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
902int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 962int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
903{ 963{
904 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 964 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
965 struct ttm_operation_ctx ctx = { false, false };
905 struct amdgpu_bo *abo; 966 struct amdgpu_bo *abo;
906 unsigned long offset, size; 967 unsigned long offset, size;
907 int r; 968 int r;
@@ -935,7 +996,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
935 abo->placement.num_busy_placement = 1; 996 abo->placement.num_busy_placement = 1;
936 abo->placement.busy_placement = &abo->placements[1]; 997 abo->placement.busy_placement = &abo->placements[1];
937 998
938 r = ttm_bo_validate(bo, &abo->placement, false, false); 999 r = ttm_bo_validate(bo, &abo->placement, &ctx);
939 if (unlikely(r != 0)) 1000 if (unlikely(r != 0))
940 return r; 1001 return r;
941 1002
@@ -980,7 +1041,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
980{ 1041{
981 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); 1042 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
982 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT && 1043 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
983 !amdgpu_ttm_is_bound(bo->tbo.ttm)); 1044 !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
984 WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && 1045 WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
985 !bo->pin_count); 1046 !bo->pin_count);
986 WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); 1047 WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);