author     Tony Lindgren <tony@atomide.com>  2017-11-28 11:12:32 -0500
committer  Tony Lindgren <tony@atomide.com>  2017-11-28 11:12:32 -0500
commit     bc686442f8a601bccac1f22506ecdb4b0d62cadd
tree       b224ab4aa2350b233da640f5850f48bc6bfeb2d0 /drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
parent     60636a5d0fa2f8bc6d0c23c4027100ba20866f9b
parent     ca41e244517d6d3f1600c229ff7ca615049c1e9c
Merge branch 'dts-fixes' into omap-for-v4.15/fixes-dt
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 122
1 file changed, 38 insertions(+), 84 deletions(-)
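The amdgpu portion of this merge folds the BO placement helpers into a single path: amdgpu_ttm_placement_init() and amdgpu_fill_placement_to_bo() are removed, amdgpu_ttm_placement_from_domain() now derives everything from the BO itself, and amdgpu_bo_create_restricted() becomes the file-local amdgpu_bo_do_create(). A hypothetical caller, sketched only from the signatures visible in this diff (example_create_gtt_bo is not part of the patch), now goes through amdgpu_bo_create() without pre-building a ttm_placement:

/* Hypothetical example, not from the patch: callers no longer build a
 * ttm_placement/ttm_place array by hand; amdgpu_bo_do_create() derives
 * placement from the domain and flags via
 * amdgpu_ttm_placement_from_domain(). */
static int example_create_gtt_bo(struct amdgpu_device *adev,
				 unsigned long size,
				 struct amdgpu_bo **bo_ptr)
{
	return amdgpu_bo_create(adev, size, PAGE_SIZE, true,
				AMDGPU_GEM_DOMAIN_GTT,
				AMDGPU_GEM_CREATE_CPU_GTT_USWC,
				NULL, NULL, 0, bo_ptr);
}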
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 9e495da0bb03..ea25164e7f4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -40,9 +40,7 @@
 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
-	struct amdgpu_bo *bo;
-
-	bo = container_of(tbo, struct amdgpu_bo, tbo);
+	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
 
 	amdgpu_bo_kunmap(bo);
 
@@ -64,11 +62,12 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 	return false;
 }
 
-static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
-				      struct ttm_placement *placement,
-				      struct ttm_place *places,
-				      u32 domain, u64 flags)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+	struct ttm_placement *placement = &abo->placement;
+	struct ttm_place *places = abo->placements;
+	u64 flags = abo->flags;
 	u32 c = 0;
 
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
@@ -151,27 +150,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 	placement->busy_placement = places;
 }
 
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
-{
-	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
-
-	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
-				  domain, abo->flags);
-}
-
-static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
-					struct ttm_placement *placement)
-{
-	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
-
-	memcpy(bo->placements, placement->placement,
-	       placement->num_placement * sizeof(struct ttm_place));
-	bo->placement.num_placement = placement->num_placement;
-	bo->placement.num_busy_placement = placement->num_busy_placement;
-	bo->placement.placement = bo->placements;
-	bo->placement.busy_placement = bo->placements;
-}
-
 /**
  * amdgpu_bo_create_reserved - create reserved BO for kernel use
  *
@@ -303,14 +281,13 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
 	*cpu_addr = NULL;
 }
 
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
-				unsigned long size, int byte_align,
-				bool kernel, u32 domain, u64 flags,
-				struct sg_table *sg,
-				struct ttm_placement *placement,
-				struct reservation_object *resv,
-				uint64_t init_value,
-				struct amdgpu_bo **bo_ptr)
+static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+			       unsigned long size, int byte_align,
+			       bool kernel, u32 domain, u64 flags,
+			       struct sg_table *sg,
+			       struct reservation_object *resv,
+			       uint64_t init_value,
+			       struct amdgpu_bo **bo_ptr)
 {
 	struct amdgpu_bo *bo;
 	enum ttm_bo_type type;
@@ -384,13 +361,17 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 #endif
 
-	amdgpu_fill_placement_to_bo(bo, placement);
-	/* Kernel allocation are uninterruptible */
+	bo->tbo.bdev = &adev->mman.bdev;
+	amdgpu_ttm_placement_from_domain(bo, domain);
 
 	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+	/* Kernel allocation are uninterruptible */
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
 				 &bo->placement, page_align, !kernel, NULL,
 				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
+	if (unlikely(r != 0))
+		return r;
+
 	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
 		      initial_bytes_moved;
 	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
@@ -400,9 +381,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	else
 		amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
 
-	if (unlikely(r != 0))
-		return r;
-
 	if (kernel)
 		bo->tbo.priority = 1;
 
@@ -442,27 +420,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 				   unsigned long size, int byte_align,
 				   struct amdgpu_bo *bo)
 {
-	struct ttm_placement placement = {0};
-	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
 	int r;
 
 	if (bo->shadow)
 		return 0;
 
-	memset(&placements, 0, sizeof(placements));
-	amdgpu_ttm_placement_init(adev, &placement, placements,
-				  AMDGPU_GEM_DOMAIN_GTT,
-				  AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-				  AMDGPU_GEM_CREATE_SHADOW);
-
-	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
-					AMDGPU_GEM_DOMAIN_GTT,
-					AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-					AMDGPU_GEM_CREATE_SHADOW,
-					NULL, &placement,
-					bo->tbo.resv,
-					0,
-					&bo->shadow);
+	r = amdgpu_bo_do_create(adev, size, byte_align, true,
+				AMDGPU_GEM_DOMAIN_GTT,
+				AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+				AMDGPU_GEM_CREATE_SHADOW,
+				NULL, bo->tbo.resv, 0,
+				&bo->shadow);
 	if (!r) {
 		bo->shadow->parent = amdgpu_bo_ref(bo);
 		mutex_lock(&adev->shadow_list_lock);
@@ -484,18 +452,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		     uint64_t init_value,
 		     struct amdgpu_bo **bo_ptr)
 {
-	struct ttm_placement placement = {0};
-	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
 	uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
 	int r;
 
-	memset(&placements, 0, sizeof(placements));
-	amdgpu_ttm_placement_init(adev, &placement, placements,
-				  domain, parent_flags);
-
-	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
-					parent_flags, sg, &placement, resv,
-					init_value, bo_ptr);
+	r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
+				parent_flags, sg, resv, init_value, bo_ptr);
 	if (r)
 		return r;
 
@@ -672,7 +633,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r, i;
-	unsigned fpfn, lpfn;
 
 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 		return -EPERM;
@@ -704,22 +664,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	}
 
 	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+	/* force to pin into visible video ram */
+	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
+		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 	amdgpu_ttm_placement_from_domain(bo, domain);
 	for (i = 0; i < bo->placement.num_placement; i++) {
-		/* force to pin into visible video ram */
-		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
-		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
-		    (!max_offset || max_offset >
-		     adev->mc.visible_vram_size)) {
-			if (WARN_ON_ONCE(min_offset >
-					 adev->mc.visible_vram_size))
-				return -EINVAL;
-			fpfn = min_offset >> PAGE_SHIFT;
-			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
-		} else {
-			fpfn = min_offset >> PAGE_SHIFT;
-			lpfn = max_offset >> PAGE_SHIFT;
-		}
+		unsigned fpfn, lpfn;
+
+		fpfn = min_offset >> PAGE_SHIFT;
+		lpfn = max_offset >> PAGE_SHIFT;
+
 		if (fpfn > bo->placements[i].fpfn)
 			bo->placements[i].fpfn = fpfn;
 		if (!bo->placements[i].lpfn ||
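The hunk above drops the per-placement special case that clamped VRAM pins to the CPU-visible aperture and instead sets AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED on the BO before rebuilding its placement. The patch does not show the VRAM branch of amdgpu_ttm_placement_from_domain(), but judging from the flag's name and its use here, that helper presumably applies the clamp along these lines (a sketch, not part of this diff):

	/* Presumed VRAM branch inside amdgpu_ttm_placement_from_domain():
	 * CPU_ACCESS_REQUIRED caps lpfn at the visible aperture so a
	 * pinned BO stays CPU-mappable. */
	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				  TTM_PL_FLAG_VRAM;
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
		c++;
	}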
@@ -928,8 +882,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
 		return;
 
-	abo = container_of(bo, struct amdgpu_bo, tbo);
-	amdgpu_vm_bo_invalidate(adev, abo);
+	abo = ttm_to_amdgpu_bo(bo);
+	amdgpu_vm_bo_invalidate(adev, abo, evict);
 
 	amdgpu_bo_kunmap(abo);
 
@@ -955,7 +909,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
 		return 0;
 
-	abo = container_of(bo, struct amdgpu_bo, tbo);
+	abo = ttm_to_amdgpu_bo(bo);
 
 	/* Remember that this BO was accessed by the CPU */
 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
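Several hunks replace the open-coded container_of(bo, struct amdgpu_bo, tbo) conversions with ttm_to_amdgpu_bo(). The helper's definition is not part of this diff (it lives in amdgpu_object.h); going by the lines it replaces, it is presumably the equivalent inline wrapper:

	/* Presumed definition of the helper used above; it centralizes the
	 * ttm_buffer_object -> amdgpu_bo downcast that this patch removes
	 * from each call site. */
	static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
	{
		return container_of(tbo, struct amdgpu_bo, tbo);
	}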