aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 112
1 file changed, 52 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5c4c3e0d527b..6d08cde8443c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -36,6 +36,7 @@
36#include <drm/drm_cache.h> 36#include <drm/drm_cache.h>
37#include "amdgpu.h" 37#include "amdgpu.h"
38#include "amdgpu_trace.h" 38#include "amdgpu_trace.h"
39#include "amdgpu_amdkfd.h"
39 40
40static bool amdgpu_need_backup(struct amdgpu_device *adev) 41static bool amdgpu_need_backup(struct amdgpu_device *adev)
41{ 42{
@@ -54,8 +55,13 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
54 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 55 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
55 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); 56 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
56 57
58 if (bo->kfd_bo)
59 amdgpu_amdkfd_unreserve_system_memory_limit(bo);
60
57 amdgpu_bo_kunmap(bo); 61 amdgpu_bo_kunmap(bo);
58 62
63 if (bo->gem_base.import_attach)
64 drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
59 drm_gem_object_release(&bo->gem_base); 65 drm_gem_object_release(&bo->gem_base);
60 amdgpu_bo_unref(&bo->parent); 66 amdgpu_bo_unref(&bo->parent);
61 if (!list_empty(&bo->shadow_list)) { 67 if (!list_empty(&bo->shadow_list)) {
@@ -83,7 +89,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
83 u32 c = 0; 89 u32 c = 0;
84 90
85 if (domain & AMDGPU_GEM_DOMAIN_VRAM) { 91 if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
86 unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; 92 unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
87 93
88 places[c].fpfn = 0; 94 places[c].fpfn = 0;
89 places[c].lpfn = 0; 95 places[c].lpfn = 0;
@@ -103,7 +109,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
103 if (domain & AMDGPU_GEM_DOMAIN_GTT) { 109 if (domain & AMDGPU_GEM_DOMAIN_GTT) {
104 places[c].fpfn = 0; 110 places[c].fpfn = 0;
105 if (flags & AMDGPU_GEM_CREATE_SHADOW) 111 if (flags & AMDGPU_GEM_CREATE_SHADOW)
106 places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT; 112 places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
107 else 113 else
108 places[c].lpfn = 0; 114 places[c].lpfn = 0;
109 places[c].flags = TTM_PL_FLAG_TT; 115 places[c].flags = TTM_PL_FLAG_TT;
@@ -169,13 +175,15 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
169 * @size: size for the new BO 175 * @size: size for the new BO
170 * @align: alignment for the new BO 176 * @align: alignment for the new BO
171 * @domain: where to place it 177 * @domain: where to place it
172 * @bo_ptr: resulting BO 178 * @bo_ptr: used to initialize BOs in structures
173 * @gpu_addr: GPU addr of the pinned BO 179 * @gpu_addr: GPU addr of the pinned BO
174 * @cpu_addr: optional CPU address mapping 180 * @cpu_addr: optional CPU address mapping
175 * 181 *
176 * Allocates and pins a BO for kernel internal use, and returns it still 182 * Allocates and pins a BO for kernel internal use, and returns it still
177 * reserved. 183 * reserved.
178 * 184 *
185 * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
186 *
179 * Returns 0 on success, negative error code otherwise. 187 * Returns 0 on success, negative error code otherwise.
180 */ 188 */
181int amdgpu_bo_create_reserved(struct amdgpu_device *adev, 189int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
@@ -187,10 +195,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
187 int r; 195 int r;
188 196
189 if (!*bo_ptr) { 197 if (!*bo_ptr) {
190 r = amdgpu_bo_create(adev, size, align, true, domain, 198 r = amdgpu_bo_create(adev, size, align, domain,
191 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 199 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
192 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, 200 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
193 NULL, NULL, 0, bo_ptr); 201 ttm_bo_type_kernel, NULL, bo_ptr);
194 if (r) { 202 if (r) {
195 dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", 203 dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
196 r); 204 r);
@@ -238,12 +246,14 @@ error_free:
238 * @size: size for the new BO 246 * @size: size for the new BO
239 * @align: alignment for the new BO 247 * @align: alignment for the new BO
240 * @domain: where to place it 248 * @domain: where to place it
241 * @bo_ptr: resulting BO 249 * @bo_ptr: used to initialize BOs in structures
242 * @gpu_addr: GPU addr of the pinned BO 250 * @gpu_addr: GPU addr of the pinned BO
243 * @cpu_addr: optional CPU address mapping 251 * @cpu_addr: optional CPU address mapping
244 * 252 *
245 * Allocates and pins a BO for kernel internal use. 253 * Allocates and pins a BO for kernel internal use.
246 * 254 *
255 * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
256 *
247 * Returns 0 on success, negative error code otherwise. 257 * Returns 0 on success, negative error code otherwise.
248 */ 258 */
249int amdgpu_bo_create_kernel(struct amdgpu_device *adev, 259int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
@@ -331,22 +341,19 @@ fail:
331 return false; 341 return false;
332} 342}
333 343
334static int amdgpu_bo_do_create(struct amdgpu_device *adev, 344static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
335 unsigned long size, int byte_align, 345 int byte_align, u32 domain,
336 bool kernel, u32 domain, u64 flags, 346 u64 flags, enum ttm_bo_type type,
337 struct sg_table *sg,
338 struct reservation_object *resv, 347 struct reservation_object *resv,
339 uint64_t init_value,
340 struct amdgpu_bo **bo_ptr) 348 struct amdgpu_bo **bo_ptr)
341{ 349{
342 struct ttm_operation_ctx ctx = { 350 struct ttm_operation_ctx ctx = {
343 .interruptible = !kernel, 351 .interruptible = (type != ttm_bo_type_kernel),
344 .no_wait_gpu = false, 352 .no_wait_gpu = false,
345 .allow_reserved_eviction = true, 353 .resv = resv,
346 .resv = resv 354 .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
347 }; 355 };
348 struct amdgpu_bo *bo; 356 struct amdgpu_bo *bo;
349 enum ttm_bo_type type;
350 unsigned long page_align; 357 unsigned long page_align;
351 size_t acc_size; 358 size_t acc_size;
352 int r; 359 int r;
@@ -357,13 +364,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
357 if (!amdgpu_bo_validate_size(adev, size, domain)) 364 if (!amdgpu_bo_validate_size(adev, size, domain))
358 return -ENOMEM; 365 return -ENOMEM;
359 366
360 if (kernel) {
361 type = ttm_bo_type_kernel;
362 } else if (sg) {
363 type = ttm_bo_type_sg;
364 } else {
365 type = ttm_bo_type_device;
366 }
367 *bo_ptr = NULL; 367 *bo_ptr = NULL;
368 368
369 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, 369 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
@@ -372,11 +372,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
372 bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL); 372 bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
373 if (bo == NULL) 373 if (bo == NULL)
374 return -ENOMEM; 374 return -ENOMEM;
375 r = drm_gem_object_init(adev->ddev, &bo->gem_base, size); 375 drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
376 if (unlikely(r)) {
377 kfree(bo);
378 return r;
379 }
380 INIT_LIST_HEAD(&bo->shadow_list); 376 INIT_LIST_HEAD(&bo->shadow_list);
381 INIT_LIST_HEAD(&bo->va); 377 INIT_LIST_HEAD(&bo->va);
382 bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | 378 bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -386,7 +382,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
386 AMDGPU_GEM_DOMAIN_GWS | 382 AMDGPU_GEM_DOMAIN_GWS |
387 AMDGPU_GEM_DOMAIN_OA); 383 AMDGPU_GEM_DOMAIN_OA);
388 bo->allowed_domains = bo->preferred_domains; 384 bo->allowed_domains = bo->preferred_domains;
389 if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) 385 if (type != ttm_bo_type_kernel &&
386 bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
390 bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; 387 bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
391 388
392 bo->flags = flags; 389 bo->flags = flags;
@@ -423,27 +420,27 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
423 amdgpu_ttm_placement_from_domain(bo, domain); 420 amdgpu_ttm_placement_from_domain(bo, domain);
424 421
425 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, 422 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
426 &bo->placement, page_align, &ctx, NULL, 423 &bo->placement, page_align, &ctx, acc_size,
427 acc_size, sg, resv, &amdgpu_ttm_bo_destroy); 424 NULL, resv, &amdgpu_ttm_bo_destroy);
428 if (unlikely(r != 0)) 425 if (unlikely(r != 0))
429 return r; 426 return r;
430 427
431 if (adev->mc.visible_vram_size < adev->mc.real_vram_size && 428 if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
432 bo->tbo.mem.mem_type == TTM_PL_VRAM && 429 bo->tbo.mem.mem_type == TTM_PL_VRAM &&
433 bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) 430 bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
434 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 431 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
435 ctx.bytes_moved); 432 ctx.bytes_moved);
436 else 433 else
437 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); 434 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
438 435
439 if (kernel) 436 if (type == ttm_bo_type_kernel)
440 bo->tbo.priority = 1; 437 bo->tbo.priority = 1;
441 438
442 if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && 439 if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
443 bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { 440 bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
444 struct dma_fence *fence; 441 struct dma_fence *fence;
445 442
446 r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence); 443 r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
447 if (unlikely(r)) 444 if (unlikely(r))
448 goto fail_unreserve; 445 goto fail_unreserve;
449 446
@@ -480,12 +477,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
480 if (bo->shadow) 477 if (bo->shadow)
481 return 0; 478 return 0;
482 479
483 r = amdgpu_bo_do_create(adev, size, byte_align, true, 480 r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
484 AMDGPU_GEM_DOMAIN_GTT,
485 AMDGPU_GEM_CREATE_CPU_GTT_USWC | 481 AMDGPU_GEM_CREATE_CPU_GTT_USWC |
486 AMDGPU_GEM_CREATE_SHADOW, 482 AMDGPU_GEM_CREATE_SHADOW,
487 NULL, bo->tbo.resv, 0, 483 ttm_bo_type_kernel,
488 &bo->shadow); 484 bo->tbo.resv, &bo->shadow);
489 if (!r) { 485 if (!r) {
490 bo->shadow->parent = amdgpu_bo_ref(bo); 486 bo->shadow->parent = amdgpu_bo_ref(bo);
491 mutex_lock(&adev->shadow_list_lock); 487 mutex_lock(&adev->shadow_list_lock);
@@ -496,22 +492,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
496 return r; 492 return r;
497} 493}
498 494
499/* init_value will only take effect when flags contains 495int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
500 * AMDGPU_GEM_CREATE_VRAM_CLEARED. 496 int byte_align, u32 domain,
501 */ 497 u64 flags, enum ttm_bo_type type,
502int amdgpu_bo_create(struct amdgpu_device *adev,
503 unsigned long size, int byte_align,
504 bool kernel, u32 domain, u64 flags,
505 struct sg_table *sg,
506 struct reservation_object *resv, 498 struct reservation_object *resv,
507 uint64_t init_value,
508 struct amdgpu_bo **bo_ptr) 499 struct amdgpu_bo **bo_ptr)
509{ 500{
510 uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; 501 uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
511 int r; 502 int r;
512 503
513 r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain, 504 r = amdgpu_bo_do_create(adev, size, byte_align, domain,
514 parent_flags, sg, resv, init_value, bo_ptr); 505 parent_flags, type, resv, bo_ptr);
515 if (r) 506 if (r)
516 return r; 507 return r;
517 508
@@ -826,31 +817,32 @@ static const char *amdgpu_vram_names[] = {
826 "GDDR4", 817 "GDDR4",
827 "GDDR5", 818 "GDDR5",
828 "HBM", 819 "HBM",
829 "DDR3" 820 "DDR3",
821 "DDR4",
830}; 822};
831 823
832int amdgpu_bo_init(struct amdgpu_device *adev) 824int amdgpu_bo_init(struct amdgpu_device *adev)
833{ 825{
834 /* reserve PAT memory space to WC for VRAM */ 826 /* reserve PAT memory space to WC for VRAM */
835 arch_io_reserve_memtype_wc(adev->mc.aper_base, 827 arch_io_reserve_memtype_wc(adev->gmc.aper_base,
836 adev->mc.aper_size); 828 adev->gmc.aper_size);
837 829
838 /* Add an MTRR for the VRAM */ 830 /* Add an MTRR for the VRAM */
839 adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base, 831 adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
840 adev->mc.aper_size); 832 adev->gmc.aper_size);
841 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", 833 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
842 adev->mc.mc_vram_size >> 20, 834 adev->gmc.mc_vram_size >> 20,
843 (unsigned long long)adev->mc.aper_size >> 20); 835 (unsigned long long)adev->gmc.aper_size >> 20);
844 DRM_INFO("RAM width %dbits %s\n", 836 DRM_INFO("RAM width %dbits %s\n",
845 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]); 837 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
846 return amdgpu_ttm_init(adev); 838 return amdgpu_ttm_init(adev);
847} 839}
848 840
849void amdgpu_bo_fini(struct amdgpu_device *adev) 841void amdgpu_bo_fini(struct amdgpu_device *adev)
850{ 842{
851 amdgpu_ttm_fini(adev); 843 amdgpu_ttm_fini(adev);
852 arch_phys_wc_del(adev->mc.vram_mtrr); 844 arch_phys_wc_del(adev->gmc.vram_mtrr);
853 arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size); 845 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
854} 846}
855 847
856int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, 848int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
@@ -980,7 +972,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
980 972
981 size = bo->mem.num_pages << PAGE_SHIFT; 973 size = bo->mem.num_pages << PAGE_SHIFT;
982 offset = bo->mem.start << PAGE_SHIFT; 974 offset = bo->mem.start << PAGE_SHIFT;
983 if ((offset + size) <= adev->mc.visible_vram_size) 975 if ((offset + size) <= adev->gmc.visible_vram_size)
984 return 0; 976 return 0;
985 977
986 /* Can't move a pinned BO to visible VRAM */ 978 /* Can't move a pinned BO to visible VRAM */
@@ -1003,7 +995,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1003 offset = bo->mem.start << PAGE_SHIFT; 995 offset = bo->mem.start << PAGE_SHIFT;
1004 /* this should never happen */ 996 /* this should never happen */
1005 if (bo->mem.mem_type == TTM_PL_VRAM && 997 if (bo->mem.mem_type == TTM_PL_VRAM &&
1006 (offset + size) > adev->mc.visible_vram_size) 998 (offset + size) > adev->gmc.visible_vram_size)
1007 return -EINVAL; 999 return -EINVAL;
1008 1000
1009 return 0; 1001 return 0;