diff options
author | Dave Airlie <airlied@redhat.com> | 2018-03-20 21:46:05 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2018-03-20 21:46:05 -0400 |
commit | 287d2ac36b6f2830ea4ef66c110abc0f47a9a658 (patch) | |
tree | 04214f156461a95c2f7ca5a8821063cad7fc515e /drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |
parent | 963976cfe9c54d4d9e725e61c90c47a4af6b5ea2 (diff) | |
parent | 6da2b9332c572fcda94de9631f8fa514f574388a (diff) |
Merge branch 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux into drm-next
- Continued cleanup and restructuring of powerplay
- Fetch VRAM type from vbios rather than hardcoding for SOC15 asics
- Allow ttm to drop its backing store when drivers don't need it
- DC bandwidth calc updates
- Enable DC backlight control pre-DCE11 asics
- Enable DC on all supported asics
- DC Fixes for planes due to the way our hw is ordered vs what drm expects
- DC CTM/regamma fixes
- Misc cleanup and bug fixes
* 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux: (89 commits)
amdgpu/dm: Default PRE_VEGA ASIC support to 'y'
drm/amd/pp: Remove the cgs wrapper for notify smu version on APU
drm/amd/display: fix dereferencing possible ERR_PTR()
drm/amd/display: Refine disable VGA
drm/amdgpu: Improve documentation of bo_ptr in amdgpu_bo_create_kernel
drm/radeon: Don't turn off DP sink when disconnected
drm/amd/pp: Rename file name cz_* to smu8_*
drm/amd/pp: Replace function/struct name cz_* with smu8_*
drm/amd/pp: Remove unneeded void * casts in cz_hwmgr.c/cz_smumgr.c
drm/amd/pp: Mv cz uvd/vce pg/dpm functions to cz_hwmgr.c
drm/amd/pp: Remove dead header file pp_asicblocks.h
drm/amd/pp: Delete dead code on cz_clockpowergating.c
drm/amdgpu: Call amdgpu_ucode_fini_bo in amd_powerplay.c
drm/amdgpu: Remove wrapper layer of smu ip functions
drm/amdgpu: Don't compared ip_block_type with ip_block_index
drm/amdgpu: Plus NULL function pointer check
drm/amd/pp: Move helper functions to smu_help.c
drm/amd/pp: Replace rv_* with smu10_*
drm/amd/pp: Fix function parameter not correct
drm/amd/pp: Add rv_copy_table_from/to_smc to smu backend function table
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 59 |
1 file changed, 28 insertions, 31 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9157745fce14..6d08cde8443c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -60,6 +60,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) | |||
60 | 60 | ||
61 | amdgpu_bo_kunmap(bo); | 61 | amdgpu_bo_kunmap(bo); |
62 | 62 | ||
63 | if (bo->gem_base.import_attach) | ||
64 | drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); | ||
63 | drm_gem_object_release(&bo->gem_base); | 65 | drm_gem_object_release(&bo->gem_base); |
64 | amdgpu_bo_unref(&bo->parent); | 66 | amdgpu_bo_unref(&bo->parent); |
65 | if (!list_empty(&bo->shadow_list)) { | 67 | if (!list_empty(&bo->shadow_list)) { |
@@ -173,13 +175,15 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) | |||
173 | * @size: size for the new BO | 175 | * @size: size for the new BO |
174 | * @align: alignment for the new BO | 176 | * @align: alignment for the new BO |
175 | * @domain: where to place it | 177 | * @domain: where to place it |
176 | * @bo_ptr: resulting BO | 178 | * @bo_ptr: used to initialize BOs in structures |
177 | * @gpu_addr: GPU addr of the pinned BO | 179 | * @gpu_addr: GPU addr of the pinned BO |
178 | * @cpu_addr: optional CPU address mapping | 180 | * @cpu_addr: optional CPU address mapping |
179 | * | 181 | * |
180 | * Allocates and pins a BO for kernel internal use, and returns it still | 182 | * Allocates and pins a BO for kernel internal use, and returns it still |
181 | * reserved. | 183 | * reserved. |
182 | * | 184 | * |
185 | * Note: For bo_ptr new BO is only created if bo_ptr points to NULL. | ||
186 | * | ||
183 | * Returns 0 on success, negative error code otherwise. | 187 | * Returns 0 on success, negative error code otherwise. |
184 | */ | 188 | */ |
185 | int amdgpu_bo_create_reserved(struct amdgpu_device *adev, | 189 | int amdgpu_bo_create_reserved(struct amdgpu_device *adev, |
@@ -191,10 +195,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev, | |||
191 | int r; | 195 | int r; |
192 | 196 | ||
193 | if (!*bo_ptr) { | 197 | if (!*bo_ptr) { |
194 | r = amdgpu_bo_create(adev, size, align, true, domain, | 198 | r = amdgpu_bo_create(adev, size, align, domain, |
195 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 199 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
196 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 200 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, |
197 | NULL, NULL, bo_ptr); | 201 | ttm_bo_type_kernel, NULL, bo_ptr); |
198 | if (r) { | 202 | if (r) { |
199 | dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", | 203 | dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", |
200 | r); | 204 | r); |
@@ -242,12 +246,14 @@ error_free: | |||
242 | * @size: size for the new BO | 246 | * @size: size for the new BO |
243 | * @align: alignment for the new BO | 247 | * @align: alignment for the new BO |
244 | * @domain: where to place it | 248 | * @domain: where to place it |
245 | * @bo_ptr: resulting BO | 249 | * @bo_ptr: used to initialize BOs in structures |
246 | * @gpu_addr: GPU addr of the pinned BO | 250 | * @gpu_addr: GPU addr of the pinned BO |
247 | * @cpu_addr: optional CPU address mapping | 251 | * @cpu_addr: optional CPU address mapping |
248 | * | 252 | * |
249 | * Allocates and pins a BO for kernel internal use. | 253 | * Allocates and pins a BO for kernel internal use. |
250 | * | 254 | * |
255 | * Note: For bo_ptr new BO is only created if bo_ptr points to NULL. | ||
256 | * | ||
251 | * Returns 0 on success, negative error code otherwise. | 257 | * Returns 0 on success, negative error code otherwise. |
252 | */ | 258 | */ |
253 | int amdgpu_bo_create_kernel(struct amdgpu_device *adev, | 259 | int amdgpu_bo_create_kernel(struct amdgpu_device *adev, |
@@ -335,21 +341,19 @@ fail: | |||
335 | return false; | 341 | return false; |
336 | } | 342 | } |
337 | 343 | ||
338 | static int amdgpu_bo_do_create(struct amdgpu_device *adev, | 344 | static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, |
339 | unsigned long size, int byte_align, | 345 | int byte_align, u32 domain, |
340 | bool kernel, u32 domain, u64 flags, | 346 | u64 flags, enum ttm_bo_type type, |
341 | struct sg_table *sg, | ||
342 | struct reservation_object *resv, | 347 | struct reservation_object *resv, |
343 | struct amdgpu_bo **bo_ptr) | 348 | struct amdgpu_bo **bo_ptr) |
344 | { | 349 | { |
345 | struct ttm_operation_ctx ctx = { | 350 | struct ttm_operation_ctx ctx = { |
346 | .interruptible = !kernel, | 351 | .interruptible = (type != ttm_bo_type_kernel), |
347 | .no_wait_gpu = false, | 352 | .no_wait_gpu = false, |
348 | .resv = resv, | 353 | .resv = resv, |
349 | .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT | 354 | .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT |
350 | }; | 355 | }; |
351 | struct amdgpu_bo *bo; | 356 | struct amdgpu_bo *bo; |
352 | enum ttm_bo_type type; | ||
353 | unsigned long page_align; | 357 | unsigned long page_align; |
354 | size_t acc_size; | 358 | size_t acc_size; |
355 | int r; | 359 | int r; |
@@ -360,13 +364,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, | |||
360 | if (!amdgpu_bo_validate_size(adev, size, domain)) | 364 | if (!amdgpu_bo_validate_size(adev, size, domain)) |
361 | return -ENOMEM; | 365 | return -ENOMEM; |
362 | 366 | ||
363 | if (kernel) { | ||
364 | type = ttm_bo_type_kernel; | ||
365 | } else if (sg) { | ||
366 | type = ttm_bo_type_sg; | ||
367 | } else { | ||
368 | type = ttm_bo_type_device; | ||
369 | } | ||
370 | *bo_ptr = NULL; | 367 | *bo_ptr = NULL; |
371 | 368 | ||
372 | acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, | 369 | acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, |
@@ -385,7 +382,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, | |||
385 | AMDGPU_GEM_DOMAIN_GWS | | 382 | AMDGPU_GEM_DOMAIN_GWS | |
386 | AMDGPU_GEM_DOMAIN_OA); | 383 | AMDGPU_GEM_DOMAIN_OA); |
387 | bo->allowed_domains = bo->preferred_domains; | 384 | bo->allowed_domains = bo->preferred_domains; |
388 | if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) | 385 | if (type != ttm_bo_type_kernel && |
386 | bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) | ||
389 | bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; | 387 | bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; |
390 | 388 | ||
391 | bo->flags = flags; | 389 | bo->flags = flags; |
@@ -423,7 +421,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, | |||
423 | 421 | ||
424 | r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, | 422 | r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, |
425 | &bo->placement, page_align, &ctx, acc_size, | 423 | &bo->placement, page_align, &ctx, acc_size, |
426 | sg, resv, &amdgpu_ttm_bo_destroy); | 424 | NULL, resv, &amdgpu_ttm_bo_destroy); |
427 | if (unlikely(r != 0)) | 425 | if (unlikely(r != 0)) |
428 | return r; | 426 | return r; |
429 | 427 | ||
@@ -435,7 +433,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, | |||
435 | else | 433 | else |
436 | amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); | 434 | amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); |
437 | 435 | ||
438 | if (kernel) | 436 | if (type == ttm_bo_type_kernel) |
439 | bo->tbo.priority = 1; | 437 | bo->tbo.priority = 1; |
440 | 438 | ||
441 | if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && | 439 | if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && |
@@ -479,12 +477,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, | |||
479 | if (bo->shadow) | 477 | if (bo->shadow) |
480 | return 0; | 478 | return 0; |
481 | 479 | ||
482 | r = amdgpu_bo_do_create(adev, size, byte_align, true, | 480 | r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT, |
483 | AMDGPU_GEM_DOMAIN_GTT, | ||
484 | AMDGPU_GEM_CREATE_CPU_GTT_USWC | | 481 | AMDGPU_GEM_CREATE_CPU_GTT_USWC | |
485 | AMDGPU_GEM_CREATE_SHADOW, | 482 | AMDGPU_GEM_CREATE_SHADOW, |
486 | NULL, bo->tbo.resv, | 483 | ttm_bo_type_kernel, |
487 | &bo->shadow); | 484 | bo->tbo.resv, &bo->shadow); |
488 | if (!r) { | 485 | if (!r) { |
489 | bo->shadow->parent = amdgpu_bo_ref(bo); | 486 | bo->shadow->parent = amdgpu_bo_ref(bo); |
490 | mutex_lock(&adev->shadow_list_lock); | 487 | mutex_lock(&adev->shadow_list_lock); |
@@ -495,18 +492,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, | |||
495 | return r; | 492 | return r; |
496 | } | 493 | } |
497 | 494 | ||
498 | int amdgpu_bo_create(struct amdgpu_device *adev, | 495 | int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, |
499 | unsigned long size, int byte_align, | 496 | int byte_align, u32 domain, |
500 | bool kernel, u32 domain, u64 flags, | 497 | u64 flags, enum ttm_bo_type type, |
501 | struct sg_table *sg, | ||
502 | struct reservation_object *resv, | 498 | struct reservation_object *resv, |
503 | struct amdgpu_bo **bo_ptr) | 499 | struct amdgpu_bo **bo_ptr) |
504 | { | 500 | { |
505 | uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; | 501 | uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; |
506 | int r; | 502 | int r; |
507 | 503 | ||
508 | r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain, | 504 | r = amdgpu_bo_do_create(adev, size, byte_align, domain, |
509 | parent_flags, sg, resv, bo_ptr); | 505 | parent_flags, type, resv, bo_ptr); |
510 | if (r) | 506 | if (r) |
511 | return r; | 507 | return r; |
512 | 508 | ||
@@ -821,7 +817,8 @@ static const char *amdgpu_vram_names[] = { | |||
821 | "GDDR4", | 817 | "GDDR4", |
822 | "GDDR5", | 818 | "GDDR5", |
823 | "HBM", | 819 | "HBM", |
824 | "DDR3" | 820 | "DDR3", |
821 | "DDR4", | ||
825 | }; | 822 | }; |
826 | 823 | ||
827 | int amdgpu_bo_init(struct amdgpu_device *adev) | 824 | int amdgpu_bo_init(struct amdgpu_device *adev) |