| author | Chunming Zhou <david1.zhou@amd.com> | 2018-04-16 05:57:19 -0400 |
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2018-05-15 14:43:28 -0400 |
| commit | a906dbb1e20f5791d728c7d9e2366b8acb4f1bb2 (patch) | |
| tree | 0f3a5ab82570916bdfc9878feb6d90805e56937c /drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |
| parent | f0c0761b38ac30b04d4fed436ff10e894ec0e525 (diff) | |
drm/amdgpu: add amdgpu_bo_param
amdgpu_bo_create has too many parameters and is used in
too many places. Collect them into one structure.
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
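
The new parameter structure itself is declared in amdgpu_object.h, which is outside the scope of this diff (the diffstat below is limited to amdgpu_object.c). Based on the old amdgpu_bo_do_create() signature and the fields referenced in the hunks below, it roughly looks like the following sketch; the field order and any members added later may differ from the actual header:

```c
/* Sketch only: inferred from the parameters this patch folds into bp;
 * the authoritative definition lives in amdgpu_object.h.
 */
struct amdgpu_bo_param {
	unsigned long			size;       /* requested BO size in bytes */
	int				byte_align; /* required byte alignment */
	u32				domain;     /* AMDGPU_GEM_DOMAIN_* placement mask */
	u64				flags;      /* AMDGPU_GEM_CREATE_* flags */
	enum ttm_bo_type		type;       /* kernel, device or sg buffer object */
	struct reservation_object	*resv;      /* optional reservation object to share */
};
```

Callers fill this structure (typically with designated initializers, as the converted callers below do) and pass a pointer to amdgpu_bo_do_create(), so adding a new creation parameter later no longer requires touching every call site.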
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 75
1 file changed, 42 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 24f582c696cc..b33a7fdea7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -341,27 +341,25 @@ fail:
 	return false;
 }
 
-static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
-			       int byte_align, u32 domain,
-			       u64 flags, enum ttm_bo_type type,
-			       struct reservation_object *resv,
+static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+			       struct amdgpu_bo_param *bp,
 			       struct amdgpu_bo **bo_ptr)
 {
 	struct ttm_operation_ctx ctx = {
-		.interruptible = (type != ttm_bo_type_kernel),
+		.interruptible = (bp->type != ttm_bo_type_kernel),
 		.no_wait_gpu = false,
-		.resv = resv,
+		.resv = bp->resv,
 		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
 	};
 	struct amdgpu_bo *bo;
-	unsigned long page_align;
+	unsigned long page_align, size = bp->size;
 	size_t acc_size;
 	int r;
 
-	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 	size = ALIGN(size, PAGE_SIZE);
 
-	if (!amdgpu_bo_validate_size(adev, size, domain))
+	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
 		return -ENOMEM;
 
 	*bo_ptr = NULL;
@@ -375,18 +373,18 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
-	bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+	bo->preferred_domains = bp->domain & (AMDGPU_GEM_DOMAIN_VRAM |
					  AMDGPU_GEM_DOMAIN_GTT |
					  AMDGPU_GEM_DOMAIN_CPU |
					  AMDGPU_GEM_DOMAIN_GDS |
					  AMDGPU_GEM_DOMAIN_GWS |
					  AMDGPU_GEM_DOMAIN_OA);
 	bo->allowed_domains = bo->preferred_domains;
-	if (type != ttm_bo_type_kernel &&
+	if (bp->type != ttm_bo_type_kernel &&
 	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
-	bo->flags = flags;
+	bo->flags = bp->flags;
 
 #ifdef CONFIG_X86_32
 	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
@@ -417,11 +415,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 #endif
 
 	bo->tbo.bdev = &adev->mman.bdev;
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_ttm_placement_from_domain(bo, bp->domain);
 
-	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
+	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
-				 NULL, resv, &amdgpu_ttm_bo_destroy);
+				 NULL, bp->resv, &amdgpu_ttm_bo_destroy);
 	if (unlikely(r != 0))
 		return r;
 
@@ -433,10 +431,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 	else
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
-	if (type == ttm_bo_type_kernel)
+	if (bp->type == ttm_bo_type_kernel)
 		bo->tbo.priority = 1;
 
-	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
 		struct dma_fence *fence;
 
@@ -449,20 +447,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 		bo->tbo.moving = dma_fence_get(fence);
 		dma_fence_put(fence);
 	}
-	if (!resv)
+	if (!bp->resv)
 		amdgpu_bo_unreserve(bo);
 	*bo_ptr = bo;
 
 	trace_amdgpu_bo_create(bo);
 
 	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
-	if (type == ttm_bo_type_device)
+	if (bp->type == ttm_bo_type_device)
 		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
 	return 0;
 
 fail_unreserve:
-	if (!resv)
+	if (!bp->resv)
 		ww_mutex_unlock(&bo->tbo.resv->lock);
 	amdgpu_bo_unref(&bo);
 	return r;
@@ -472,16 +470,21 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
 {
+	struct amdgpu_bo_param bp = {
+		.size = size,
+		.byte_align = byte_align,
+		.domain = AMDGPU_GEM_DOMAIN_GTT,
+		.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+			AMDGPU_GEM_CREATE_SHADOW,
+		.type = ttm_bo_type_kernel,
+		.resv = bo->tbo.resv
+	};
 	int r;
 
 	if (bo->shadow)
 		return 0;
 
-	r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
-				AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-				AMDGPU_GEM_CREATE_SHADOW,
-				ttm_bo_type_kernel,
-				bo->tbo.resv, &bo->shadow);
+	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
 	if (!r) {
 		bo->shadow->parent = amdgpu_bo_ref(bo);
 		mutex_lock(&adev->shadow_list_lock);
@@ -498,11 +501,17 @@ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
 {
-	uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
+	struct amdgpu_bo_param bp = {
+		.size = size,
+		.byte_align = byte_align,
+		.domain = domain,
+		.flags = flags & ~AMDGPU_GEM_CREATE_SHADOW,
+		.type = type,
+		.resv = resv
+	};
 	int r;
 
-	r = amdgpu_bo_do_create(adev, size, byte_align, domain,
-				parent_flags, type, resv, bo_ptr);
+	r = amdgpu_bo_do_create(adev, &bp, bo_ptr);
 	if (r)
 		return r;
 