author	Christian König <christian.koenig@amd.com>	2017-04-26 10:44:41 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2017-12-06 12:48:04 -0500
commit	dfb8fa9828dba4a83545c8d854bb6bc003a95d95 (patch)
tree	d1776097e0c7298e2a6fdf2d48703d5e8a8517fa
parent	3f3a7c8259312084291859d3b623db4317365a07 (diff)
drm/amdgpu: forward operation context to ttm_bo_mem_space
This way we can finally use some more stats.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Tested-by: Michel Dänzer <michel.daenzer@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c	30
1 file changed, 12 insertions(+), 18 deletions(-)
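For illustration only, below is a minimal, self-contained C sketch of the pattern this patch applies: instead of each helper unpacking the caller's flags and building a fresh local context, the caller's context is forwarded by pointer, so any state it accumulates (for example statistics) is visible across the nested calls. The names op_ctx, alloc_space and move_helper are hypothetical stand-ins for struct ttm_operation_ctx, ttm_bo_mem_space and the amdgpu move helpers, not the real kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct ttm_operation_ctx: flags plus
 * accumulated state the caller wants to read back afterwards. */
struct op_ctx {
	bool interruptible;
	bool no_wait_gpu;
	unsigned long bytes_moved;	/* example of "some more stats" */
};

/* Lowest-level call: records stats in whatever context it is handed. */
static int alloc_space(struct op_ctx *ctx, unsigned long size)
{
	ctx->bytes_moved += size;
	return 0;
}

/* After the refactor: the helper forwards the caller's context instead of
 * constructing a local one, so bytes_moved survives the nested call. */
static int move_helper(struct op_ctx *ctx, unsigned long size)
{
	return alloc_space(ctx, size);
}

int main(void)
{
	struct op_ctx ctx = { .interruptible = true, .no_wait_gpu = false };

	if (move_helper(&ctx, 4096))
		return 1;

	/* The caller now sees the stats gathered deeper in the call chain. */
	printf("bytes moved: %lu\n", ctx.bytes_moved);
	return 0;
}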
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0e90f64c2c09..3385694a97cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -466,12 +466,10 @@ error:
 	return r;
 }
 
-static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible,
-				bool no_wait_gpu,
+static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
+				struct ttm_operation_ctx *ctx,
 				struct ttm_mem_reg *new_mem)
 {
-	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
 	struct amdgpu_device *adev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
@@ -489,7 +487,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	placements.fpfn = 0;
 	placements.lpfn = 0;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -503,22 +501,20 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
 }
 
-static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible,
-				bool no_wait_gpu,
+static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+				struct ttm_operation_ctx *ctx,
 				struct ttm_mem_reg *new_mem)
 {
-	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
 	struct amdgpu_device *adev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
@@ -536,15 +532,15 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	placements.fpfn = 0;
 	placements.lpfn = 0;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -590,12 +586,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
-		r = amdgpu_move_vram_ram(bo, evict, ctx->interruptible,
-					 ctx->no_wait_gpu, new_mem);
+		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
-		r = amdgpu_move_ram_vram(bo, evict, ctx->interruptible,
-					 ctx->no_wait_gpu, new_mem);
+		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
 	} else {
 		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
 				     new_mem, old_mem);