about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/drm/amd
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2017-04-03 08:16:07 -0400
committerAlex Deucher <alexander.deucher@amd.com>2017-04-04 23:34:09 -0400
commitc0e51931a1cf35be9f146b44e2b9e7ac575fa411 (patch)
tree9cfeb6ecfbea50589e51286d4fbd334c7df73ffe /drivers/gpu/drm/amd
parent641e94008970ef7dba764a3c672ec8bd40b9a533 (diff)
drm/amdgpu: cleanup coding style in amdgpu_vm_flush
Abort early if there is nothing to do and correctly indent the "if"s.

Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c88
1 file changed, 45 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5bedba30b434..99889fb486a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -597,60 +597,62 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
597 id->gws_size != job->gws_size || 597 id->gws_size != job->gws_size ||
598 id->oa_base != job->oa_base || 598 id->oa_base != job->oa_base ||
599 id->oa_size != job->oa_size); 599 id->oa_size != job->oa_size);
600 unsigned patch_offset = 0;
600 int r; 601 int r;
601 602
602 if (job->vm_needs_flush || gds_switch_needed || 603 if (!job->vm_needs_flush && !gds_switch_needed &&
603 amdgpu_vm_had_gpu_reset(adev, id) || 604 !amdgpu_vm_had_gpu_reset(adev, id) &&
604 amdgpu_vm_ring_has_compute_vm_bug(ring)) { 605 !amdgpu_vm_ring_has_compute_vm_bug(ring))
605 unsigned patch_offset = 0; 606 return 0;
606 607
607 if (ring->funcs->init_cond_exec)
608 patch_offset = amdgpu_ring_init_cond_exec(ring);
609 608
610 if (ring->funcs->emit_pipeline_sync && 609 if (ring->funcs->init_cond_exec)
611 (job->vm_needs_flush || gds_switch_needed || 610 patch_offset = amdgpu_ring_init_cond_exec(ring);
612 amdgpu_vm_ring_has_compute_vm_bug(ring)))
613 amdgpu_ring_emit_pipeline_sync(ring);
614 611
615 if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || 612 if (ring->funcs->emit_pipeline_sync &&
616 amdgpu_vm_had_gpu_reset(adev, id))) { 613 (job->vm_needs_flush || gds_switch_needed ||
617 struct dma_fence *fence; 614 amdgpu_vm_ring_has_compute_vm_bug(ring)))
618 u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr); 615 amdgpu_ring_emit_pipeline_sync(ring);
619 616
620 trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id); 617 if (ring->funcs->emit_vm_flush &&
621 amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr); 618 (job->vm_needs_flush || amdgpu_vm_had_gpu_reset(adev, id))) {
622 619
623 r = amdgpu_fence_emit(ring, &fence); 620 u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
624 if (r) 621 struct dma_fence *fence;
625 return r;
626 622
627 mutex_lock(&adev->vm_manager.lock); 623 trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
628 dma_fence_put(id->last_flush); 624 amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
629 id->last_flush = fence;
630 mutex_unlock(&adev->vm_manager.lock);
631 }
632 625
633 if (gds_switch_needed) { 626 r = amdgpu_fence_emit(ring, &fence);
634 id->gds_base = job->gds_base; 627 if (r)
635 id->gds_size = job->gds_size; 628 return r;
636 id->gws_base = job->gws_base;
637 id->gws_size = job->gws_size;
638 id->oa_base = job->oa_base;
639 id->oa_size = job->oa_size;
640 amdgpu_ring_emit_gds_switch(ring, job->vm_id,
641 job->gds_base, job->gds_size,
642 job->gws_base, job->gws_size,
643 job->oa_base, job->oa_size);
644 }
645 629
646 if (ring->funcs->patch_cond_exec) 630 mutex_lock(&adev->vm_manager.lock);
647 amdgpu_ring_patch_cond_exec(ring, patch_offset); 631 dma_fence_put(id->last_flush);
632 id->last_flush = fence;
633 mutex_unlock(&adev->vm_manager.lock);
634 }
648 635
649 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */ 636 if (gds_switch_needed) {
650 if (ring->funcs->emit_switch_buffer) { 637 id->gds_base = job->gds_base;
651 amdgpu_ring_emit_switch_buffer(ring); 638 id->gds_size = job->gds_size;
652 amdgpu_ring_emit_switch_buffer(ring); 639 id->gws_base = job->gws_base;
653 } 640 id->gws_size = job->gws_size;
641 id->oa_base = job->oa_base;
642 id->oa_size = job->oa_size;
643 amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
644 job->gds_size, job->gws_base,
645 job->gws_size, job->oa_base,
646 job->oa_size);
647 }
648
649 if (ring->funcs->patch_cond_exec)
650 amdgpu_ring_patch_cond_exec(ring, patch_offset);
651
652 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
653 if (ring->funcs->emit_switch_buffer) {
654 amdgpu_ring_emit_switch_buffer(ring);
655 amdgpu_ring_emit_switch_buffer(ring);
654 } 656 }
655 return 0; 657 return 0;
656} 658}