about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChunming Zhou <david1.zhou@amd.com>2015-08-03 06:19:38 -0400
committerAlex Deucher <alexander.deucher@amd.com>2015-08-17 16:50:51 -0400
commitbb1e38a4bead5025ecca90544f0f733f59996b13 (patch)
tree4316a5e89dc9ae44fdd3d794a1dc0abcfdaf3293
parente40a31159b72742224c249cf57c5313be7ccd629 (diff)
drm/amdgpu: use kernel fence for last_pt_update
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <jammy.zhou@amd.com>
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c24
3 files changed, 15 insertions, 15 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5b8e1aeae13b..371ff0845989 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -539,7 +539,7 @@ struct amdgpu_bo_va_mapping {
539struct amdgpu_bo_va { 539struct amdgpu_bo_va {
540 /* protected by bo being reserved */ 540 /* protected by bo being reserved */
541 struct list_head bo_list; 541 struct list_head bo_list;
542 struct amdgpu_fence *last_pt_update; 542 struct fence *last_pt_update;
543 unsigned ref_count; 543 unsigned ref_count;
544 544
545 /* protected by vm mutex and spinlock */ 545 /* protected by vm mutex and spinlock */
@@ -1241,7 +1241,7 @@ union amdgpu_sched_job_param {
1241 struct amdgpu_vm *vm; 1241 struct amdgpu_vm *vm;
1242 uint64_t start; 1242 uint64_t start;
1243 uint64_t last; 1243 uint64_t last;
1244 struct amdgpu_fence **fence; 1244 struct fence **fence;
1245 1245
1246 } vm_mapping; 1246 } vm_mapping;
1247 struct { 1247 struct {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index fe81b46266d9..aee59110735f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -581,7 +581,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
581 if (r) 581 if (r)
582 return r; 582 return r;
583 583
584 f = &bo_va->last_pt_update->base; 584 f = bo_va->last_pt_update;
585 r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f); 585 r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
586 if (r) 586 if (r)
587 return r; 587 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8745d4cc7ae0..d90254f5ca6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -737,7 +737,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
737 */ 737 */
738static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm, 738static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
739 uint64_t start, uint64_t end, 739 uint64_t start, uint64_t end,
740 struct amdgpu_fence *fence) 740 struct fence *fence)
741{ 741{
742 unsigned i; 742 unsigned i;
743 743
@@ -745,20 +745,20 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
745 end >>= amdgpu_vm_block_size; 745 end >>= amdgpu_vm_block_size;
746 746
747 for (i = start; i <= end; ++i) 747 for (i = start; i <= end; ++i)
748 amdgpu_bo_fence(vm->page_tables[i].bo, &fence->base, true); 748 amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
749} 749}
750 750
751static int amdgpu_vm_bo_update_mapping_run_job( 751static int amdgpu_vm_bo_update_mapping_run_job(
752 struct amdgpu_cs_parser *sched_job) 752 struct amdgpu_cs_parser *sched_job)
753{ 753{
754 struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence; 754 struct fence **fence = sched_job->job_param.vm_mapping.fence;
755 amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm, 755 amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
756 sched_job->job_param.vm_mapping.start, 756 sched_job->job_param.vm_mapping.start,
757 sched_job->job_param.vm_mapping.last + 1, 757 sched_job->job_param.vm_mapping.last + 1,
758 sched_job->ibs[sched_job->num_ibs -1].fence); 758 &sched_job->ibs[sched_job->num_ibs -1].fence->base);
759 if (fence) { 759 if (fence) {
760 amdgpu_fence_unref(fence); 760 fence_put(*fence);
761 *fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence); 761 *fence = fence_get(&sched_job->ibs[sched_job->num_ibs -1].fence->base);
762 } 762 }
763 return 0; 763 return 0;
764} 764}
@@ -781,7 +781,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
781 struct amdgpu_vm *vm, 781 struct amdgpu_vm *vm,
782 struct amdgpu_bo_va_mapping *mapping, 782 struct amdgpu_bo_va_mapping *mapping,
783 uint64_t addr, uint32_t gtt_flags, 783 uint64_t addr, uint32_t gtt_flags,
784 struct amdgpu_fence **fence) 784 struct fence **fence)
785{ 785{
786 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; 786 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
787 unsigned nptes, ncmds, ndw; 787 unsigned nptes, ncmds, ndw;
@@ -902,10 +902,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
902 } 902 }
903 903
904 amdgpu_vm_fence_pts(vm, mapping->it.start, 904 amdgpu_vm_fence_pts(vm, mapping->it.start,
905 mapping->it.last + 1, ib->fence); 905 mapping->it.last + 1, &ib->fence->base);
906 if (fence) { 906 if (fence) {
907 amdgpu_fence_unref(fence); 907 fence_put(*fence);
908 *fence = amdgpu_fence_ref(ib->fence); 908 *fence = fence_get(&ib->fence->base);
909 } 909 }
910 910
911 amdgpu_ib_free(adev, ib); 911 amdgpu_ib_free(adev, ib);
@@ -1038,7 +1038,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1038 spin_unlock(&vm->status_lock); 1038 spin_unlock(&vm->status_lock);
1039 1039
1040 if (bo_va) 1040 if (bo_va)
1041 r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base); 1041 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1042 1042
1043 return r; 1043 return r;
1044} 1044}
@@ -1318,7 +1318,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1318 kfree(mapping); 1318 kfree(mapping);
1319 } 1319 }
1320 1320
1321 amdgpu_fence_unref(&bo_va->last_pt_update); 1321 fence_put(bo_va->last_pt_update);
1322 kfree(bo_va); 1322 kfree(bo_va);
1323 1323
1324 mutex_unlock(&vm->mutex); 1324 mutex_unlock(&vm->mutex);