diff options
author | Debarshi Dutta <ddutta@nvidia.com> | 2017-08-08 02:38:03 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-08-24 04:10:37 -0400 |
commit | 3fa47b877db1edc16018d662e7b9915d92354745 (patch) | |
tree | c1d9a8734e7d92b5ae647fbc3f582a01207a23f6 /drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |
parent | 8662fae334f2419da2e7fd220f7734217ec52433 (diff) |
gpu: nvgpu: Replace kref for refcounting in nvgpu
- added wrapper struct nvgpu_ref over nvgpu_atomic_t
- added nvgpu_ref_* APIs to access the above struct
JIRA NVGPU-140
Change-Id: Id47f897995dd4721751f7610b6d4d4fbfe4d6b9a
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1540899
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 15 |
1 file changed, 8 insertions, 7 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 2ce78cef..3030c170 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |||
@@ -811,7 +811,7 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm, | |||
811 | mapped_buffer = mapped_buffer_from_rbtree_node(node); | 811 | mapped_buffer = mapped_buffer_from_rbtree_node(node); |
812 | if (mapped_buffer->user_mapped) { | 812 | if (mapped_buffer->user_mapped) { |
813 | buffer_list[i] = mapped_buffer; | 813 | buffer_list[i] = mapped_buffer; |
814 | kref_get(&mapped_buffer->ref); | 814 | nvgpu_ref_get(&mapped_buffer->ref); |
815 | i++; | 815 | i++; |
816 | } | 816 | } |
817 | nvgpu_rbtree_enum_next(&node, node); | 817 | nvgpu_rbtree_enum_next(&node, node); |
@@ -827,7 +827,7 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm, | |||
827 | return 0; | 827 | return 0; |
828 | } | 828 | } |
829 | 829 | ||
830 | void gk20a_vm_unmap_locked_kref(struct kref *ref) | 830 | void gk20a_vm_unmap_locked_ref(struct nvgpu_ref *ref) |
831 | { | 831 | { |
832 | struct nvgpu_mapped_buf *mapped_buffer = | 832 | struct nvgpu_mapped_buf *mapped_buffer = |
833 | container_of(ref, struct nvgpu_mapped_buf, ref); | 833 | container_of(ref, struct nvgpu_mapped_buf, ref); |
@@ -849,8 +849,8 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm, | |||
849 | vm->kref_put_batch = &batch; | 849 | vm->kref_put_batch = &batch; |
850 | 850 | ||
851 | for (i = 0; i < num_buffers; ++i) | 851 | for (i = 0; i < num_buffers; ++i) |
852 | kref_put(&mapped_buffers[i]->ref, | 852 | nvgpu_ref_put(&mapped_buffers[i]->ref, |
853 | gk20a_vm_unmap_locked_kref); | 853 | gk20a_vm_unmap_locked_ref); |
854 | 854 | ||
855 | vm->kref_put_batch = NULL; | 855 | vm->kref_put_batch = NULL; |
856 | nvgpu_vm_mapping_batch_finish_locked(vm, &batch); | 856 | nvgpu_vm_mapping_batch_finish_locked(vm, &batch); |
@@ -882,8 +882,9 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset, | |||
882 | nvgpu_timeout_init(vm->mm->g, &timeout, 10000, | 882 | nvgpu_timeout_init(vm->mm->g, &timeout, 10000, |
883 | NVGPU_TIMER_RETRY_TIMER); | 883 | NVGPU_TIMER_RETRY_TIMER); |
884 | do { | 884 | do { |
885 | if (atomic_read(&mapped_buffer->ref.refcount) == 1) | 885 | if (nvgpu_atomic_read( |
886 | break; | 886 | &mapped_buffer->ref.refcount) == 1) |
887 | break; | ||
887 | nvgpu_udelay(5); | 888 | nvgpu_udelay(5); |
888 | } while (!nvgpu_timeout_expired_msg(&timeout, | 889 | } while (!nvgpu_timeout_expired_msg(&timeout, |
889 | "sync-unmap failed on 0x%llx")); | 890 | "sync-unmap failed on 0x%llx")); |
@@ -902,7 +903,7 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset, | |||
902 | vm->num_user_mapped_buffers--; | 903 | vm->num_user_mapped_buffers--; |
903 | 904 | ||
904 | vm->kref_put_batch = batch; | 905 | vm->kref_put_batch = batch; |
905 | kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref); | 906 | nvgpu_ref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_ref); |
906 | vm->kref_put_batch = NULL; | 907 | vm->kref_put_batch = NULL; |
907 | 908 | ||
908 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 909 | nvgpu_mutex_release(&vm->update_gmmu_lock); |