diff options
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 12 |
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index e3ec3f10..f5ce886a 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -810,7 +810,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm,
810 | static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset) | 810 | static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset) |
811 | { | 811 | { |
812 | struct device *d = dev_from_vm(vm); | 812 | struct device *d = dev_from_vm(vm); |
813 | int retries; | 813 | int retries = 1000; |
814 | struct mapped_buffer_node *mapped_buffer; | 814 | struct mapped_buffer_node *mapped_buffer; |
815 | 815 | ||
816 | mutex_lock(&vm->update_gmmu_lock); | 816 | mutex_lock(&vm->update_gmmu_lock); |
@@ -825,17 +825,13 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
825 | if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { | 825 | if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { |
826 | mutex_unlock(&vm->update_gmmu_lock); | 826 | mutex_unlock(&vm->update_gmmu_lock); |
827 | 827 | ||
828 | if (tegra_platform_is_silicon()) | 828 | while (retries >= 0 || !tegra_platform_is_silicon()) { |
829 | retries = 1000; | ||
830 | else | ||
831 | retries = 1000000; | ||
832 | while (retries) { | ||
833 | if (atomic_read(&mapped_buffer->ref.refcount) == 1) | 829 | if (atomic_read(&mapped_buffer->ref.refcount) == 1) |
834 | break; | 830 | break; |
835 | retries--; | 831 | retries--; |
836 | udelay(50); | 832 | udelay(5); |
837 | } | 833 | } |
838 | if (!retries) | 834 | if (retries < 0 && tegra_platform_is_silicon()) |
839 | gk20a_err(d, "sync-unmap failed on 0x%llx", | 835 | gk20a_err(d, "sync-unmap failed on 0x%llx", |
840 | offset); | 836 | offset); |
841 | mutex_lock(&vm->update_gmmu_lock); | 837 | mutex_lock(&vm->update_gmmu_lock); |