summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/vm.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--drivers/gpu/nvgpu/common/mm/vm.c12
1 files changed, 8 insertions, 4 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 3d9dd174..3b682e28 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -75,8 +75,9 @@ int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm)
75 * hierarchy: the last level is PTEs so we really want the level 75 * hierarchy: the last level is PTEs so we really want the level
76 * before that which is the last level of PDEs. 76 * before that which is the last level of PDEs.
77 */ 77 */
78 while (vm->mmu_levels[final_pde_level + 2].update_entry) 78 while (vm->mmu_levels[final_pde_level + 2].update_entry) {
79 final_pde_level++; 79 final_pde_level++;
80 }
80 81
81 return vm->mmu_levels[final_pde_level].lo_bit[0]; 82 return vm->mmu_levels[final_pde_level].lo_bit[0];
82} 83}
@@ -93,9 +94,10 @@ static void __nvgpu_vm_free_entries(struct vm_gk20a *vm,
93 } 94 }
94 95
95 if (pd->entries) { 96 if (pd->entries) {
96 for (i = 0; i < pd->num_entries; i++) 97 for (i = 0; i < pd->num_entries; i++) {
97 __nvgpu_vm_free_entries(vm, &pd->entries[i], 98 __nvgpu_vm_free_entries(vm, &pd->entries[i],
98 level + 1); 99 level + 1);
100 }
99 nvgpu_vfree(vm->mm->g, pd->entries); 101 nvgpu_vfree(vm->mm->g, pd->entries);
100 pd->entries = NULL; 102 pd->entries = NULL;
101 } 103 }
@@ -112,8 +114,9 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
112 if (!pdb->entries) 114 if (!pdb->entries)
113 return; 115 return;
114 116
115 for (i = 0; i < pdb->num_entries; i++) 117 for (i = 0; i < pdb->num_entries; i++) {
116 __nvgpu_vm_free_entries(vm, &pdb->entries[i], 1); 118 __nvgpu_vm_free_entries(vm, &pdb->entries[i], 1);
119 }
117 120
118 nvgpu_vfree(g, pdb->entries); 121 nvgpu_vfree(g, pdb->entries);
119 pdb->entries = NULL; 122 pdb->entries = NULL;
@@ -750,8 +753,9 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
750 nvgpu_vm_mapping_batch_start(&batch); 753 nvgpu_vm_mapping_batch_start(&batch);
751 vm->kref_put_batch = &batch; 754 vm->kref_put_batch = &batch;
752 755
753 for (i = 0; i < num_buffers; ++i) 756 for (i = 0; i < num_buffers; ++i) {
754 nvgpu_ref_put(&mapped_buffers[i]->ref, __nvgpu_vm_unmap_ref); 757 nvgpu_ref_put(&mapped_buffers[i]->ref, __nvgpu_vm_unmap_ref);
758 }
755 759
756 vm->kref_put_batch = NULL; 760 vm->kref_put_batch = NULL;
757 nvgpu_vm_mapping_batch_finish_locked(vm, &batch); 761 nvgpu_vm_mapping_batch_finish_locked(vm, &batch);