Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm_area.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm_area.c | 34 ++++++++++++++++++++++------------
1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index 5a28b7bc..b8fecbfc 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,8 +34,9 @@ struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr)
 	nvgpu_list_for_each_entry(vm_area, &vm->vm_area_list,
 				  nvgpu_vm_area, vm_area_list) {
 		if (addr >= vm_area->addr &&
-		    addr < (u64)vm_area->addr + (u64)vm_area->size)
+		    addr < (u64)vm_area->addr + (u64)vm_area->size) {
 			return vm_area;
+		}
 	}
 
 	return NULL;
@@ -105,12 +106,14 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 		  page_size, pages, *addr, flags);
 
 	for (; pgsz_idx < gmmu_nr_page_sizes; pgsz_idx++) {
-		if (vm->gmmu_page_sizes[pgsz_idx] == page_size)
+		if (vm->gmmu_page_sizes[pgsz_idx] == page_size) {
 			break;
+		}
 	}
 
-	if (pgsz_idx > gmmu_page_size_big)
+	if (pgsz_idx > gmmu_page_size_big) {
 		return -EINVAL;
+	}
 
 	/*
 	 * pgsz_idx isn't likely to get too crazy, since it starts at 0 and
@@ -119,26 +122,30 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 	 */
 	nvgpu_speculation_barrier();
 
-	if (!vm->big_pages && pgsz_idx == gmmu_page_size_big)
+	if (!vm->big_pages && pgsz_idx == gmmu_page_size_big) {
 		return -EINVAL;
+	}
 
 	vm_area = nvgpu_kzalloc(g, sizeof(*vm_area));
-	if (!vm_area)
+	if (!vm_area) {
 		goto clean_up_err;
+	}
 
 	vma = vm->vma[pgsz_idx];
-	if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET)
+	if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) {
 		vaddr_start = nvgpu_alloc_fixed(vma, *addr,
 						(u64)pages *
 						(u64)page_size,
 						page_size);
-	else
+	} else {
 		vaddr_start = nvgpu_alloc(vma,
 					  (u64)pages *
 					  (u64)page_size);
+	}
 
-	if (!vaddr_start)
+	if (!vaddr_start) {
 		goto clean_up_err;
+	}
 
 	vm_area->flags = flags;
 	vm_area->addr = vaddr_start;
@@ -179,10 +186,12 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 	return 0;
 
 clean_up_err:
-	if (vaddr_start)
+	if (vaddr_start) {
 		nvgpu_free(vma, vaddr_start);
-	if (vm_area)
+	}
+	if (vm_area) {
 		nvgpu_kfree(g, vm_area);
+	}
 	return -ENOMEM;
 }
 
@@ -219,7 +228,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr)
 	}
 
 	/* if this was a sparse mapping, free the va */
-	if (vm_area->sparse)
+	if (vm_area->sparse) {
 		g->ops.mm.gmmu_unmap(vm,
 				     vm_area->addr,
 				     vm_area->size,
@@ -228,6 +237,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr)
 			     gk20a_mem_flag_none,
 			     true,
 			     NULL);
+	}
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
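Every hunk above applies the same mechanical change: each single-statement if/else body gains braces. Brace-always style (required by safety coding standards such as MISRA C) guards against a classic maintenance bug, where a statement later added under an unbraced conditional looks guarded by the indentation but is not. A minimal standalone C sketch of that hazard; the function below is illustrative only and not from the nvgpu tree:

/*
 * Illustrative sketch: why single-statement bodies get braces.
 * The second "guarded" line compiles fine but runs unconditionally.
 */
#include <stdio.h>

static const char *classify(int addr)
{
	if (addr < 0)
		return "invalid";
		printf("rejected %d\n", addr);	/* indentation lies: this line
						 * sits outside the if and runs
						 * for every non-negative addr */

	return "ok";
}

int main(void)
{
	printf("%s\n", classify(42));	/* prints "rejected 42", then "ok" */
	return 0;
}

One unrelated detail worth noting from the context of the nvgpu_vm_area_alloc() hunks: pgsz_idx is derived from the caller-supplied page_size and is later used to index vm->vma[], so the pre-existing nvgpu_speculation_barrier() call evidently serves as a Spectre-v1 style bound on speculative out-of-range indexing; the brace changes leave it untouched.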