diff options
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vm.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/vm.c | 76 |
1 file changed, 65 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c index 58e2da13..86d8bec9 100644 --- a/drivers/gpu/nvgpu/common/linux/vm.c +++ b/drivers/gpu/nvgpu/common/linux/vm.c | |||
@@ -177,11 +177,46 @@ static u64 __nvgpu_vm_find_mapping(struct vm_gk20a *vm, | |||
177 | return mapped_buffer->addr; | 177 | return mapped_buffer->addr; |
178 | } | 178 | } |
179 | 179 | ||
180 | static int setup_bfr_kind_fields(struct buffer_attrs *bfr, s16 compr_kind, | ||
181 | s16 incompr_kind, u32 flags) | ||
182 | { | ||
183 | if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) { | ||
184 | /* were we supplied with a kind in either parameter? */ | ||
185 | if ((compr_kind < 0 || compr_kind >= NV_KIND_ATTR_SIZE) && | ||
186 | (incompr_kind < 0 || incompr_kind >= NV_KIND_ATTR_SIZE)) | ||
187 | return -EINVAL; | ||
188 | |||
189 | if (compr_kind != NV_KIND_INVALID) { | ||
190 | bfr->use_kind_v = true; | ||
191 | bfr->kind_v = (u8)compr_kind; | ||
192 | } | ||
193 | |||
194 | if (incompr_kind != NV_KIND_INVALID) { | ||
195 | bfr->use_uc_kind_v = true; | ||
196 | bfr->uc_kind_v = (u8)incompr_kind; | ||
197 | } | ||
198 | } else { | ||
199 | if (compr_kind < 0 || compr_kind >= NV_KIND_ATTR_SIZE) | ||
200 | return -EINVAL; | ||
201 | |||
202 | bfr->use_kind_v = true; | ||
203 | bfr->kind_v = (u8)compr_kind; | ||
204 | |||
205 | /* | ||
206 | * Note: setup_buffer_kind_and_compression() will | ||
207 | * figure out uc_kind_v or return an error | ||
208 | */ | ||
209 | } | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
180 | u64 nvgpu_vm_map(struct vm_gk20a *vm, | 214 | u64 nvgpu_vm_map(struct vm_gk20a *vm, |
181 | struct dma_buf *dmabuf, | 215 | struct dma_buf *dmabuf, |
182 | u64 offset_align, | 216 | u64 offset_align, |
183 | u32 flags, | 217 | u32 flags, |
184 | int kind, | 218 | s16 compr_kind, |
219 | s16 incompr_kind, | ||
185 | bool user_mapped, | 220 | bool user_mapped, |
186 | int rw_flag, | 221 | int rw_flag, |
187 | u64 buffer_offset, | 222 | u64 buffer_offset, |
@@ -203,6 +238,22 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm, | |||
203 | u32 ctag_offset; | 238 | u32 ctag_offset; |
204 | enum nvgpu_aperture aperture; | 239 | enum nvgpu_aperture aperture; |
205 | 240 | ||
241 | /* | ||
242 | * The kind used as part of the key for map caching. HW may | ||
243 | * actually be programmed with the fallback kind in case the | ||
244 | * key kind is compressible but we're out of comptags. | ||
245 | */ | ||
246 | s16 map_key_kind; | ||
247 | |||
248 | if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) { | ||
249 | if (compr_kind != NV_KIND_INVALID) | ||
250 | map_key_kind = compr_kind; | ||
251 | else | ||
252 | map_key_kind = incompr_kind; | ||
253 | } else { | ||
254 | map_key_kind = compr_kind; | ||
255 | } | ||
256 | |||
206 | if (user_mapped && vm->userspace_managed && | 257 | if (user_mapped && vm->userspace_managed && |
207 | !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) { | 258 | !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) { |
208 | nvgpu_err(g, "non-fixed-offset mapping not available on " | 259 | nvgpu_err(g, "non-fixed-offset mapping not available on " |
@@ -216,7 +267,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm, | |||
216 | if (!vm->userspace_managed) { | 267 | if (!vm->userspace_managed) { |
217 | map_offset = __nvgpu_vm_find_mapping( | 268 | map_offset = __nvgpu_vm_find_mapping( |
218 | vm, dmabuf, offset_align, | 269 | vm, dmabuf, offset_align, |
219 | flags, kind, | 270 | flags, map_key_kind, |
220 | user_mapped, rw_flag); | 271 | user_mapped, rw_flag); |
221 | if (map_offset) { | 272 | if (map_offset) { |
222 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 273 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
@@ -239,12 +290,10 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm, | |||
239 | goto clean_up; | 290 | goto clean_up; |
240 | } | 291 | } |
241 | 292 | ||
242 | if (kind >= NV_KIND_ATTR_SIZE) { | 293 | err = setup_bfr_kind_fields(&bfr, compr_kind, incompr_kind, flags); |
243 | err = -EINVAL; | 294 | if (err) |
244 | goto clean_up; | 295 | goto clean_up; |
245 | } else { | 296 | |
246 | bfr.kind_v = (u8)kind; | ||
247 | } | ||
248 | bfr.size = dmabuf->size; | 297 | bfr.size = dmabuf->size; |
249 | sgl = bfr.sgt->sgl; | 298 | sgl = bfr.sgt->sgl; |
250 | 299 | ||
@@ -306,10 +355,15 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm, | |||
306 | err = gk20a_alloc_comptags(g, dev, dmabuf, | 355 | err = gk20a_alloc_comptags(g, dev, dmabuf, |
307 | ctag_allocator, | 356 | ctag_allocator, |
308 | bfr.ctag_lines); | 357 | bfr.ctag_lines); |
309 | if (err) { | 358 | if (unlikely(err)) { |
310 | /* ok to fall back here if we ran out */ | ||
311 | /* TBD: we can partially alloc ctags as well... */ | 359 | /* TBD: we can partially alloc ctags as well... */ |
312 | bfr.kind_v = bfr.uc_kind_v; | 360 | if (bfr.use_uc_kind_v) { |
361 | /* no comptags, but fallback kind available */ | ||
362 | bfr.kind_v = bfr.uc_kind_v; | ||
363 | } else { | ||
364 | nvgpu_err(g, "comptag alloc failed and no fallback kind specified"); | ||
365 | goto clean_up; | ||
366 | } | ||
313 | } else { | 367 | } else { |
314 | gk20a_get_comptags(dev, | 368 | gk20a_get_comptags(dev, |
315 | dmabuf, &comptags); | 369 | dmabuf, &comptags); |
@@ -371,7 +425,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm, | |||
371 | mapped_buffer->ctag_allocated_lines = bfr.ctag_allocated_lines; | 425 | mapped_buffer->ctag_allocated_lines = bfr.ctag_allocated_lines; |
372 | mapped_buffer->vm = vm; | 426 | mapped_buffer->vm = vm; |
373 | mapped_buffer->flags = flags; | 427 | mapped_buffer->flags = flags; |
374 | mapped_buffer->kind = kind; | 428 | mapped_buffer->kind = map_key_kind; |
375 | mapped_buffer->va_allocated = va_allocated; | 429 | mapped_buffer->va_allocated = va_allocated; |
376 | mapped_buffer->user_mapped = user_mapped ? 1 : 0; | 430 | mapped_buffer->user_mapped = user_mapped ? 1 : 0; |
377 | mapped_buffer->own_mem_ref = user_mapped; | 431 | mapped_buffer->own_mem_ref = user_mapped; |