diff options
author | Alex Waterman <alexw@nvidia.com> | 2018-08-14 14:30:48 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-09-05 23:38:42 -0400 |
commit | b44c7fdb114a63ab98fffc0f246776b56399ff64 (patch) | |
tree | c523c2ea516aaed3b68271a77cf88ffa132e329d /drivers/gpu/nvgpu/os/linux | |
parent | ef851272e5201f343c9b287a9eacfc25d4912276 (diff) |
gpu: nvgpu: Move common DMA code to common/mm
This migrates the common DMA code (os agnostic) to the
common directory. This new unit will be the common DMA
allocator that lets users allocate SYSMEM, VIDMEM, or
either. Other units will be responsible for actually
handling the mechanics of allocating VIDMEM or SYSMEM.
Also update the names of the DMA related files so that
tmake doesn't complain about duplicate C file names. To
do this call the common DMA file dma.c and prepend the
OS to the other DMA files. So now we have:
common/mm/dma.c
os/posix/posix-dma.c
os/linux/linux-dma.c
JIRA NVGPU-990
Change-Id: I22d2d41803ad89be7d9c28f87864ce4fedf10836
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1799807
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/os/linux')
-rw-r--r-- | drivers/gpu/nvgpu/os/linux/linux-dma.c (renamed from drivers/gpu/nvgpu/os/linux/dma.c) | 173 |
1 file changed, 2 insertions, 171 deletions
diff --git a/drivers/gpu/nvgpu/os/linux/dma.c b/drivers/gpu/nvgpu/os/linux/linux-dma.c index 77669493..a42e7cb5 100644 --- a/drivers/gpu/nvgpu/os/linux/dma.c +++ b/drivers/gpu/nvgpu/os/linux/linux-dma.c | |||
@@ -174,45 +174,6 @@ static void nvgpu_dma_flags_to_attrs(struct dma_attrs *attrs, | |||
174 | #undef ATTR_ARG | 174 | #undef ATTR_ARG |
175 | } | 175 | } |
176 | 176 | ||
177 | int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem) | ||
178 | { | ||
179 | return nvgpu_dma_alloc_flags(g, 0, size, mem); | ||
180 | } | ||
181 | |||
182 | int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size, | ||
183 | struct nvgpu_mem *mem) | ||
184 | { | ||
185 | if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) { | ||
186 | /* | ||
187 | * Force the no-kernel-mapping flag on because we don't support | ||
188 | * the lack of it for vidmem - the user should not care when | ||
189 | * using nvgpu_gmmu_alloc_map and it's vidmem, or if there's a | ||
190 | * difference, the user should use the flag explicitly anyway. | ||
191 | * | ||
192 | * Incoming flags are ignored here, since bits other than the | ||
193 | * no-kernel-mapping flag are ignored by the vidmem mapping | ||
194 | * functions anyway. | ||
195 | */ | ||
196 | int err = nvgpu_dma_alloc_flags_vid(g, | ||
197 | NVGPU_DMA_NO_KERNEL_MAPPING, | ||
198 | size, mem); | ||
199 | |||
200 | if (!err) | ||
201 | return 0; | ||
202 | /* | ||
203 | * Fall back to sysmem (which may then also fail) in case | ||
204 | * vidmem is exhausted. | ||
205 | */ | ||
206 | } | ||
207 | |||
208 | return nvgpu_dma_alloc_flags_sys(g, flags, size, mem); | ||
209 | } | ||
210 | |||
211 | int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem) | ||
212 | { | ||
213 | return nvgpu_dma_alloc_flags_sys(g, 0, size, mem); | ||
214 | } | ||
215 | |||
216 | int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags, | 177 | int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags, |
217 | size_t size, struct nvgpu_mem *mem) | 178 | size_t size, struct nvgpu_mem *mem) |
218 | { | 179 | { |
@@ -302,25 +263,6 @@ fail_free_dma: | |||
302 | return err; | 263 | return err; |
303 | } | 264 | } |
304 | 265 | ||
305 | int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem) | ||
306 | { | ||
307 | return nvgpu_dma_alloc_flags_vid(g, | ||
308 | NVGPU_DMA_NO_KERNEL_MAPPING, size, mem); | ||
309 | } | ||
310 | |||
311 | int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags, | ||
312 | size_t size, struct nvgpu_mem *mem) | ||
313 | { | ||
314 | return nvgpu_dma_alloc_flags_vid_at(g, flags, size, mem, 0); | ||
315 | } | ||
316 | |||
317 | int nvgpu_dma_alloc_vid_at(struct gk20a *g, | ||
318 | size_t size, struct nvgpu_mem *mem, u64 at) | ||
319 | { | ||
320 | return nvgpu_dma_alloc_flags_vid_at(g, | ||
321 | NVGPU_DMA_NO_KERNEL_MAPPING, size, mem, at); | ||
322 | } | ||
323 | |||
324 | int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags, | 266 | int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags, |
325 | size_t size, struct nvgpu_mem *mem, u64 at) | 267 | size_t size, struct nvgpu_mem *mem, u64 at) |
326 | { | 268 | { |
@@ -405,97 +347,7 @@ fail_physfree: | |||
405 | #endif | 347 | #endif |
406 | } | 348 | } |
407 | 349 | ||
408 | int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size, | 350 | void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem) |
409 | struct nvgpu_mem *mem) | ||
410 | { | ||
411 | return nvgpu_dma_alloc_map_flags(vm, 0, size, mem); | ||
412 | } | ||
413 | |||
414 | int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags, | ||
415 | size_t size, struct nvgpu_mem *mem) | ||
416 | { | ||
417 | if (!nvgpu_is_enabled(gk20a_from_vm(vm), NVGPU_MM_UNIFIED_MEMORY)) { | ||
418 | /* | ||
419 | * Force the no-kernel-mapping flag on because we don't support | ||
420 | * the lack of it for vidmem - the user should not care when | ||
421 | * using nvgpu_dma_alloc_map and it's vidmem, or if there's a | ||
422 | * difference, the user should use the flag explicitly anyway. | ||
423 | */ | ||
424 | int err = nvgpu_dma_alloc_map_flags_vid(vm, | ||
425 | flags | NVGPU_DMA_NO_KERNEL_MAPPING, | ||
426 | size, mem); | ||
427 | |||
428 | if (!err) | ||
429 | return 0; | ||
430 | /* | ||
431 | * Fall back to sysmem (which may then also fail) in case | ||
432 | * vidmem is exhausted. | ||
433 | */ | ||
434 | } | ||
435 | |||
436 | return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem); | ||
437 | } | ||
438 | |||
439 | int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size, | ||
440 | struct nvgpu_mem *mem) | ||
441 | { | ||
442 | return nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem); | ||
443 | } | ||
444 | |||
445 | int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags, | ||
446 | size_t size, struct nvgpu_mem *mem) | ||
447 | { | ||
448 | int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem); | ||
449 | |||
450 | if (err) | ||
451 | return err; | ||
452 | |||
453 | mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0, | ||
454 | gk20a_mem_flag_none, false, | ||
455 | mem->aperture); | ||
456 | if (!mem->gpu_va) { | ||
457 | err = -ENOMEM; | ||
458 | goto fail_free; | ||
459 | } | ||
460 | |||
461 | return 0; | ||
462 | |||
463 | fail_free: | ||
464 | nvgpu_dma_free(vm->mm->g, mem); | ||
465 | return err; | ||
466 | } | ||
467 | |||
468 | int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size, | ||
469 | struct nvgpu_mem *mem) | ||
470 | { | ||
471 | return nvgpu_dma_alloc_map_flags_vid(vm, | ||
472 | NVGPU_DMA_NO_KERNEL_MAPPING, size, mem); | ||
473 | } | ||
474 | |||
475 | int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags, | ||
476 | size_t size, struct nvgpu_mem *mem) | ||
477 | { | ||
478 | int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem); | ||
479 | |||
480 | if (err) | ||
481 | return err; | ||
482 | |||
483 | mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0, | ||
484 | gk20a_mem_flag_none, false, | ||
485 | mem->aperture); | ||
486 | if (!mem->gpu_va) { | ||
487 | err = -ENOMEM; | ||
488 | goto fail_free; | ||
489 | } | ||
490 | |||
491 | return 0; | ||
492 | |||
493 | fail_free: | ||
494 | nvgpu_dma_free(vm->mm->g, mem); | ||
495 | return err; | ||
496 | } | ||
497 | |||
498 | static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem) | ||
499 | { | 351 | { |
500 | struct device *d = dev_from_gk20a(g); | 352 | struct device *d = dev_from_gk20a(g); |
501 | 353 | ||
@@ -551,7 +403,7 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem) | |||
551 | mem->aperture = APERTURE_INVALID; | 403 | mem->aperture = APERTURE_INVALID; |
552 | } | 404 | } |
553 | 405 | ||
554 | static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem) | 406 | void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem) |
555 | { | 407 | { |
556 | #if defined(CONFIG_GK20A_VIDMEM) | 408 | #if defined(CONFIG_GK20A_VIDMEM) |
557 | size_t mem_size = mem->size; | 409 | size_t mem_size = mem->size; |
@@ -590,27 +442,6 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem) | |||
590 | #endif | 442 | #endif |
591 | } | 443 | } |
592 | 444 | ||
593 | void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem) | ||
594 | { | ||
595 | switch (mem->aperture) { | ||
596 | case APERTURE_SYSMEM: | ||
597 | return nvgpu_dma_free_sys(g, mem); | ||
598 | case APERTURE_VIDMEM: | ||
599 | return nvgpu_dma_free_vid(g, mem); | ||
600 | default: | ||
601 | break; /* like free() on "null" memory */ | ||
602 | } | ||
603 | } | ||
604 | |||
605 | void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem) | ||
606 | { | ||
607 | if (mem->gpu_va) | ||
608 | nvgpu_gmmu_unmap(vm, mem, mem->gpu_va); | ||
609 | mem->gpu_va = 0; | ||
610 | |||
611 | nvgpu_dma_free(vm->mm->g, mem); | ||
612 | } | ||
613 | |||
614 | int nvgpu_get_sgtable_attrs(struct gk20a *g, struct sg_table **sgt, | 445 | int nvgpu_get_sgtable_attrs(struct gk20a *g, struct sg_table **sgt, |
615 | void *cpuva, u64 iova, size_t size, unsigned long flags) | 446 | void *cpuva, u64 iova, size_t size, unsigned long flags) |
616 | { | 447 | { |