path: root/drivers/gpu/nvgpu/os/linux/dma.c
author    Alex Waterman <alexw@nvidia.com>    2018-08-14 14:30:48 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-09-05 23:38:42 -0400
commit    b44c7fdb114a63ab98fffc0f246776b56399ff64 (patch)
tree      c523c2ea516aaed3b68271a77cf88ffa132e329d /drivers/gpu/nvgpu/os/linux/dma.c
parent    ef851272e5201f343c9b287a9eacfc25d4912276 (diff)
gpu: nvgpu: Move common DMA code to common/mm
This migrates the common DMA code (OS agnostic) to the common directory.
This new unit will be the common DMA allocator that lets users allocate
SYSMEM, VIDMEM, or either. Other units will be responsible for actually
handling the mechanics of allocating VIDMEM or SYSMEM.

Also update the names of the DMA related files so that tmake doesn't
complain about duplicate C file names. To do this, call the common DMA
file dma.c and prepend the OS to the other DMA files. So now we have:

  common/mm/dma.c
  os/posix/posix-dma.c
  os/linux/linux-dma.c

JIRA NVGPU-990

Change-Id: I22d2d41803ad89be7d9c28f87864ce4fedf10836
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1799807
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
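
As a rough sketch of the resulting split, a caller uses the common allocator
entry points while the OS layer supplies the mechanics (the caller below is
hypothetical; only the nvgpu_dma_* functions come from this code):

    struct nvgpu_mem mem = { };
    int err;

    /* Common code picks VIDMEM when available, else falls back to SYSMEM. */
    err = nvgpu_dma_alloc(g, SZ_64K, &mem);
    if (err)
        return err;

    nvgpu_dma_free(g, &mem);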
Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/dma.c')
-rw-r--r--  drivers/gpu/nvgpu/os/linux/dma.c   702
1 file changed, 0 insertions(+), 702 deletions(-)
diff --git a/drivers/gpu/nvgpu/os/linux/dma.c b/drivers/gpu/nvgpu/os/linux/dma.c
deleted file mode 100644
index 77669493..00000000
--- a/drivers/gpu/nvgpu/os/linux/dma.c
+++ /dev/null
@@ -1,702 +0,0 @@
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/version.h>

#include <nvgpu/log.h>
#include <nvgpu/dma.h>
#include <nvgpu/lock.h>
#include <nvgpu/bug.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/kmem.h>
#include <nvgpu/enabled.h>
#include <nvgpu/vidmem.h>

#include <nvgpu/linux/dma.h>

#include "gk20a/gk20a.h"

#include "platform_gk20a.h"
#include "os_linux.h"
#include "dmabuf_vidmem.h"

#ifdef __DMA_ATTRS_LONGS
#define NVGPU_DEFINE_DMA_ATTRS(x)				\
	struct dma_attrs x = {					\
		.flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 },	\
	}
#define NVGPU_DMA_ATTR(attrs) &attrs
#else
#define NVGPU_DEFINE_DMA_ATTRS(attrs) unsigned long attrs = 0
#define NVGPU_DMA_ATTR(attrs) attrs
#endif

/*
 * Enough to hold all the possible flags in string form. When a new flag is
 * added it must be added here as well!
 */
#define NVGPU_DMA_STR_SIZE					\
	sizeof("NO_KERNEL_MAPPING FORCE_CONTIGUOUS")

/*
 * The returned string is kmalloc()ed here but must be freed by the caller.
 */
static char *nvgpu_dma_flags_to_str(struct gk20a *g, unsigned long flags)
{
	char *buf = nvgpu_kzalloc(g, NVGPU_DMA_STR_SIZE);
	int bytes_available = NVGPU_DMA_STR_SIZE;

	/* The caller already handles a NULL return; see __dma_dbg(). */
	if (!buf)
		return NULL;

	/*
	 * Return the empty buffer if there are no flags. This makes it easier
	 * on the calling code, which can just print the result instead of
	 * needing any if (NULL) type logic.
	 */
	if (!flags)
		return buf;

#define APPEND_FLAG(flag, str_flag)					\
	do {								\
		if (flags & flag) {					\
			strncat(buf, str_flag, bytes_available);	\
			bytes_available -= strlen(str_flag);		\
		}							\
	} while (0)

	APPEND_FLAG(NVGPU_DMA_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING ");
	APPEND_FLAG(NVGPU_DMA_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS ");
#undef APPEND_FLAG

	return buf;
}

/**
 * __dma_dbg - Debug print for DMA allocs and frees.
 *
 * @g:     The GPU.
 * @size:  The requested size of the alloc (size_t).
 * @flags: The flags (unsigned long).
 * @type:  A string describing the type (i.e. sysmem or vidmem).
 * @what:  A string with 'alloc' or 'free'.
 *
 * @flags is the DMA flags. If there are none or it doesn't make sense to
 * print flags just pass 0.
 *
 * Please use dma_dbg_alloc() and dma_dbg_free() instead of this function.
 */
static void __dma_dbg(struct gk20a *g, size_t size, unsigned long flags,
		      const char *type, const char *what,
		      const char *func, int line)
{
	char *flags_str = NULL;

	/*
	 * Don't bother making the flags_str if debugging is not enabled.
	 * This saves a malloc and a free.
	 */
	if (!nvgpu_log_mask_enabled(g, gpu_dbg_dma))
		return;

	flags_str = nvgpu_dma_flags_to_str(g, flags);

	__nvgpu_log_dbg(g, gpu_dbg_dma,
			func, line,
			"DMA %s: [%s] size=%-7zu "
			"aligned=%-7zu total=%-10llukB %s",
			what, type,
			size, PAGE_ALIGN(size),
			g->dma_memory_used >> 10,
			flags_str);

	if (flags_str)
		nvgpu_kfree(g, flags_str);
}

#define dma_dbg_alloc(g, size, flags, type)				\
	__dma_dbg(g, size, flags, type, "alloc", __func__, __LINE__)
#define dma_dbg_free(g, size, flags, type)				\
	__dma_dbg(g, size, flags, type, "free", __func__, __LINE__)

/*
 * For after the DMA alloc is done.
 */
#define __dma_dbg_done(g, size, type, what)			\
	nvgpu_log(g, gpu_dbg_dma,				\
		  "DMA %s: [%s] size=%-7zu Done!",		\
		  what, type, size)

#define dma_dbg_alloc_done(g, size, type)			\
	__dma_dbg_done(g, size, type, "alloc")
#define dma_dbg_free_done(g, size, type)			\
	__dma_dbg_done(g, size, type, "free")

#if defined(CONFIG_GK20A_VIDMEM)
static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, u64 at,
			     size_t size)
{
	u64 addr = 0;

	if (at)
		addr = nvgpu_alloc_fixed(allocator, at, size, 0);
	else
		addr = nvgpu_alloc(allocator, size);

	return addr;
}
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
static void nvgpu_dma_flags_to_attrs(unsigned long *attrs,
				     unsigned long flags)
#define ATTR_ARG(x) *x
#else
static void nvgpu_dma_flags_to_attrs(struct dma_attrs *attrs,
				     unsigned long flags)
#define ATTR_ARG(x) x
#endif
{
	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, ATTR_ARG(attrs));
	if (flags & NVGPU_DMA_FORCE_CONTIGUOUS)
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, ATTR_ARG(attrs));
#undef ATTR_ARG
}

int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
	return nvgpu_dma_alloc_flags(g, 0, size, mem);
}

int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
			  struct nvgpu_mem *mem)
{
	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) {
		/*
		 * Force the no-kernel-mapping flag on because we don't support
		 * the lack of it for vidmem - the user should not care when
		 * using nvgpu_gmmu_alloc_map and it's vidmem, or if there's a
		 * difference, the user should use the flag explicitly anyway.
		 *
		 * Incoming flags are ignored here, since bits other than the
		 * no-kernel-mapping flag are ignored by the vidmem mapping
		 * functions anyway.
		 */
		int err = nvgpu_dma_alloc_flags_vid(g,
				NVGPU_DMA_NO_KERNEL_MAPPING,
				size, mem);

		if (!err)
			return 0;
		/*
		 * Fall back to sysmem (which may then also fail) in case
		 * vidmem is exhausted.
		 */
	}

	return nvgpu_dma_alloc_flags_sys(g, flags, size, mem);
}

int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
	return nvgpu_dma_alloc_flags_sys(g, 0, size, mem);
}

int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
			      size_t size, struct nvgpu_mem *mem)
{
	struct device *d = dev_from_gk20a(g);
	int err;
	dma_addr_t iova;
	NVGPU_DEFINE_DMA_ATTRS(dma_attrs);
	void *alloc_ret;

	if (nvgpu_mem_is_valid(mem)) {
		nvgpu_warn(g, "memory leak!");
		WARN_ON(1);
	}

	/*
	 * WAR for IO coherent chips: the DMA API does not seem to generate
	 * mappings that work correctly. Unclear why - Bug ID: 2040115.
	 *
	 * Basically we just tell the DMA API not to map with NO_KERNEL_MAPPING
	 * and then make a vmap() ourselves.
	 */
	if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
		flags |= NVGPU_DMA_NO_KERNEL_MAPPING;

	/*
	 * Before the debug print so we see this in the total. But during
	 * cleanup in the fail paths this has to be subtracted.
	 */
	g->dma_memory_used += PAGE_ALIGN(size);

	dma_dbg_alloc(g, size, flags, "sysmem");

	/*
	 * Save the old size but for actual allocation purposes the size is
	 * going to be page aligned.
	 */
	mem->size = size;
	size = PAGE_ALIGN(size);

	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);

	alloc_ret = dma_alloc_attrs(d, size, &iova,
				    GFP_KERNEL|__GFP_ZERO,
				    NVGPU_DMA_ATTR(dma_attrs));
	if (!alloc_ret) {
		/* Undo the accounting done above for this failed alloc. */
		g->dma_memory_used -= size;
		return -ENOMEM;
	}

	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
		mem->priv.pages = alloc_ret;
		err = nvgpu_get_sgtable_from_pages(g, &mem->priv.sgt,
						   mem->priv.pages,
						   iova, size);
	} else {
		mem->cpu_va = alloc_ret;
		err = nvgpu_get_sgtable_attrs(g, &mem->priv.sgt, mem->cpu_va,
					      iova, size, flags);
	}
	if (err)
		goto fail_free_dma;

	if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM)) {
		mem->cpu_va = vmap(mem->priv.pages,
				   size >> PAGE_SHIFT,
				   0, PAGE_KERNEL);
		if (!mem->cpu_va) {
			err = -ENOMEM;
			goto fail_free_sgt;
		}
	}

	mem->aligned_size = size;
	mem->aperture = APERTURE_SYSMEM;
	mem->priv.flags = flags;

	dma_dbg_alloc_done(g, mem->size, "sysmem");

	return 0;

fail_free_sgt:
	nvgpu_free_sgtable(g, &mem->priv.sgt);
fail_free_dma:
	dma_free_attrs(d, size, alloc_ret, iova, NVGPU_DMA_ATTR(dma_attrs));
	mem->cpu_va = NULL;
	mem->priv.sgt = NULL;
	mem->size = 0;
	/* size is already page aligned here; mem->aligned_size is not set yet. */
	g->dma_memory_used -= size;
	return err;
}

int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
	return nvgpu_dma_alloc_flags_vid(g,
			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
}

int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags,
			      size_t size, struct nvgpu_mem *mem)
{
	return nvgpu_dma_alloc_flags_vid_at(g, flags, size, mem, 0);
}

int nvgpu_dma_alloc_vid_at(struct gk20a *g,
			   size_t size, struct nvgpu_mem *mem, u64 at)
{
	return nvgpu_dma_alloc_flags_vid_at(g,
			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem, at);
}

int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
				 size_t size, struct nvgpu_mem *mem, u64 at)
{
#if defined(CONFIG_GK20A_VIDMEM)
	u64 addr;
	int err;
	struct nvgpu_allocator *vidmem_alloc = g->mm.vidmem.cleared ?
		&g->mm.vidmem.allocator :
		&g->mm.vidmem.bootstrap_allocator;
	u64 before_pending;

	if (nvgpu_mem_is_valid(mem)) {
		nvgpu_warn(g, "memory leak!");
		WARN_ON(1);
	}

	dma_dbg_alloc(g, size, flags, "vidmem");

	mem->size = size;
	size = PAGE_ALIGN(size);

	if (!nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
		return -ENOSYS;

	/*
	 * Our own allocator doesn't have any flags yet, and we can't
	 * kernel-map these, so require explicit flags.
	 */
	WARN_ON(flags != NVGPU_DMA_NO_KERNEL_MAPPING);

	nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
	before_pending = atomic64_read(&g->mm.vidmem.bytes_pending.atomic_var);
	addr = __nvgpu_dma_alloc(vidmem_alloc, at, size);
	nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
	if (!addr) {
		/*
		 * If memory is known to be freed soon, let the user know that
		 * it may be available after a while.
		 */
		if (before_pending)
			return -EAGAIN;
		else
			return -ENOMEM;
	}

	if (at)
		mem->mem_flags |= NVGPU_MEM_FLAG_FIXED;

	mem->priv.sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
	if (!mem->priv.sgt) {
		err = -ENOMEM;
		goto fail_physfree;
	}

	err = sg_alloc_table(mem->priv.sgt, 1, GFP_KERNEL);
	if (err)
		goto fail_kfree;

	nvgpu_vidmem_set_page_alloc(mem->priv.sgt->sgl, addr);
	sg_set_page(mem->priv.sgt->sgl, NULL, size, 0);

	mem->aligned_size = size;
	mem->aperture = APERTURE_VIDMEM;
	mem->vidmem_alloc = (struct nvgpu_page_alloc *)(uintptr_t)addr;
	mem->allocator = vidmem_alloc;
	mem->priv.flags = flags;

	nvgpu_init_list_node(&mem->clear_list_entry);

	dma_dbg_alloc_done(g, mem->size, "vidmem");

	return 0;

fail_kfree:
	nvgpu_kfree(g, mem->priv.sgt);
fail_physfree:
	nvgpu_free(&g->mm.vidmem.allocator, addr);
	mem->size = 0;
	return err;
#else
	return -ENOSYS;
#endif
}
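
/*
 * Retry sketch for the -EAGAIN case above (illustrative only; the loop
 * bound and msleep() interval are assumptions, not nvgpu policy):
 *
 *	int i, err = -EAGAIN;
 *
 *	for (i = 0; i < 10 && err == -EAGAIN; i++) {
 *		err = nvgpu_dma_alloc_vid(g, SZ_1M, &mem);
 *		if (err == -EAGAIN)
 *			msleep(10); // pending clears may release vidmem
 *	}
 */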

int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
			struct nvgpu_mem *mem)
{
	return nvgpu_dma_alloc_map_flags(vm, 0, size, mem);
}

int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
			      size_t size, struct nvgpu_mem *mem)
{
	if (!nvgpu_is_enabled(gk20a_from_vm(vm), NVGPU_MM_UNIFIED_MEMORY)) {
		/*
		 * Force the no-kernel-mapping flag on because we don't support
		 * the lack of it for vidmem - the user should not care when
		 * using nvgpu_dma_alloc_map and it's vidmem, or if there's a
		 * difference, the user should use the flag explicitly anyway.
		 */
		int err = nvgpu_dma_alloc_map_flags_vid(vm,
				flags | NVGPU_DMA_NO_KERNEL_MAPPING,
				size, mem);

		if (!err)
			return 0;
		/*
		 * Fall back to sysmem (which may then also fail) in case
		 * vidmem is exhausted.
		 */
	}

	return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
}

int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
			    struct nvgpu_mem *mem)
{
	return nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem);
}

int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
				  size_t size, struct nvgpu_mem *mem)
{
	int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem);

	if (err)
		return err;

	mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0,
				     gk20a_mem_flag_none, false,
				     mem->aperture);
	if (!mem->gpu_va) {
		err = -ENOMEM;
		goto fail_free;
	}

	return 0;

fail_free:
	nvgpu_dma_free(vm->mm->g, mem);
	return err;
}

int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
			    struct nvgpu_mem *mem)
{
	return nvgpu_dma_alloc_map_flags_vid(vm,
			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
}

int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
				  size_t size, struct nvgpu_mem *mem)
{
	int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem);

	if (err)
		return err;

	mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0,
				     gk20a_mem_flag_none, false,
				     mem->aperture);
	if (!mem->gpu_va) {
		err = -ENOMEM;
		goto fail_free;
	}

	return 0;

fail_free:
	nvgpu_dma_free(vm->mm->g, mem);
	return err;
}

static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
{
	struct device *d = dev_from_gk20a(g);

	g->dma_memory_used -= mem->aligned_size;

	dma_dbg_free(g, mem->size, mem->priv.flags, "sysmem");

	if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) &&
	    !(mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) &&
	    (mem->cpu_va || mem->priv.pages)) {
		/*
		 * Free side of WAR for bug 2040115.
		 */
		if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
			vunmap(mem->cpu_va);

		if (mem->priv.flags) {
			NVGPU_DEFINE_DMA_ATTRS(dma_attrs);

			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->priv.flags);

			if (mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
				dma_free_attrs(d, mem->aligned_size,
					mem->priv.pages,
					sg_dma_address(mem->priv.sgt->sgl),
					NVGPU_DMA_ATTR(dma_attrs));
			} else {
				dma_free_attrs(d, mem->aligned_size,
					mem->cpu_va,
					sg_dma_address(mem->priv.sgt->sgl),
					NVGPU_DMA_ATTR(dma_attrs));
			}
		} else {
			dma_free_coherent(d, mem->aligned_size, mem->cpu_va,
					sg_dma_address(mem->priv.sgt->sgl));
		}
		mem->cpu_va = NULL;
		mem->priv.pages = NULL;
	}

	/*
	 * When this flag is set we expect that pages is still populated but
	 * not by the DMA API.
	 */
	if (mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA)
		nvgpu_kfree(g, mem->priv.pages);

	if (mem->priv.sgt)
		nvgpu_free_sgtable(g, &mem->priv.sgt);

	dma_dbg_free_done(g, mem->size, "sysmem");

	mem->size = 0;
	mem->aligned_size = 0;
	mem->aperture = APERTURE_INVALID;
}

static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
{
#if defined(CONFIG_GK20A_VIDMEM)
	size_t mem_size = mem->size;

	dma_dbg_free(g, mem->size, mem->priv.flags, "vidmem");

	/* Sanity check - only this flag is supported when allocating. */
	WARN_ON(mem->priv.flags != NVGPU_DMA_NO_KERNEL_MAPPING);

	if (mem->mem_flags & NVGPU_MEM_FLAG_USER_MEM) {
		int err = nvgpu_vidmem_clear_list_enqueue(g, mem);

		/*
		 * If there's an error here then that means we can't clear the
		 * vidmem. That's too bad; however, we still own the nvgpu_mem
		 * buf so we have to free that.
		 *
		 * We don't need to worry about the vidmem allocator itself
		 * since when that gets cleaned up in the driver shutdown path
		 * all the outstanding allocs are force freed.
		 */
		if (err)
			nvgpu_kfree(g, mem);
	} else {
		nvgpu_memset(g, mem, 0, 0, mem->aligned_size);
		nvgpu_free(mem->allocator,
			   (u64)nvgpu_vidmem_get_page_alloc(mem->priv.sgt->sgl));
		nvgpu_free_sgtable(g, &mem->priv.sgt);

		mem->size = 0;
		mem->aligned_size = 0;
		mem->aperture = APERTURE_INVALID;
	}

	dma_dbg_free_done(g, mem_size, "vidmem");
#endif
}

void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
{
	switch (mem->aperture) {
	case APERTURE_SYSMEM:
		return nvgpu_dma_free_sys(g, mem);
	case APERTURE_VIDMEM:
		return nvgpu_dma_free_vid(g, mem);
	default:
		break; /* like free() on "null" memory */
	}
}

void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
{
	if (mem->gpu_va)
		nvgpu_gmmu_unmap(vm, mem, mem->gpu_va);
	mem->gpu_va = 0;

	nvgpu_dma_free(vm->mm->g, mem);
}
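
/*
 * Lifecycle sketch pairing the alloc+map and unmap+free helpers above
 * (hypothetical caller; the vm pointer and buffer size are assumptions):
 *
 *	struct nvgpu_mem mem = { };
 *	int err = nvgpu_dma_alloc_map(vm, SZ_64K, &mem);
 *
 *	if (err)
 *		return err;
 *	// ... program the engine with mem.gpu_va ...
 *	nvgpu_dma_unmap_free(vm, &mem);
 */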

int nvgpu_get_sgtable_attrs(struct gk20a *g, struct sg_table **sgt,
			    void *cpuva, u64 iova, size_t size,
			    unsigned long flags)
{
	int err = 0;
	struct sg_table *tbl;
	NVGPU_DEFINE_DMA_ATTRS(dma_attrs);

	tbl = nvgpu_kzalloc(g, sizeof(struct sg_table));
	if (!tbl) {
		err = -ENOMEM;
		goto fail;
	}

	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
	err = dma_get_sgtable_attrs(dev_from_gk20a(g), tbl, cpuva, iova,
				    size, NVGPU_DMA_ATTR(dma_attrs));
	if (err)
		goto fail;

	sg_dma_address(tbl->sgl) = iova;
	*sgt = tbl;

	return 0;

fail:
	if (tbl)
		nvgpu_kfree(g, tbl);

	return err;
}

int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt,
		      void *cpuva, u64 iova, size_t size)
{
	return nvgpu_get_sgtable_attrs(g, sgt, cpuva, iova, size, 0);
}

int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt,
				 struct page **pages, u64 iova, size_t size)
{
	int err = 0;
	struct sg_table *tbl;

	tbl = nvgpu_kzalloc(g, sizeof(struct sg_table));
	if (!tbl) {
		err = -ENOMEM;
		goto fail;
	}

	err = sg_alloc_table_from_pages(tbl, pages,
					DIV_ROUND_UP(size, PAGE_SIZE),
					0, size, GFP_KERNEL);
	if (err)
		goto fail;

	sg_dma_address(tbl->sgl) = iova;
	*sgt = tbl;

	return 0;

fail:
	if (tbl)
		nvgpu_kfree(g, tbl);

	return err;
}

void nvgpu_free_sgtable(struct gk20a *g, struct sg_table **sgt)
{
	sg_free_table(*sgt);
	nvgpu_kfree(g, *sgt);
	*sgt = NULL;
}
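
/*
 * Sketch of the sg_table helper lifecycle (illustrative; assumes cpuva,
 * iova and size came from a prior dma_alloc_attrs() call):
 *
 *	struct sg_table *sgt;
 *	int err = nvgpu_get_sgtable(g, &sgt, cpuva, iova, size);
 *
 *	if (err)
 *		return err;
 *	// ... walk sgt->sgl or hand it to the GMMU mapping code ...
 *	nvgpu_free_sgtable(g, &sgt); // frees the table and NULLs sgt
 */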

bool nvgpu_iommuable(struct gk20a *g)
{
#ifdef CONFIG_TEGRA_GK20A
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	/*
	 * Check against the nvgpu device to see if it's been marked as
	 * IOMMU'able.
	 */
	if (!device_is_iommuable(l->dev))
		return false;
#endif

	return true;
}