Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/linux-dma.c')
-rw-r--r--	drivers/gpu/nvgpu/os/linux/linux-dma.c	533
1 file changed, 533 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/os/linux/linux-dma.c b/drivers/gpu/nvgpu/os/linux/linux-dma.c
new file mode 100644
index 00000000..a42e7cb5
--- /dev/null
+++ b/drivers/gpu/nvgpu/os/linux/linux-dma.c
@@ -0,0 +1,533 @@
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/version.h>

#include <nvgpu/log.h>
#include <nvgpu/dma.h>
#include <nvgpu/lock.h>
#include <nvgpu/bug.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/kmem.h>
#include <nvgpu/enabled.h>
#include <nvgpu/vidmem.h>

#include <nvgpu/linux/dma.h>

#include "gk20a/gk20a.h"

#include "platform_gk20a.h"
#include "os_linux.h"
#include "dmabuf_vidmem.h"

#ifdef __DMA_ATTRS_LONGS
#define NVGPU_DEFINE_DMA_ATTRS(x)				\
	struct dma_attrs x = {					\
		.flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 },	\
	}
#define NVGPU_DMA_ATTR(attrs) &attrs
#else
#define NVGPU_DEFINE_DMA_ATTRS(attrs) unsigned long attrs = 0
#define NVGPU_DMA_ATTR(attrs) attrs
#endif
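
/*
 * A rough sketch of what the two variants expand to. __DMA_ATTRS_LONGS is
 * only defined by older kernels that still have struct dma_attrs; upstream
 * switched to a plain unsigned long bitmask around v4.8:
 *
 *   NVGPU_DEFINE_DMA_ATTRS(a);  // old: struct dma_attrs a = { 0 };
 *                               // new: unsigned long a = 0;
 *   dma_alloc_attrs(d, size, &iova, gfp, NVGPU_DMA_ATTR(a));
 *                               // old: passes &a; new: passes a
 */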
47 | |||
48 | /* | ||
49 | * Enough to hold all the possible flags in string form. When a new flag is | ||
50 | * added it must be added here as well!! | ||
51 | */ | ||
52 | #define NVGPU_DMA_STR_SIZE \ | ||
53 | sizeof("NO_KERNEL_MAPPING FORCE_CONTIGUOUS") | ||
54 | |||
/*
 * The returned string is kmalloc()ed here but must be freed by the caller.
 */
static char *nvgpu_dma_flags_to_str(struct gk20a *g, unsigned long flags)
{
	char *buf = nvgpu_kzalloc(g, NVGPU_DMA_STR_SIZE);
	int bytes_available = NVGPU_DMA_STR_SIZE;

	/*
	 * Return the empty buffer if there are no flags (or NULL if the
	 * allocation itself failed). Makes it easier on the calling code to
	 * just print it instead of any if (NULL) type logic.
	 */
	if (!buf || !flags)
		return buf;

#define APPEND_FLAG(flag, str_flag)					\
	do {								\
		if (flags & flag) {					\
			strncat(buf, str_flag, bytes_available);	\
			bytes_available -= strlen(str_flag);		\
		}							\
	} while (0)

	APPEND_FLAG(NVGPU_DMA_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING ");
	APPEND_FLAG(NVGPU_DMA_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS ");
#undef APPEND_FLAG

	return buf;
}

/**
 * __dma_dbg - Debug print for DMA allocs and frees.
 *
 * @g - The GPU.
 * @size - The requested size of the alloc (size_t).
 * @flags - The flags (unsigned long).
 * @type - A string describing the type (e.g. sysmem or vidmem).
 * @what - A string with 'alloc' or 'free'.
 *
 * @flags is the DMA flags. If there are none, or it doesn't make sense to
 * print flags, just pass 0.
 *
 * Please use dma_dbg_alloc() and dma_dbg_free() instead of this function.
 */
static void __dma_dbg(struct gk20a *g, size_t size, unsigned long flags,
		      const char *type, const char *what,
		      const char *func, int line)
{
	char *flags_str = NULL;

	/*
	 * Don't bother making the flags_str if debugging is
	 * not enabled. This saves a malloc and a free.
	 */
	if (!nvgpu_log_mask_enabled(g, gpu_dbg_dma))
		return;

	flags_str = nvgpu_dma_flags_to_str(g, flags);

	__nvgpu_log_dbg(g, gpu_dbg_dma,
			func, line,
			"DMA %s: [%s] size=%-7zu "
			"aligned=%-7zu total=%-10llukB %s",
			what, type,
			size, PAGE_ALIGN(size),
			g->dma_memory_used >> 10,
			flags_str);

	if (flags_str)
		nvgpu_kfree(g, flags_str);
}

#define dma_dbg_alloc(g, size, flags, type)				\
	__dma_dbg(g, size, flags, type, "alloc", __func__, __LINE__)
#define dma_dbg_free(g, size, flags, type)				\
	__dma_dbg(g, size, flags, type, "free", __func__, __LINE__)

/*
 * For after the DMA alloc/free is done.
 */
#define __dma_dbg_done(g, size, type, what)		\
	nvgpu_log(g, gpu_dbg_dma,			\
		  "DMA %s: [%s] size=%-7zu Done!",	\
		  what, type, size)

#define dma_dbg_alloc_done(g, size, type)		\
	__dma_dbg_done(g, size, type, "alloc")
#define dma_dbg_free_done(g, size, type)		\
	__dma_dbg_done(g, size, type, "free")

#if defined(CONFIG_GK20A_VIDMEM)
static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, u64 at,
			     size_t size)
{
	u64 addr = 0;

	if (at)
		addr = nvgpu_alloc_fixed(allocator, at, size, 0);
	else
		addr = nvgpu_alloc(allocator, size);

	return addr;
}
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
static void nvgpu_dma_flags_to_attrs(unsigned long *attrs,
				     unsigned long flags)
#define ATTR_ARG(x) *x
#else
static void nvgpu_dma_flags_to_attrs(struct dma_attrs *attrs,
				     unsigned long flags)
#define ATTR_ARG(x) x
#endif
{
	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, ATTR_ARG(attrs));
	if (flags & NVGPU_DMA_FORCE_CONTIGUOUS)
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, ATTR_ARG(attrs));
#undef ATTR_ARG
}

int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
			      size_t size, struct nvgpu_mem *mem)
{
	struct device *d = dev_from_gk20a(g);
	int err;
	dma_addr_t iova;
	NVGPU_DEFINE_DMA_ATTRS(dma_attrs);
	void *alloc_ret;

	if (nvgpu_mem_is_valid(mem)) {
		nvgpu_warn(g, "memory leak !!");
		WARN_ON(1);
	}

	/*
	 * WAR for IO coherent chips: the DMA API does not seem to generate
	 * mappings that work correctly. Unclear why - Bug ID: 2040115.
	 *
	 * Basically we just tell the DMA API not to map with NO_KERNEL_MAPPING
	 * and then make a vmap() ourselves.
	 */
	if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
		flags |= NVGPU_DMA_NO_KERNEL_MAPPING;

	/*
	 * Before the debug print so we see this in the total. But during
	 * cleanup in the fail paths this has to be subtracted again.
	 */
	g->dma_memory_used += PAGE_ALIGN(size);

	dma_dbg_alloc(g, size, flags, "sysmem");

	/*
	 * Save the old size but for actual allocation purposes the size is
	 * going to be page aligned.
	 */
	mem->size = size;
	size = PAGE_ALIGN(size);

	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);

	alloc_ret = dma_alloc_attrs(d, size, &iova,
				    GFP_KERNEL|__GFP_ZERO,
				    NVGPU_DMA_ATTR(dma_attrs));
	if (!alloc_ret) {
		/* Undo the accounting done above before bailing out. */
		g->dma_memory_used -= size;
		return -ENOMEM;
	}

	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
		mem->priv.pages = alloc_ret;
		err = nvgpu_get_sgtable_from_pages(g, &mem->priv.sgt,
						   mem->priv.pages,
						   iova, size);
	} else {
		mem->cpu_va = alloc_ret;
		err = nvgpu_get_sgtable_attrs(g, &mem->priv.sgt, mem->cpu_va,
					      iova, size, flags);
	}
	if (err)
		goto fail_free_dma;

	if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM)) {
		mem->cpu_va = vmap(mem->priv.pages,
				   size >> PAGE_SHIFT,
				   0, PAGE_KERNEL);
		if (!mem->cpu_va) {
			err = -ENOMEM;
			goto fail_free_sgt;
		}
	}

	mem->aligned_size = size;
	mem->aperture = APERTURE_SYSMEM;
	mem->priv.flags = flags;

	dma_dbg_alloc_done(g, mem->size, "sysmem");

	return 0;

fail_free_sgt:
	nvgpu_free_sgtable(g, &mem->priv.sgt);
fail_free_dma:
	dma_free_attrs(d, size, alloc_ret, iova, NVGPU_DMA_ATTR(dma_attrs));
	mem->cpu_va = NULL;
	mem->priv.sgt = NULL;
	mem->size = 0;
	/*
	 * Undo the accounting done above; size is already page aligned here.
	 * (mem->aligned_size is only set on success, so it can't be used.)
	 */
	g->dma_memory_used -= size;
	return err;
}

int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
				 size_t size, struct nvgpu_mem *mem, u64 at)
{
#if defined(CONFIG_GK20A_VIDMEM)
	u64 addr;
	int err;
	struct nvgpu_allocator *vidmem_alloc = g->mm.vidmem.cleared ?
		&g->mm.vidmem.allocator :
		&g->mm.vidmem.bootstrap_allocator;
	u64 before_pending;

	if (nvgpu_mem_is_valid(mem)) {
		nvgpu_warn(g, "memory leak !!");
		WARN_ON(1);
	}

	dma_dbg_alloc(g, size, flags, "vidmem");

	mem->size = size;
	size = PAGE_ALIGN(size);

	if (!nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
		return -ENOSYS;

	/*
	 * Our own allocator doesn't have any flags yet, and we can't
	 * kernel-map these, so require explicit flags.
	 */
	WARN_ON(flags != NVGPU_DMA_NO_KERNEL_MAPPING);

	nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
	before_pending = atomic64_read(&g->mm.vidmem.bytes_pending.atomic_var);
	addr = __nvgpu_dma_alloc(vidmem_alloc, at, size);
	nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
	if (!addr) {
		/*
		 * If memory is known to be freed soon, let the user know that
		 * it may be available after a while.
		 */
		if (before_pending)
			return -EAGAIN;
		else
			return -ENOMEM;
	}

	if (at)
		mem->mem_flags |= NVGPU_MEM_FLAG_FIXED;

	mem->priv.sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
	if (!mem->priv.sgt) {
		err = -ENOMEM;
		goto fail_physfree;
	}

	err = sg_alloc_table(mem->priv.sgt, 1, GFP_KERNEL);
	if (err)
		goto fail_kfree;

	nvgpu_vidmem_set_page_alloc(mem->priv.sgt->sgl, addr);
	sg_set_page(mem->priv.sgt->sgl, NULL, size, 0);

	mem->aligned_size = size;
	mem->aperture = APERTURE_VIDMEM;
	mem->vidmem_alloc = (struct nvgpu_page_alloc *)(uintptr_t)addr;
	mem->allocator = vidmem_alloc;
	mem->priv.flags = flags;

	nvgpu_init_list_node(&mem->clear_list_entry);

	dma_dbg_alloc_done(g, mem->size, "vidmem");

	return 0;

fail_kfree:
	nvgpu_kfree(g, mem->priv.sgt);
fail_physfree:
	/* Free back to the allocator the memory actually came from. */
	nvgpu_free(vidmem_alloc, addr);
	mem->size = 0;
	return err;
#else
	return -ENOSYS;
#endif
}

void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
{
	struct device *d = dev_from_gk20a(g);

	g->dma_memory_used -= mem->aligned_size;

	dma_dbg_free(g, mem->size, mem->priv.flags, "sysmem");

	if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) &&
	    !(mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) &&
	    (mem->cpu_va || mem->priv.pages)) {
		/*
		 * Free side of WAR for bug 2040115.
		 */
		if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
			vunmap(mem->cpu_va);

		if (mem->priv.flags) {
			NVGPU_DEFINE_DMA_ATTRS(dma_attrs);

			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->priv.flags);

			if (mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
				dma_free_attrs(d, mem->aligned_size,
					       mem->priv.pages,
					       sg_dma_address(mem->priv.sgt->sgl),
					       NVGPU_DMA_ATTR(dma_attrs));
			} else {
				dma_free_attrs(d, mem->aligned_size,
					       mem->cpu_va,
					       sg_dma_address(mem->priv.sgt->sgl),
					       NVGPU_DMA_ATTR(dma_attrs));
			}
		} else {
			dma_free_coherent(d, mem->aligned_size, mem->cpu_va,
					  sg_dma_address(mem->priv.sgt->sgl));
		}
		mem->cpu_va = NULL;
		mem->priv.pages = NULL;
	}

	/*
	 * When this flag is set we expect that pages is still populated but not
	 * by the DMA API.
	 */
	if (mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA)
		nvgpu_kfree(g, mem->priv.pages);

	if (mem->priv.sgt)
		nvgpu_free_sgtable(g, &mem->priv.sgt);

	dma_dbg_free_done(g, mem->size, "sysmem");

	mem->size = 0;
	mem->aligned_size = 0;
	mem->aperture = APERTURE_INVALID;
}

void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
{
#if defined(CONFIG_GK20A_VIDMEM)
	size_t mem_size = mem->size;

	dma_dbg_free(g, mem->size, mem->priv.flags, "vidmem");

	/* Sanity check - only this flag is supported when allocating. */
	WARN_ON(mem->priv.flags != NVGPU_DMA_NO_KERNEL_MAPPING);

	if (mem->mem_flags & NVGPU_MEM_FLAG_USER_MEM) {
		int err = nvgpu_vidmem_clear_list_enqueue(g, mem);

		/*
		 * If there's an error here then that means we can't clear the
		 * vidmem. That's too bad; however, we still own the nvgpu_mem
		 * buf so we have to free that.
		 *
		 * We don't need to worry about the vidmem allocator itself
		 * since when that gets cleaned up in the driver shutdown path
		 * all the outstanding allocs are force freed.
		 */
		if (err)
			nvgpu_kfree(g, mem);
	} else {
		nvgpu_memset(g, mem, 0, 0, mem->aligned_size);
		nvgpu_free(mem->allocator,
			   (u64)nvgpu_vidmem_get_page_alloc(mem->priv.sgt->sgl));
		nvgpu_free_sgtable(g, &mem->priv.sgt);

		mem->size = 0;
		mem->aligned_size = 0;
		mem->aperture = APERTURE_INVALID;
	}

	dma_dbg_free_done(g, mem_size, "vidmem");
#endif
}

int nvgpu_get_sgtable_attrs(struct gk20a *g, struct sg_table **sgt,
			    void *cpuva, u64 iova, size_t size,
			    unsigned long flags)
{
	int err = 0;
	struct sg_table *tbl;
	NVGPU_DEFINE_DMA_ATTRS(dma_attrs);

	tbl = nvgpu_kzalloc(g, sizeof(struct sg_table));
	if (!tbl) {
		err = -ENOMEM;
		goto fail;
	}

	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
	err = dma_get_sgtable_attrs(dev_from_gk20a(g), tbl, cpuva, iova,
				    size, NVGPU_DMA_ATTR(dma_attrs));
	if (err)
		goto fail;

	sg_dma_address(tbl->sgl) = iova;
	*sgt = tbl;

	return 0;

fail:
	if (tbl)
		nvgpu_kfree(g, tbl);

	return err;
}

int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt,
		      void *cpuva, u64 iova, size_t size)
{
	return nvgpu_get_sgtable_attrs(g, sgt, cpuva, iova, size, 0);
}

int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt,
				 struct page **pages, u64 iova, size_t size)
{
	int err = 0;
	struct sg_table *tbl;

	tbl = nvgpu_kzalloc(g, sizeof(struct sg_table));
	if (!tbl) {
		err = -ENOMEM;
		goto fail;
	}

	err = sg_alloc_table_from_pages(tbl, pages,
					DIV_ROUND_UP(size, PAGE_SIZE),
					0, size, GFP_KERNEL);
	if (err)
		goto fail;

	sg_dma_address(tbl->sgl) = iova;
	*sgt = tbl;

	return 0;

fail:
	if (tbl)
		nvgpu_kfree(g, tbl);

	return err;
}

void nvgpu_free_sgtable(struct gk20a *g, struct sg_table **sgt)
{
	sg_free_table(*sgt);
	nvgpu_kfree(g, *sgt);
	*sgt = NULL;
}

bool nvgpu_iommuable(struct gk20a *g)
{
#ifdef CONFIG_TEGRA_GK20A
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	/*
	 * Check against the nvgpu device to see if it's been marked as
	 * IOMMU'able.
	 */
	if (!device_is_iommuable(l->dev))
		return false;
#endif

	return true;
}