path: root/drivers/gpu/nvgpu
author	Alex Waterman <alexw@nvidia.com>	2017-03-21 18:34:50 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-06 21:15:04 -0400
commit	50667e097b2be567e3d2f95e23b046243bca2bf6 (patch)
tree	e8fc42261868c6d69844f2e92fce33f6169434d4 /drivers/gpu/nvgpu
parent	8f2d4a3f4a0acc81bae6725d30506e92651a42b5 (diff)
gpu: nvgpu: Rename nvgpu DMA APIs
Rename the nvgpu DMA APIs from gk20a_gmmu_alloc* to nvgpu_dma_alloc*. This
better reflects the purpose of the APIs (to allocate DMA suitable memory)
and avoids confusion with GMMU related code.

JIRA NVGPU-12

Change-Id: I673d607db56dd6e44f02008dc7b5293209ef67bf
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1325548
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
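For illustration, a minimal before/after sketch of a call site affected by this rename (the wrapper function here is hypothetical; the allocator signatures are the ones declared in include/nvgpu/dma.h below):

/* Before: the gk20a_gmmu_* naming suggested GMMU code, not DMA allocation. */
static int example_sysmem_buf(struct gk20a *g, struct nvgpu_mem *mem)
{
	int err = gk20a_gmmu_alloc_sys(g, SZ_4K, mem);

	if (err)
		return err;
	/* ... use mem->cpu_va ... */
	gk20a_gmmu_free(g, mem);
	return 0;
}

/* After: same behavior, clearer nvgpu_dma_* naming. */
static int example_sysmem_buf(struct gk20a *g, struct nvgpu_mem *mem)
{
	int err = nvgpu_dma_alloc_sys(g, SZ_4K, mem);

	if (err)
		return err;
	/* ... use mem->cpu_va ... */
	nvgpu_dma_free(g, mem);
	return 0;
}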
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/dma.c	87
-rw-r--r--	drivers/gpu/nvgpu/common/semaphore.c	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/cde_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ce2_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	10
-rw-r--r--	drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c	6
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	16
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	40
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ltc_common.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	18
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	14
-rw-r--r--	drivers/gpu/nvgpu/gm20b/acr_gm20b.c	10
-rw-r--r--	drivers/gpu/nvgpu/gp106/acr_gp106.c	8
-rw-r--r--	drivers/gpu/nvgpu/gp106/gr_gp106.c	6
-rw-r--r--	drivers/gpu/nvgpu/gp10b/gr_gp10b.c	18
-rw-r--r--	drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c	4
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/dma.h	88
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fifo_vgpu.c	6
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c	8
20 files changed, 179 insertions(+), 178 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index 755848ea..92182a0d 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -23,7 +23,7 @@
 #include "gk20a/gk20a.h"
 
 #if defined(CONFIG_GK20A_VIDMEM)
-static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
+static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 				size_t size)
 {
 	u64 addr = 0;
@@ -38,11 +38,11 @@ static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 #endif
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
-static void gk20a_dma_flags_to_attrs(unsigned long *attrs,
+static void nvgpu_dma_flags_to_attrs(unsigned long *attrs,
 				unsigned long flags)
 #define ATTR_ARG(x) *x
 #else
-static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
+static void nvgpu_dma_flags_to_attrs(struct dma_attrs *attrs,
 				unsigned long flags)
 #define ATTR_ARG(x) x
 #endif
@@ -56,12 +56,12 @@ static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
 #undef ATTR_ARG
 }
 
-int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_flags(g, 0, size, mem);
+	return nvgpu_dma_alloc_flags(g, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
+int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 		struct nvgpu_mem *mem)
 {
 	if (g->mm.vidmem_is_vidmem) {
@@ -71,7 +71,7 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 		 * using gk20a_gmmu_alloc_map and it's vidmem, or if there's a
 		 * difference, the user should use the flag explicitly anyway.
 		 */
-		int err = gk20a_gmmu_alloc_flags_vid(g,
+		int err = nvgpu_dma_alloc_flags_vid(g,
 				flags | NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, mem);
 
@@ -83,15 +83,15 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 		 */
 	}
 
-	return gk20a_gmmu_alloc_flags_sys(g, flags, size, mem);
+	return nvgpu_dma_alloc_flags_sys(g, flags, size, mem);
 }
 
-int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_flags_sys(g, 0, size, mem);
+	return nvgpu_dma_alloc_flags_sys(g, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
@@ -103,7 +103,7 @@ int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	if (flags) {
 		DEFINE_DMA_ATTRS(dma_attrs);
 
-		gk20a_dma_flags_to_attrs(&dma_attrs, flags);
+		nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
 
 		if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
 			mem->pages = dma_alloc_attrs(d,
@@ -149,19 +149,19 @@ fail_free:
 	return err;
 }
 
-int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_flags_vid(g,
+	return nvgpu_dma_alloc_flags_vid(g,
 			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_flags_vid_at(g, flags, size, mem, 0);
+	return nvgpu_dma_alloc_flags_vid_at(g, flags, size, mem, 0);
 }
 
-int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem, dma_addr_t at)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -185,7 +185,7 @@ int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 
 	nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
 	before_pending = atomic64_read(&g->mm.vidmem.bytes_pending);
-	addr = __gk20a_gmmu_alloc(vidmem_alloc, at, size);
+	addr = __nvgpu_dma_alloc(vidmem_alloc, at, size);
 	nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
 	if (!addr) {
 		/*
@@ -237,23 +237,23 @@ fail_physfree:
 #endif
 }
 
-int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_map_flags(vm, 0, size, mem);
+	return nvgpu_dma_alloc_map_flags(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
 	if (vm->mm->vidmem_is_vidmem) {
 		/*
 		 * Force the no-kernel-mapping flag on because we don't support
 		 * the lack of it for vidmem - the user should not care when
-		 * using gk20a_gmmu_alloc_map and it's vidmem, or if there's a
+		 * using nvgpu_dma_alloc_map and it's vidmem, or if there's a
 		 * difference, the user should use the flag explicitly anyway.
 		 */
-		int err = gk20a_gmmu_alloc_map_flags_vid(vm,
+		int err = nvgpu_dma_alloc_map_flags_vid(vm,
 				flags | NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, mem);
 
@@ -265,19 +265,19 @@ int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		 */
 	}
 
-	return gk20a_gmmu_alloc_map_flags_sys(vm, flags, size, mem);
+	return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_map_flags_sys(vm, 0, size, mem);
+	return nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
-	int err = gk20a_gmmu_alloc_flags_sys(vm->mm->g, flags, size, mem);
+	int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem);
 
 	if (err)
 		return err;
@@ -293,21 +293,21 @@ int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 	return 0;
 
 fail_free:
-	gk20a_gmmu_free(vm->mm->g, mem);
+	nvgpu_dma_free(vm->mm->g, mem);
 	return err;
 }
 
-int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_map_flags_vid(vm,
+	return nvgpu_dma_alloc_map_flags_vid(vm,
 			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
-	int err = gk20a_gmmu_alloc_flags_vid(vm->mm->g, flags, size, mem);
+	int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem);
 
 	if (err)
 		return err;
@@ -323,11 +323,11 @@ int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 	return 0;
 
 fail_free:
-	gk20a_gmmu_free(vm->mm->g, mem);
+	nvgpu_dma_free(vm->mm->g, mem);
 	return err;
 }
 
-static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
+static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
 
@@ -335,7 +335,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 	if (mem->flags) {
 		DEFINE_DMA_ATTRS(dma_attrs);
 
-		gk20a_dma_flags_to_attrs(&dma_attrs, mem->flags);
+		nvgpu_dma_flags_to_attrs(&dma_attrs, mem->flags);
 
 		if (mem->flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
 			dma_free_attrs(d, mem->size, mem->pages,
@@ -361,7 +361,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 	mem->aperture = APERTURE_INVALID;
 }
 
-static void gk20a_gmmu_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
+static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 	bool was_empty;
@@ -393,23 +393,24 @@ static void gk20a_gmmu_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 #endif
 }
 
-void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem)
+void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	switch (mem->aperture) {
 	case APERTURE_SYSMEM:
-		return gk20a_gmmu_free_sys(g, mem);
+		return nvgpu_dma_free_sys(g, mem);
 	case APERTURE_VIDMEM:
-		return gk20a_gmmu_free_vid(g, mem);
+		return nvgpu_dma_free_vid(g, mem);
 	default:
 		break; /* like free() on "null" memory */
 	}
 }
 
-void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
+void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
 {
 	if (mem->gpu_va)
-		gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size, gk20a_mem_flag_none);
+		gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size,
+				 gk20a_mem_flag_none);
 	mem->gpu_va = 0;
 
-	gk20a_gmmu_free(vm->mm->g, mem);
+	nvgpu_dma_free(vm->mm->g, mem);
 }
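The renamed entry points above keep their old nesting: nvgpu_dma_alloc() forwards to nvgpu_dma_alloc_flags(), which takes the VIDMEM path (forcing NVGPU_DMA_NO_KERNEL_MAPPING) when vidmem_is_vidmem is set and otherwise falls back to nvgpu_dma_alloc_flags_sys(). A minimal lifecycle sketch for the mapped variants, assuming a valid struct vm_gk20a *vm (the buffer and size here are placeholders):

	struct nvgpu_mem mem = {};
	int err;

	/* Allocate SYSMEM and map it into the GMMU in one call. */
	err = nvgpu_dma_alloc_map_sys(vm, SZ_4K, &mem);
	if (err)
		return err;

	/* ... use mem.gpu_va on the GPU and mem.cpu_va in the kernel ... */

	/* Unmap from the GMMU, then free the backing memory. */
	nvgpu_dma_unmap_free(vm, &mem);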
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index cfe1149f..7c9bf9da 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -53,7 +53,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 
 	__lock_sema_sea(sea);
 
-	ret = gk20a_gmmu_alloc_flags_sys(gk20a, NVGPU_DMA_NO_KERNEL_MAPPING,
+	ret = nvgpu_dma_alloc_flags_sys(gk20a, NVGPU_DMA_NO_KERNEL_MAPPING,
 					 PAGE_SIZE * SEMAPHORE_POOL_COUNT,
 					 &sea->sea_mem);
 	if (ret)
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index e70ee4a6..7c251e2d 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -54,7 +54,7 @@ static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx)
 
 	for (i = 0; i < cde_ctx->num_bufs; i++) {
 		struct nvgpu_mem *mem = cde_ctx->mem + i;
-		gk20a_gmmu_unmap_free(cde_ctx->vm, mem);
+		nvgpu_dma_unmap_free(cde_ctx->vm, mem);
 	}
 
 	nvgpu_kfree(cde_ctx->g, cde_ctx->init_convert_cmd);
@@ -247,7 +247,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 
 	/* allocate buf */
 	mem = cde_ctx->mem + cde_ctx->num_bufs;
-	err = gk20a_gmmu_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
+	err = nvgpu_dma_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
 	if (err) {
 		gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d",
 			   cde_ctx->num_bufs);
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 9cc4b678..f3ac28ea 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -195,7 +195,7 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 
 	if (ce_ctx->cmd_buf_mem.cpu_va) {
 		gk20a_ce_free_command_buffer_stored_fence(ce_ctx);
-		gk20a_gmmu_unmap_free(ce_ctx->vm, &ce_ctx->cmd_buf_mem);
+		nvgpu_dma_unmap_free(ce_ctx->vm, &ce_ctx->cmd_buf_mem);
 	}
 
 	/* free the channel */
@@ -479,7 +479,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	}
 
 	/* allocate command buffer (4096 should be more than enough) from sysmem*/
-	err = gk20a_gmmu_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem);
+	err = nvgpu_dma_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem);
 	if (err) {
 		gk20a_err(ce_ctx->dev,
 			"ce: could not allocate command buffer for CE context");
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 6be616b3..81901c52 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -523,7 +523,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 
 	gk20a_gr_flush_channel_tlb(gr);
 
-	gk20a_gmmu_unmap_free(ch_vm, &ch->gpfifo.mem);
+	nvgpu_dma_unmap_free(ch_vm, &ch->gpfifo.mem);
 	nvgpu_big_free(g, ch->gpfifo.pipe);
 	memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
 
@@ -899,7 +899,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
 	size = roundup_pow_of_two(c->gpfifo.entry_num *
 			2 * 18 * sizeof(u32) / 3);
 
-	err = gk20a_gmmu_alloc_map_sys(ch_vm, size, &q->mem);
+	err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem);
 	if (err) {
 		gk20a_err(d, "%s: memory allocation failed\n", __func__);
 		goto clean_up;
@@ -922,7 +922,7 @@ static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *c)
 	if (q->size == 0)
 		return;
 
-	gk20a_gmmu_unmap_free(ch_vm, &q->mem);
+	nvgpu_dma_unmap_free(ch_vm, &q->mem);
 
 	memset(q, 0, sizeof(struct priv_cmd_queue));
 }
@@ -1244,7 +1244,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 		return -EEXIST;
 	}
 
-	err = gk20a_gmmu_alloc_map_sys(ch_vm,
+	err = nvgpu_dma_alloc_map_sys(ch_vm,
 			gpfifo_size * sizeof(struct nvgpu_gpfifo),
 			&c->gpfifo.mem);
 	if (err) {
@@ -1331,7 +1331,7 @@ clean_up_sync:
 	}
 clean_up_unmap:
 	nvgpu_big_free(g, c->gpfifo.pipe);
-	gk20a_gmmu_unmap_free(ch_vm, &c->gpfifo.mem);
+	nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem);
 clean_up:
 	memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
 	gk20a_err(d, "fail");
diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
index 738e8c1c..e5910e7f 100644
--- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
@@ -143,7 +143,7 @@ static int css_hw_enable_snapshot(struct channel_gk20a *ch,
 	if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE)
 		snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE;
 
-	ret = gk20a_gmmu_alloc_map_sys(&g->mm.pmu.vm, snapshot_size,
+	ret = nvgpu_dma_alloc_map_sys(&g->mm.pmu.vm, snapshot_size,
 			&data->hw_memdesc);
 	if (ret)
 		return ret;
@@ -192,7 +192,7 @@ static int css_hw_enable_snapshot(struct channel_gk20a *ch,
 
 failed_allocation:
 	if (data->hw_memdesc.size) {
-		gk20a_gmmu_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
+		nvgpu_dma_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
 		memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	}
 	data->hw_snapshot = NULL;
@@ -220,7 +220,7 @@ static void css_hw_disable_snapshot(struct gr_gk20a *gr)
 		     perf_pmasys_mem_block_valid_false_f() |
 		     perf_pmasys_mem_block_target_f(0));
 
-	gk20a_gmmu_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
+	nvgpu_dma_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
 	memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	data->hw_snapshot = NULL;
 
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index d8fa7505..96b94ea7 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -400,7 +400,7 @@ static int gk20a_fecs_trace_alloc_ring(struct gk20a *g)
 {
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-	return gk20a_gmmu_alloc_sys(g, GK20A_FECS_TRACE_NUM_RECORDS
+	return nvgpu_dma_alloc_sys(g, GK20A_FECS_TRACE_NUM_RECORDS
 			* ctxsw_prog_record_timestamp_record_size_in_bytes_v(),
 			&trace->trace_buf);
 }
@@ -409,7 +409,7 @@ static void gk20a_fecs_trace_free_ring(struct gk20a *g)
 {
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-	gk20a_gmmu_free(g, &trace->trace_buf);
+	nvgpu_dma_free(g, &trace->trace_buf);
 }
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c1f94eb3..ca09c22a 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -483,7 +483,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
 		runlist = &f->runlist_info[runlist_id];
 		for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-			gk20a_gmmu_free(g, &runlist->mem[i]);
+			nvgpu_dma_free(g, &runlist->mem[i]);
 		}
 
 		nvgpu_kfree(g, runlist->active_channels);
@@ -544,9 +544,9 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 	nvgpu_vfree(g, f->channel);
 	nvgpu_vfree(g, f->tsg);
 	if (g->ops.mm.is_bar1_supported(g))
-		gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
+		nvgpu_dma_unmap_free(&g->mm.bar1.vm, &f->userd);
 	else
-		gk20a_gmmu_free(g, &f->userd);
+		nvgpu_dma_free(g, &f->userd);
 
 	gk20a_fifo_delete_runlist(f);
 
@@ -686,7 +686,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 		 f->num_runlist_entries, runlist_size);
 
 	for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-		int err = gk20a_gmmu_alloc_sys(g, runlist_size,
+		int err = nvgpu_dma_alloc_sys(g, runlist_size,
 				&runlist->mem[i]);
 		if (err) {
 			dev_err(d, "memory allocation failed\n");
@@ -940,12 +940,12 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 	nvgpu_mutex_init(&f->free_chs_mutex);
 
 	if (g->ops.mm.is_bar1_supported(g))
-		err = gk20a_gmmu_alloc_map_sys(&g->mm.bar1.vm,
+		err = nvgpu_dma_alloc_map_sys(&g->mm.bar1.vm,
 				f->userd_entry_size * f->num_channels,
 				&f->userd);
 
 	else
-		err = gk20a_gmmu_alloc_sys(g, f->userd_entry_size *
+		err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
 				f->num_channels, &f->userd);
 	if (err) {
 		dev_err(d, "userd memory allocation failed\n");
@@ -980,9 +980,9 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 clean_up:
 	gk20a_dbg_fn("fail");
 	if (g->ops.mm.is_bar1_supported(g))
-		gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
+		nvgpu_dma_unmap_free(&g->mm.bar1.vm, &f->userd);
 	else
-		gk20a_gmmu_free(g, &f->userd);
+		nvgpu_dma_free(g, &f->userd);
 
 	nvgpu_vfree(g, f->channel);
 	f->channel = NULL;
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index a9b6a546..af02491e 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1938,7 +1938,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	if (enable_hwpm_ctxsw) {
 		/* Allocate buffer if necessary */
 		if (pm_ctx->mem.gpu_va == 0) {
-			ret = gk20a_gmmu_alloc_flags_sys(g,
+			ret = nvgpu_dma_alloc_flags_sys(g,
 					NVGPU_DMA_NO_KERNEL_MAPPING,
 					g->gr.ctx_vars.pm_ctxsw_image_size,
 					&pm_ctx->mem);
@@ -1958,7 +1958,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 			if (!pm_ctx->mem.gpu_va) {
 				gk20a_err(dev_from_gk20a(g),
 					"failed to map pm ctxt buffer");
-				gk20a_gmmu_free(g, &pm_ctx->mem);
+				nvgpu_dma_free(g, &pm_ctx->mem);
 				c->g->ops.fifo.enable_channel(c);
 				return -ENOMEM;
 			}
@@ -2018,7 +2018,7 @@ clean_up_mem:
 cleanup_pm_buf:
 	gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 			gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &pm_ctx->mem);
+	nvgpu_dma_free(g, &pm_ctx->mem);
 	memset(&pm_ctx->mem, 0, sizeof(struct nvgpu_mem));
 
 	gk20a_enable_channel_tsg(g, c);
@@ -2318,7 +2318,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 		g->gr.ctx_vars.ucode.gpccs.inst.count * sizeof(u32),
 		g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32));
 
-	err = gk20a_gmmu_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
+	err = nvgpu_dma_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
 	if (err)
 		goto clean_up;
 
@@ -2350,7 +2350,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 	if (ucode_info->surface_desc.gpu_va)
 		gk20a_gmmu_unmap(vm, ucode_info->surface_desc.gpu_va,
 				ucode_info->surface_desc.size, gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &ucode_info->surface_desc);
+	nvgpu_dma_free(g, &ucode_info->surface_desc);
 
 	release_firmware(gpccs_fw);
 	gpccs_fw = NULL;
@@ -2700,7 +2700,7 @@ static void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
 {
 	if (!desc)
 		return;
-	gk20a_gmmu_free(g, &desc->mem);
+	nvgpu_dma_free(g, &desc->mem);
 	desc->destroy = NULL;
 }
 
@@ -2710,7 +2710,7 @@ static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
 {
 	int err = 0;
 
-	err = gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, &desc->mem);
 	if (err)
 		return err;
@@ -2953,7 +2953,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	if (!gr_ctx)
 		return -ENOMEM;
 
-	err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 				gr->ctx_vars.buffer_total_size,
 				&gr_ctx->mem);
 	if (err)
@@ -2973,7 +2973,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	return 0;
 
 err_free_mem:
-	gk20a_gmmu_free(g, &gr_ctx->mem);
+	nvgpu_dma_free(g, &gr_ctx->mem);
 err_free_ctx:
 	nvgpu_kfree(g, gr_ctx);
 	gr_ctx = NULL;
@@ -3022,7 +3022,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 
 	gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
 			gr_ctx->mem.size, gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &gr_ctx->mem);
+	nvgpu_dma_free(g, &gr_ctx->mem);
 	nvgpu_kfree(g, gr_ctx);
 }
 
@@ -3051,7 +3051,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	err = gk20a_gmmu_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
 			128 * sizeof(u32), &patch_ctx->mem);
 	if (err)
 		return err;
@@ -3071,7 +3071,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c)
 	gk20a_gmmu_unmap(c->vm, patch_ctx->mem.gpu_va,
 			patch_ctx->mem.size, gk20a_mem_flag_none);
 
-	gk20a_gmmu_free(g, &patch_ctx->mem);
+	nvgpu_dma_free(g, &patch_ctx->mem);
 	patch_ctx->data_count = 0;
 }
 
@@ -3086,7 +3086,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct channel_gk20a *c)
 		gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va,
 				pm_ctx->mem.size, gk20a_mem_flag_none);
 
-		gk20a_gmmu_free(g, &pm_ctx->mem);
+		nvgpu_dma_free(g, &pm_ctx->mem);
 	}
 }
 
@@ -3366,10 +3366,10 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
 
 	gr_gk20a_free_global_ctx_buffers(g);
 
-	gk20a_gmmu_free(g, &gr->mmu_wr_mem);
-	gk20a_gmmu_free(g, &gr->mmu_rd_mem);
+	nvgpu_dma_free(g, &gr->mmu_wr_mem);
+	nvgpu_dma_free(g, &gr->mmu_rd_mem);
 
-	gk20a_gmmu_free(g, &gr->compbit_store.mem);
+	nvgpu_dma_free(g, &gr->compbit_store.mem);
 
 	memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc));
 
@@ -3658,17 +3658,17 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
 {
 	int err;
 
-	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
+	err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
 	if (err)
 		goto err;
 
-	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
+	err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
 	if (err)
 		goto err_free_wr_mem;
 	return 0;
 
 err_free_wr_mem:
-	gk20a_gmmu_free(g, &gr->mmu_wr_mem);
+	nvgpu_dma_free(g, &gr->mmu_wr_mem);
 err:
 	return -ENOMEM;
 }
@@ -5215,7 +5215,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
 	}
 
 	if (!pmu->pg_buf.cpu_va) {
-		err = gk20a_gmmu_alloc_map_sys(vm, size, &pmu->pg_buf);
+		err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
 		if (err) {
 			gk20a_err(d, "failed to allocate memory\n");
 			return -ENOMEM;
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
index 7c73be77..03b12740 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_common.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c
@@ -70,7 +70,7 @@ static int gk20a_ltc_alloc_phys_cbc(struct gk20a *g,
 {
 	struct gr_gk20a *gr = &g->gr;
 
-	return gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
+	return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
 					  compbit_backing_size,
 					  &gr->compbit_store.mem);
 }
@@ -80,7 +80,7 @@ static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
 {
 	struct gr_gk20a *gr = &g->gr;
 
-	return gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 					  compbit_backing_size,
 					  &gr->compbit_store.mem);
 }
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 79654af3..cfe7745d 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -509,7 +509,7 @@ static void gk20a_remove_mm_support(struct mm_gk20a *mm)
 
 static int gk20a_alloc_sysmem_flush(struct gk20a *g)
 {
-	return gk20a_gmmu_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
+	return nvgpu_dma_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
 }
 
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -897,9 +897,9 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 	 * default.
 	 */
 	if (IS_ENABLED(CONFIG_ARM64))
-		err = gk20a_gmmu_alloc(g, len, &entry->mem);
+		err = nvgpu_dma_alloc(g, len, &entry->mem);
 	else
-		err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+		err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 				len, &entry->mem);
 
 
@@ -929,7 +929,7 @@ void free_gmmu_pages(struct vm_gk20a *vm,
 		return;
 	}
 
-	gk20a_gmmu_free(g, &entry->mem);
+	nvgpu_dma_free(g, &entry->mem);
 }
 
 int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
@@ -1756,7 +1756,7 @@ static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
 	if (buf->dmabuf_priv)
 		buf->dmabuf_priv_delete(buf->dmabuf_priv);
 
-	gk20a_gmmu_free(buf->g, buf->mem);
+	nvgpu_dma_free(buf->g, buf->mem);
 	nvgpu_kfree(buf->g, buf);
 }
 
@@ -1873,7 +1873,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 
 	buf->mem->user_mem = true;
 
-	err = gk20a_gmmu_alloc_vid(g, bytes, buf->mem);
+	err = nvgpu_dma_alloc_vid(g, bytes, buf->mem);
 	if (err)
 		goto err_memfree;
 
@@ -1896,7 +1896,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 	return fd;
 
 err_bfree:
-	gk20a_gmmu_free(g, buf->mem);
+	nvgpu_dma_free(g, buf->mem);
 err_memfree:
 	nvgpu_kfree(g, buf->mem);
 err_kfree:
@@ -4199,7 +4199,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 
 	gk20a_dbg_fn("");
 
-	err = gk20a_gmmu_alloc(g, ram_in_alloc_size_v(), inst_block);
+	err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
 	if (err) {
 		gk20a_err(dev, "%s: memory allocation failed\n", __func__);
 		return err;
@@ -4212,7 +4212,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 void gk20a_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	if (inst_block->size)
-		gk20a_gmmu_free(g, inst_block);
+		nvgpu_dma_free(g, inst_block);
 }
 
 u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct nvgpu_mem *inst_block)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 7a6bfe22..547ba924 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -3151,7 +3151,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
 	pmu->ucode_image = (u32 *)((u8 *)pmu->desc +
 			pmu->desc->descriptor_size);
 
-	err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
+	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
 			&pmu->ucode);
 	if (err)
 		goto err_release_fw;
@@ -3225,7 +3225,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
 	INIT_WORK(&pmu->pg_init, pmu_setup_hw);
 
-	err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
+	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
 			&pmu->seq_buf);
 	if (err) {
 		gk20a_err(d, "failed to allocate memory\n");
@@ -3242,7 +3242,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
 	pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
 
-	err = gk20a_gmmu_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
+	err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
 			&pmu->trace_buf);
 	if (err) {
 		gk20a_err(d, "failed to allocate pmu trace buffer\n");
@@ -3255,7 +3255,7 @@ skip_init:
 	gk20a_dbg_fn("done");
 	return 0;
 err_free_seq_buf:
-	gk20a_gmmu_unmap_free(vm, &pmu->seq_buf);
+	nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
err_free_seq:
 	nvgpu_kfree(g, pmu->seq);
err_free_mutex:
@@ -4760,7 +4760,7 @@ int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	int err;
 
-	err = gk20a_gmmu_alloc_map_vid(vm, size, mem);
+	err = nvgpu_dma_alloc_map_vid(vm, size, mem);
 	if (err) {
 		gk20a_err(g->dev, "memory allocation failed");
 		return -ENOMEM;
@@ -4776,7 +4776,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	int err;
 
-	err = gk20a_gmmu_alloc_map_sys(vm, size, mem);
+	err = nvgpu_dma_alloc_map_sys(vm, size, mem);
 	if (err) {
 		gk20a_err(g->dev, "failed to allocate memory\n");
 		return -ENOMEM;
@@ -4787,7 +4787,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 
 void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
-	gk20a_gmmu_free(g, mem);
+	nvgpu_dma_free(g, mem);
 	memset(mem, 0, sizeof(struct nvgpu_mem));
 }
 
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 3cfcbb19..181e5301 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -369,7 +369,7 @@ static int gm20b_alloc_blob_space(struct gk20a *g,
 {
 	int err;
 
-	err = gk20a_gmmu_alloc_sys(g, size, mem);
+	err = nvgpu_dma_alloc_sys(g, size, mem);
 
 	return err;
 }
@@ -1115,7 +1115,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 		err = -1;
 		goto err_release_acr_fw;
 	}
-	err = gk20a_gmmu_alloc_map_sys(vm, img_size_in_bytes,
+	err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
 			&acr->acr_ucode);
 	if (err) {
 		err = -ENOMEM;
@@ -1171,7 +1171,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 	}
 	return 0;
 err_free_ucode_map:
-	gk20a_gmmu_unmap_free(vm, &acr->acr_ucode);
+	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
err_release_acr_fw:
 	release_firmware(acr_fw);
 	acr->acr_fw = NULL;
@@ -1417,7 +1417,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 	/*TODO in code verify that enable PMU is done,
 	  scrubbing etc is done*/
 	/*TODO in code verify that gmmu vm init is done*/
-	err = gk20a_gmmu_alloc_flags_sys(g,
+	err = nvgpu_dma_alloc_flags_sys(g,
 			NVGPU_DMA_READ_ONLY, bl_sz, &acr->hsbl_ucode);
 	if (err) {
 		gk20a_err(d, "failed to allocate memory\n");
@@ -1475,7 +1475,7 @@ err_unmap_bl:
 	gk20a_gmmu_unmap(vm, acr->hsbl_ucode.gpu_va,
 			acr->hsbl_ucode.size, gk20a_mem_flag_none);
err_free_ucode:
-	gk20a_gmmu_free(g, &acr->hsbl_ucode);
+	nvgpu_dma_free(g, &acr->hsbl_ucode);
err_done:
 	release_firmware(hsbl_fw);
 	return err;
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 9acc8eda..631f9891 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -113,13 +113,13 @@ static int gp106_alloc_blob_space(struct gk20a *g,
 	 * Even though this mem_desc wouldn't be used, the wpr region needs to
 	 * be reserved in the allocator.
 	 */
-	err = gk20a_gmmu_alloc_flags_vid_at(g,
+	err = nvgpu_dma_alloc_flags_vid_at(g,
 			NVGPU_DMA_NO_KERNEL_MAPPING, wpr_inf.size,
 			&g->acr.wpr_dummy, wpr_inf.wpr_base);
 	if (err)
 		return err;
 
-	return gk20a_gmmu_alloc_flags_vid_at(g,
+	return nvgpu_dma_alloc_flags_vid_at(g,
 			NVGPU_DMA_NO_KERNEL_MAPPING, wpr_inf.size, mem,
 			wpr_inf.nonwpr_base);
 }
@@ -1094,7 +1094,7 @@ static int gp106_bootstrap_hs_flcn(struct gk20a *g)
 		err = -1;
 		goto err_release_acr_fw;
 	}
-	err = gk20a_gmmu_alloc_map_sys(vm, img_size_in_bytes,
+	err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
 			&acr->acr_ucode);
 	if (err) {
 		err = -ENOMEM;
@@ -1170,7 +1170,7 @@ static int gp106_bootstrap_hs_flcn(struct gk20a *g)
 
 	return 0;
err_free_ucode_map:
-	gk20a_gmmu_unmap_free(vm, &acr->acr_ucode);
+	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
err_release_acr_fw:
 	release_firmware(acr_fw);
 	acr->acr_fw = NULL;
diff --git a/drivers/gpu/nvgpu/gp106/gr_gp106.c b/drivers/gpu/nvgpu/gp106/gr_gp106.c
index 78859f88..a804f9bb 100644
--- a/drivers/gpu/nvgpu/gp106/gr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/gr_gp106.c
@@ -226,11 +226,11 @@ static int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
 	return 0;
 
fail_free_betacb:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
fail_free_spill:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
fail_free_preempt:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
fail:
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index b9367120..c1cb1376 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -839,7 +839,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 
 	gk20a_dbg_fn("");
 
-	err = gk20a_gmmu_alloc_sys(vm->mm->g, size, mem);
+	err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
 	if (err)
 		return err;
 
@@ -859,7 +859,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 	return 0;
 
fail_free:
-	gk20a_gmmu_free(vm->mm->g, mem);
+	nvgpu_dma_free(vm->mm->g, mem);
 	return err;
 }
 
@@ -980,11 +980,11 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 	return 0;
 
fail_free_betacb:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
fail_free_spill:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
fail_free_preempt:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
fail:
 	return err;
 }
@@ -1098,10 +1098,10 @@ static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	if (g->gr.t18x.ctx_vars.dump_ctxsw_stats_on_channel_close)
 		dump_ctx_switch_stats(g, vm, gr_ctx);
 
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 	gr_gk20a_free_gr_ctx(g, vm, gr_ctx);
 	gk20a_dbg_fn("done");
 }
diff --git a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
index bf52b5c9..e8f3d930 100644
--- a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
@@ -39,7 +39,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	if (!g->mm.bar2_desc.gpu_va) {
-		err = gk20a_gmmu_alloc_map_sys(vm, rbfb_size,
+		err = nvgpu_dma_alloc_map_sys(vm, rbfb_size,
 				&g->mm.bar2_desc);
 		if (err) {
 			dev_err(dev_from_gk20a(g),
@@ -63,7 +63,7 @@ void gp10b_replayable_pagefault_buffer_deinit(struct gk20a *g)
 {
 	struct vm_gk20a *vm = &g->mm.bar2.vm;
 
-	gk20a_gmmu_unmap_free(vm, &g->mm.bar2_desc);
+	nvgpu_dma_unmap_free(vm, &g->mm.bar2_desc);
 }
 
 u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g)
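The conversions above exercise every flavor of the renamed API. Collected in one place, the flag combinations used by callers in this patch look like this (a condensed sketch; sizes and destination buffers are the placeholders used at the real call sites):

	/* SYSMEM without a kernel CPU mapping (common/semaphore.c, gk20a/ltc_common.c): */
	err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING, size, &mem);

	/* Physically contiguous SYSMEM (gk20a/ltc_common.c): */
	err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS, size, &mem);

	/* Read-only SYSMEM for the HS bootloader image (gm20b/acr_gm20b.c): */
	err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_READ_ONLY, bl_sz, &acr->hsbl_ucode);

	/* VIDMEM at a fixed address inside a carveout (gp106/acr_gp106.c): */
	err = nvgpu_dma_alloc_flags_vid_at(g, NVGPU_DMA_NO_KERNEL_MAPPING,
					   wpr_inf.size, mem, wpr_inf.nonwpr_base);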
diff --git a/drivers/gpu/nvgpu/include/nvgpu/dma.h b/drivers/gpu/nvgpu/include/nvgpu/dma.h
index d4fad584..43cff215 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/dma.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/dma.h
@@ -24,7 +24,7 @@ struct vm_gk20a;
24struct nvgpu_mem; 24struct nvgpu_mem;
25 25
26/* 26/*
27 * Flags for the below gk20a_gmmu_{alloc,alloc_map}_flags* 27 * Flags for the below nvgpu_dma_{alloc,alloc_map}_flags*
28 */ 28 */
29 29
30/* 30/*
@@ -45,7 +45,7 @@ struct nvgpu_mem;
45#define NVGPU_DMA_READ_ONLY (1 << 2) 45#define NVGPU_DMA_READ_ONLY (1 << 2)
46 46
47/** 47/**
48 * gk20a_gmmu_alloc - Allocate DMA memory 48 * nvgpu_dma_alloc - Allocate DMA memory
49 * 49 *
50 * @g - The GPU. 50 * @g - The GPU.
51 * @size - Size of the allocation in bytes. 51 * @size - Size of the allocation in bytes.
@@ -56,10 +56,10 @@ struct nvgpu_mem;
56 * memory can be either placed in VIDMEM or SYSMEM, which ever is more 56 * memory can be either placed in VIDMEM or SYSMEM, which ever is more
57 * convenient for the driver. 57 * convenient for the driver.
58 */ 58 */
59int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem); 59int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
60 60
61/** 61/**
62 * gk20a_gmmu_alloc_flags - Allocate DMA memory 62 * nvgpu_dma_alloc_flags - Allocate DMA memory
63 * 63 *
64 * @g - The GPU. 64 * @g - The GPU.
65 * @flags - Flags modifying the operation of the DMA allocation. 65 * @flags - Flags modifying the operation of the DMA allocation.
@@ -77,11 +77,11 @@ int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
77 * %NVGPU_DMA_FORCE_CONTIGUOUS 77 * %NVGPU_DMA_FORCE_CONTIGUOUS
78 * %NVGPU_DMA_READ_ONLY 78 * %NVGPU_DMA_READ_ONLY
79 */ 79 */
80int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size, 80int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
81 struct nvgpu_mem *mem); 81 struct nvgpu_mem *mem);
82 82
83/** 83/**
84 * gk20a_gmmu_alloc_sys - Allocate DMA memory 84 * nvgpu_dma_alloc_sys - Allocate DMA memory
85 * 85 *
86 * @g - The GPU. 86 * @g - The GPU.
87 * @size - Size of the allocation in bytes. 87 * @size - Size of the allocation in bytes.
@@ -91,10 +91,10 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
91 * Returns 0 on success and a suitable error code when there's an error. This 91 * Returns 0 on success and a suitable error code when there's an error. This
92 * allocates memory specifically in SYSMEM. 92 * allocates memory specifically in SYSMEM.
93 */ 93 */
94int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem); 94int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
95 95
96/** 96/**
97 * gk20a_gmmu_alloc_flags_sys - Allocate DMA memory 97 * nvgpu_dma_alloc_flags_sys - Allocate DMA memory
98 * 98 *
99 * @g - The GPU. 99 * @g - The GPU.
100 * @flags - Flags modifying the operation of the DMA allocation. 100 * @flags - Flags modifying the operation of the DMA allocation.
@@ -111,11 +111,11 @@ int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
111 * %NVGPU_DMA_FORCE_CONTIGUOUS 111 * %NVGPU_DMA_FORCE_CONTIGUOUS
112 * %NVGPU_DMA_READ_ONLY 112 * %NVGPU_DMA_READ_ONLY
113 */ 113 */
114int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags, 114int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
115 size_t size, struct nvgpu_mem *mem); 115 size_t size, struct nvgpu_mem *mem);
116 116
117/** 117/**
118 * gk20a_gmmu_alloc_vid - Allocate DMA memory 118 * nvgpu_dma_alloc_vid - Allocate DMA memory
119 * 119 *
120 * @g - The GPU. 120 * @g - The GPU.
121 * @size - Size of the allocation in bytes. 121 * @size - Size of the allocation in bytes.
@@ -125,10 +125,10 @@ int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
125 * Returns 0 on success and a suitable error code when there's an error. This 125 * Returns 0 on success and a suitable error code when there's an error. This
126 * allocates memory specifically in VIDMEM. 126 * allocates memory specifically in VIDMEM.
127 */ 127 */
128int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem); 128int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
129 129
130/** 130/**
131 * gk20a_gmmu_alloc_flags_vid - Allocate DMA memory 131 * nvgpu_dma_alloc_flags_vid - Allocate DMA memory
132 * 132 *
133 * @g - The GPU. 133 * @g - The GPU.
134 * @flags - Flags modifying the operation of the DMA allocation. 134 * @flags - Flags modifying the operation of the DMA allocation.
@@ -144,11 +144,11 @@ int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
144 * %NVGPU_DMA_NO_KERNEL_MAPPING 144 * %NVGPU_DMA_NO_KERNEL_MAPPING
145 * 145 *
146 */ 146 */
147int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags, 147int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags,
148 size_t size, struct nvgpu_mem *mem); 148 size_t size, struct nvgpu_mem *mem);
149 149
150/** 150/**
151 * gk20a_gmmu_alloc_flags_vid_at - Allocate DMA memory 151 * nvgpu_dma_alloc_flags_vid_at - Allocate DMA memory
152 * 152 *
153 * @g - The GPU. 153 * @g - The GPU.
154 * @flags - Flags modifying the operation of the DMA allocation. 154 * @flags - Flags modifying the operation of the DMA allocation.
@@ -165,29 +165,29 @@ int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
  *
  * %NVGPU_DMA_NO_KERNEL_MAPPING
  */
-int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem, dma_addr_t at);
 
 /**
- * gk20a_gmmu_free - Free a DMA allocation
+ * nvgpu_dma_free - Free a DMA allocation
  *
  * @g - The GPU.
  * @mem - An allocation to free.
  *
  * Free memory created with any of:
  *
- * gk20a_gmmu_alloc()
- * gk20a_gmmu_alloc_flags()
- * gk20a_gmmu_alloc_sys()
- * gk20a_gmmu_alloc_flags_sys()
- * gk20a_gmmu_alloc_vid()
- * gk20a_gmmu_alloc_flags_vid()
- * gk20a_gmmu_alloc_flags_vid_at()
+ * nvgpu_dma_alloc()
+ * nvgpu_dma_alloc_flags()
+ * nvgpu_dma_alloc_sys()
+ * nvgpu_dma_alloc_flags_sys()
+ * nvgpu_dma_alloc_vid()
+ * nvgpu_dma_alloc_flags_vid()
+ * nvgpu_dma_alloc_flags_vid_at()
  */
-void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem);
+void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @size - Size of the allocation in bytes.
@@ -198,11 +198,11 @@ void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem);
  * either placed in VIDMEM or SYSMEM, whichever is more convenient for the
  * driver.
  */
-int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_flags - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_flags - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -221,11 +221,11 @@ int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
  * %NVGPU_DMA_FORCE_CONTIGUOUS
  * %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_sys - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_sys - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @size - Size of the allocation in bytes.
@@ -234,11 +234,11 @@ int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
  * Allocate memory suitable for doing DMA and map that memory into the GMMU.
  * This memory will be placed in SYSMEM.
  */
-int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_flags_sys - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_flags_sys - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -255,11 +255,11 @@ int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
  * %NVGPU_DMA_FORCE_CONTIGUOUS
  * %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_vid - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_vid - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @size - Size of the allocation in bytes.
@@ -268,11 +268,11 @@ int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
  * Allocate memory suitable for doing DMA and map that memory into the GMMU.
  * This memory will be placed in VIDMEM.
  */
-int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_flags_vid - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_flags_vid - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -289,24 +289,24 @@ int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
  * %NVGPU_DMA_FORCE_CONTIGUOUS
  * %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_unmap_free - Free a DMA allocation
+ * nvgpu_dma_unmap_free - Free a DMA allocation
  *
  * @g - The GPU.
  * @mem - An allocation to free.
  *
  * Free memory created with any of:
  *
- * gk20a_gmmu_alloc_map()
- * gk20a_gmmu_alloc_map_flags()
- * gk20a_gmmu_alloc_map_sys()
- * gk20a_gmmu_alloc_map_flags_sys()
- * gk20a_gmmu_alloc_map_vid()
- * gk20a_gmmu_alloc_map_flags_vid()
+ * nvgpu_dma_alloc_map()
+ * nvgpu_dma_alloc_map_flags()
+ * nvgpu_dma_alloc_map_sys()
+ * nvgpu_dma_alloc_map_flags_sys()
+ * nvgpu_dma_alloc_map_vid()
+ * nvgpu_dma_alloc_map_flags_vid()
  */
-void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem);
+void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem);
 
 #endif
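
For orientation, the renamed entry points pair up mechanically: buffers from the nvgpu_dma_alloc*() family are released with nvgpu_dma_free(), and buffers from the nvgpu_dma_alloc_map*() family with nvgpu_dma_unmap_free(). The following is a minimal usage sketch, not part of this patch; it assumes a valid struct gk20a *g and struct vm_gk20a *vm from the surrounding driver, and the usual nvgpu_mem cpu_va/gpu_va fields.

/* Illustrative sketch only; not part of this patch. */
#include <nvgpu/dma.h>

static int dma_api_example(struct gk20a *g, struct vm_gk20a *vm)
{
	struct nvgpu_mem sysmem = {};
	struct nvgpu_mem mapped = {};
	int err;

	/* CPU-visible SYSMEM allocation; no GMMU mapping is created. */
	err = nvgpu_dma_alloc_sys(g, PAGE_SIZE, &sysmem);
	if (err)
		return err;

	/* Allocate and map into the GMMU in one step; mapped.gpu_va becomes valid. */
	err = nvgpu_dma_alloc_map(vm, PAGE_SIZE, &mapped);
	if (err) {
		nvgpu_dma_free(g, &sysmem);
		return err;
	}

	/* ... use sysmem.cpu_va on the CPU and mapped.gpu_va on the GPU ... */

	nvgpu_dma_unmap_free(vm, &mapped);	/* alloc_map*() pairs with unmap_free() */
	nvgpu_dma_free(g, &sysmem);		/* alloc*() pairs with free() */
	return 0;
}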
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 59fb0c4a..e2883f7c 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -216,7 +216,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 
 	runlist_size = sizeof(u16) * f->num_channels;
 	for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-		int err = gk20a_gmmu_alloc_sys(g, runlist_size,
+		int err = nvgpu_dma_alloc_sys(g, runlist_size,
 					       &runlist->mem[i]);
 		if (err) {
 			dev_err(d, "memory allocation failed\n");
@@ -260,7 +260,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 
 	f->userd_entry_size = 1 << ram_userd_base_shift_v();
 
-	err = gk20a_gmmu_alloc_sys(g, f->userd_entry_size * f->num_channels,
+	err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * f->num_channels,
 				   &f->userd);
 	if (err) {
 		dev_err(d, "memory allocation failed\n");
@@ -327,7 +327,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 clean_up:
 	gk20a_dbg_fn("fail");
 	/* FIXME: unmap from bar1 */
-	gk20a_gmmu_free(g, &f->userd);
+	nvgpu_dma_free(g, &f->userd);
 
 	memset(&f->userd, 0, sizeof(f->userd));
 
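
The init_runlist() hunk above shows the typical consumer pattern: allocate a run of buffers and unwind on the first failure. A condensed sketch of that pattern against the renamed API follows; the helper alloc_bufs() and its parameters are hypothetical, only the nvgpu_dma_* calls are real API.

/* Hypothetical helper illustrating the allocate-then-unwind pattern. */
static int alloc_bufs(struct gk20a *g, struct nvgpu_mem *bufs, int n,
		      size_t size)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = nvgpu_dma_alloc_sys(g, size, &bufs[i]);
		if (err)
			goto clean_up;
	}
	return 0;

clean_up:
	/* Free only the buffers that were successfully allocated. */
	while (--i >= 0)
		nvgpu_dma_free(g, &bufs[i]);
	return err;
}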
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index 527e12e4..da41abd4 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -42,10 +42,10 @@ static void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size,
 			 gmmu_page_size_kernel);
 
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 
 	nvgpu_kfree(g, gr_ctx);
 }
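
The free path above releases each ctxsw buffer with nvgpu_dma_unmap_free(), which tears down the GMMU mapping as well as the backing memory. For completeness, here is a hedged sketch of the matching allocation side; the helper name and its size parameter are hypothetical.

/* Hypothetical allocation counterpart to the teardown above. */
static int alloc_ctxsw_buf(struct vm_gk20a *vm, struct nvgpu_mem *buf,
			   size_t size)
{
	int err;

	/* Created via an alloc_map variant, so buf->gpu_va is valid... */
	err = nvgpu_dma_alloc_map_sys(vm, size, buf);
	if (err)
		return err;

	/* ...and it must later be released with nvgpu_dma_unmap_free(vm, buf),
	 * not plain nvgpu_dma_free(), or the GMMU mapping would leak.
	 */
	return 0;
}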