Diffstat (limited to 'drivers/gpu/nvgpu/vgpu')
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c     | 12 ++++++------
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h     |  2 +-
 drivers/gpu/nvgpu/vgpu/gr_vgpu.c                 | 30 +++++++++++-----------
 drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c   | 10 +++++-----
 drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c |  6 +++---
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c                 |  2 +-
 drivers/gpu/nvgpu/vgpu/mm_vgpu.h                 |  2 +-
 7 files changed, 32 insertions(+), 32 deletions(-)
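
Taken together, this is a mechanical type and naming cleanup: the GMMU page-size index changes from a signed int carrying the old lowercase gmmu_page_size_* enumerators to an unsigned u32 carrying the uppercase GMMU_PAGE_SIZE_* constants, keeping the vgpu code in step with the common MM code. A minimal sketch of the constants the new code assumes — the header location and the concrete values are assumptions inferred from how the diff indexes vm->gmmu_page_sizes[], not something this diff shows:

/*
 * Sketch of the page-size index constants assumed by this diff
 * (hypothetical values; only the names appear in the hunks below).
 * Since pgsz_idx is now a u32, the constants are unsigned as well.
 */
#define GMMU_PAGE_SIZE_SMALL	0U	/* small-page mappings */
#define GMMU_PAGE_SIZE_BIG	1U	/* big-page mappings */
#define GMMU_PAGE_SIZE_KERNEL	2U	/* kernel-internal mappings */
#define GMMU_NR_PAGE_SIZES	3U	/* bound of vm->gmmu_page_sizes[] */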
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index e7bd0a49..6017046f 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -53,7 +53,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 		struct nvgpu_sgt *sgt,
 		u64 buffer_offset,
 		u64 size,
-		int pgsz_idx,
+		u32 pgsz_idx,
 		u8 kind_v,
 		u32 ctag_offset,
 		u32 flags,
@@ -147,12 +147,12 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	else
 		prot = TEGRA_VGPU_MAP_PROT_NONE;
 
-	if (pgsz_idx == gmmu_page_size_kernel) {
-		if (page_size == vm->gmmu_page_sizes[gmmu_page_size_small]) {
-			pgsz_idx = gmmu_page_size_small;
+	if (pgsz_idx == GMMU_PAGE_SIZE_KERNEL) {
+		if (page_size == vm->gmmu_page_sizes[GMMU_PAGE_SIZE_SMALL]) {
+			pgsz_idx = GMMU_PAGE_SIZE_SMALL;
 		} else if (page_size ==
-				vm->gmmu_page_sizes[gmmu_page_size_big]) {
-			pgsz_idx = gmmu_page_size_big;
+				vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
+			pgsz_idx = GMMU_PAGE_SIZE_BIG;
 		} else {
 			nvgpu_err(g, "invalid kernel page size %d",
 				page_size);
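
The second hunk above is the one place the diff touches actual logic: when a caller passes the kernel page-size index, the vgpu map path resolves it to the small or big index matching the requested page size before forwarding the mapping to the server. Extracted as a standalone helper — a sketch under the assumption of the constants above, not code from this commit:

/*
 * Hypothetical helper mirroring the remap logic in
 * vgpu_gp10b_locked_gmmu_map(); not part of this commit.
 * Returns the resolved index, or GMMU_NR_PAGE_SIZES if the
 * kernel page size matches neither small nor big pages.
 */
static u32 resolve_kernel_pgsz_idx(struct vm_gk20a *vm, u32 pgsz_idx,
				   u32 page_size)
{
	if (pgsz_idx != GMMU_PAGE_SIZE_KERNEL)
		return pgsz_idx;	/* small/big pass through untouched */

	if (page_size == vm->gmmu_page_sizes[GMMU_PAGE_SIZE_SMALL])
		return GMMU_PAGE_SIZE_SMALL;
	if (page_size == vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG])
		return GMMU_PAGE_SIZE_BIG;

	return GMMU_NR_PAGE_SIZES;	/* invalid kernel page size */
}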
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h
index 9435b75f..704c400e 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h
@@ -30,7 +30,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 		struct nvgpu_sgt *sgt,
 		u64 buffer_offset,
 		u64 size,
-		int pgsz_idx,
+		u32 pgsz_idx,
 		u8 kind_v,
 		u32 ctag_offset,
 		u32 flags,
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 0077c537..fa64cb82 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -192,7 +192,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Circular Buffer */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[CIRCULAR].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gpu_va)
 		goto clean_up;
@@ -202,7 +202,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Attribute Buffer */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[ATTRIBUTE].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gpu_va)
 		goto clean_up;
@@ -212,7 +212,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Page Pool */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PAGEPOOL].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PAGEPOOL_VA] = gpu_va;
@@ -221,7 +221,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Priv register Access Map */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
@@ -232,7 +232,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 #ifdef CONFIG_GK20A_CTXSW_TRACE
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[FECS_TRACE_BUFFER].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gpu_va)
 		goto clean_up;
@@ -262,7 +262,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
 			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-					gmmu_page_size_kernel);
+					GMMU_PAGE_SIZE_KERNEL);
 			g_bfr_va[i] = 0;
 		}
 	}
@@ -285,7 +285,7 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
 			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-					gmmu_page_size_kernel);
+					GMMU_PAGE_SIZE_KERNEL);
 			g_bfr_va[i] = 0;
 			g_bfr_size[i] = 0;
 		}
@@ -317,7 +317,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 
 	gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm,
 				gr->ctx_vars.buffer_total_size,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gr_ctx->mem.gpu_va)
 		return -ENOMEM;
@@ -336,7 +336,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	if (unlikely(err)) {
 		nvgpu_err(g, "fail to alloc gr_ctx");
 		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		gr_ctx->mem.aperture = APERTURE_INVALID;
 	} else {
 		gr_ctx->virt_ctx = p->gr_ctx_handle;
@@ -365,7 +365,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	patch_ctx->mem.size = 128 * sizeof(u32);
 	patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 				patch_ctx->mem.size,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 	if (!patch_ctx->mem.gpu_va)
 		return -ENOMEM;
 
@@ -376,7 +376,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
 		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		err = -ENOMEM;
 	}
 
@@ -394,7 +394,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
 		/* server will free on channel close */
 
 		__nvgpu_vm_free_va(tsg->vm, patch_ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		patch_ctx->mem.gpu_va = 0;
 	}
 }
@@ -414,7 +414,7 @@ static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
 		/* server will free on channel close */
 
 		__nvgpu_vm_free_va(tsg->vm, pm_ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		pm_ctx->mem.gpu_va = 0;
 }
 
@@ -437,7 +437,7 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
 	WARN_ON(err || msg.ret);
 
 	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	tsg = &g->fifo.tsg[gr_ctx->tsgid];
 	vgpu_gr_unmap_global_ctx_buffers(tsg);
@@ -1120,7 +1120,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 	if (pm_ctx->mem.gpu_va == 0) {
 		pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm,
 				g->gr.ctx_vars.pm_ctxsw_image_size,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 
 		if (!pm_ctx->mem.gpu_va)
 			return -ENOMEM;
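
Every gr_vgpu.c hunk above is the same substitution at an __nvgpu_vm_alloc_va()/__nvgpu_vm_free_va() call site: the vgpu driver only reserves a GPU virtual range locally (the server owns the real page tables), and each reservation must be released with the same page-size index it was allocated with. A minimal sketch of that pairing, with signatures inferred from the call sites in this diff rather than quoted from a header:

/* Assumed prototypes, inferred from the call sites above. */
u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx);
int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx);

/* Hypothetical usage mirroring the pattern throughout gr_vgpu.c. */
static int example_ctx_va(struct vm_gk20a *vm, u64 size)
{
	u64 gpu_va = __nvgpu_vm_alloc_va(vm, size, GMMU_PAGE_SIZE_KERNEL);

	if (!gpu_va)
		return -ENOMEM;

	/* ... ask the vgpu server to back the range; if that fails,
	 * release the reservation with the matching index ... */
	__nvgpu_vm_free_va(vm, gpu_va, GMMU_PAGE_SIZE_KERNEL);
	return -ENOMEM;
}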
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
index e718a30d..43cff1c0 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
@@ -43,7 +43,7 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
 
 	vm->syncpt_ro_map_gpu_va = __nvgpu_vm_alloc_va(vm,
 			g->syncpt_unit_size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!vm->syncpt_ro_map_gpu_va) {
 		nvgpu_err(g, "allocating read-only va space failed");
 		return -ENOMEM;
@@ -63,7 +63,7 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
63 "mapping read-only va space failed err %d", 63 "mapping read-only va space failed err %d",
64 err); 64 err);
65 __nvgpu_vm_free_va(vm, vm->syncpt_ro_map_gpu_va, 65 __nvgpu_vm_free_va(vm, vm->syncpt_ro_map_gpu_va,
66 gmmu_page_size_kernel); 66 GMMU_PAGE_SIZE_KERNEL);
67 vm->syncpt_ro_map_gpu_va = 0; 67 vm->syncpt_ro_map_gpu_va = 0;
68 return err; 68 return err;
69 } 69 }
@@ -91,7 +91,7 @@ int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
 		return err;
 
 	syncpt_buf->gpu_va = __nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!syncpt_buf->gpu_va) {
 		nvgpu_err(g, "allocating syncpt va space failed");
 		return -ENOMEM;
@@ -110,7 +110,7 @@ int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
 	if (err) {
 		nvgpu_err(g, "mapping syncpt va space failed err %d", err);
 		__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		return err;
 	}
 
@@ -121,7 +121,7 @@ void vgpu_gv11b_fifo_free_syncpt_buf(struct channel_gk20a *c,
 		struct nvgpu_mem *syncpt_buf)
 {
 	nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va);
-	__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va, gmmu_page_size_kernel);
+	__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va, GMMU_PAGE_SIZE_KERNEL);
 	nvgpu_dma_free(c->g, syncpt_buf);
 }
 
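
The fifo hunks all follow the alloc-then-map shape: reserve a VA range, map the syncpoint aperture into it, and unwind the reservation if the map fails; the free path unmaps before releasing the VA. A condensed sketch of that unwind ordering — map_syncpt_aperture() is a stand-in for the mapping call elided from the hunks above, not a real nvgpu function:

/*
 * Hypothetical condensation of vgpu_gv11b_fifo_alloc_syncpt_buf();
 * illustrates the error-unwind ordering only.
 */
static int alloc_syncpt_va(struct channel_gk20a *c, struct gk20a *g,
			   struct nvgpu_mem *syncpt_buf)
{
	int err;

	syncpt_buf->gpu_va = __nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
			GMMU_PAGE_SIZE_KERNEL);
	if (!syncpt_buf->gpu_va)
		return -ENOMEM;

	err = map_syncpt_aperture(c->vm, syncpt_buf);	/* assumed helper */
	if (err) {
		/* unwind: release the VA reserved above */
		__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va,
				GMMU_PAGE_SIZE_KERNEL);
		return err;
	}
	return 0;
}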
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
index 2372b9c4..b536d15e 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
@@ -40,7 +40,7 @@ int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c)
 	p->ch_handle = c->virt_ctx;
 	p->ctx_header_va = __nvgpu_vm_alloc_va(c->vm,
 			ctxsw_prog_fecs_header_v(),
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!p->ctx_header_va) {
 		nvgpu_err(c->g, "alloc va failed for ctx_header");
 		return -ENOMEM;
@@ -50,7 +50,7 @@ int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c)
 	if (unlikely(err)) {
 		nvgpu_err(c->g, "alloc ctx_header failed err %d", err);
 		__nvgpu_vm_free_va(c->vm, p->ctx_header_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		return err;
 	}
 	ctx->mem.gpu_va = p->ctx_header_va;
@@ -75,7 +75,7 @@ void vgpu_gv11b_free_subctx_header(struct channel_gk20a *c)
 		if (unlikely(err))
 			nvgpu_err(c->g, "free ctx_header failed err %d", err);
 		__nvgpu_vm_free_va(c->vm, ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		ctx->mem.gpu_va = 0;
 	}
 }
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 54b1e7c2..229a9767 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -84,7 +84,7 @@ int vgpu_init_mm_support(struct gk20a *g)
 void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 			u64 vaddr,
 			u64 size,
-			int pgsz_idx,
+			u32 pgsz_idx,
 			bool va_allocated,
 			enum gk20a_mem_rw_flag rw_flag,
 			bool sparse,
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.h b/drivers/gpu/nvgpu/vgpu/mm_vgpu.h
index e8f40d5c..41bae96d 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.h
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.h
@@ -33,7 +33,7 @@ enum gk20a_mem_rw_flag;
 void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 		u64 vaddr,
 		u64 size,
-		int pgsz_idx,
+		u32 pgsz_idx,
 		bool va_allocated,
 		enum gk20a_mem_rw_flag rw_flag,
 		bool sparse,
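
The last two hunks keep the vgpu_locked_gmmu_unmap() definition and its declaration in agreement; since the function is installed into the MM HAL, the function-pointer type has to match too. A hedged sketch of that hookup — the ops field name is an assumption, not shown in this diff:

/*
 * Hypothetical HAL wiring; the gmmu_unmap field name is assumed.
 * With the declaration changed to u32 pgsz_idx, this assignment
 * only type-checks if the common prototype changed in step.
 */
g->ops.mm.gmmu_unmap = vgpu_locked_gmmu_unmap;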