author      Terje Bergstrom <tbergstrom@nvidia.com>    2015-03-20 15:59:09 -0400
committer   Dan Willemsen <dwillemsen@nvidia.com>      2015-04-04 22:17:24 -0400
commit      1b7b271980094637cf34a9d8ad14cb36f2c36363 (patch)
tree        a897b5d9a013c14866f938673da6a8d8fe80ef51 /drivers
parent      21f1396d1c80010470e0f071fabe84279b3aebae (diff)
gpu: nvgpu: Use common allocator for context
Reduce the amount of duplicated code around memory allocation by using
common helpers, and a common data structure for storing the results of
allocations.

Bug 1605769

Change-Id: I10c226e2377aa867a5cf11be61d08a9d67206b1d
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/720507
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c              184
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c                2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h               17
-rw-r--r--  drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c   30
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c                4
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gr_vgpu.c                 45
6 files changed, 110 insertions, 172 deletions
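
For orientation before the per-file hunks, here is a minimal sketch of the pattern this change converges on. Only the two helper calls and the embedded mem_desc are taken from the patch itself (gk20a_gmmu_alloc_attr(), gk20a_gmmu_free_attr(), desc->mem.*); the mem_desc field layout and the example_* wrapper names are illustrative assumptions, not the actual nvgpu definitions.

/* Sketch only: mem_desc layout inferred from how the hunks below use it
 * (mem.pages, mem.sgt, mem.size, mem.gpu_va, mem.cpu_va). */
struct mem_desc {
        void *cpu_va;
        struct page **pages;
        struct sg_table *sgt;
        size_t size;
        u64 gpu_va;
};

/* Hypothetical callers: instead of open-coding dma_alloc_attrs(), sg_table
 * creation and the matching error unwind, a context buffer is allocated and
 * freed through the common helpers, with all state kept in desc->mem. */
static int example_alloc_ctx_buffer(struct gk20a *g,
                                    struct gr_ctx_buffer_desc *desc,
                                    size_t size)
{
        /* Fills desc->mem (pages, sgt, size) or returns a negative errno. */
        return gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
                                     size, &desc->mem);
}

static void example_destroy_ctx_buffer(struct gk20a *g,
                                       struct gr_ctx_buffer_desc *desc)
{
        /* Releases the pages/sg_table set up by the alloc helper; a no-op on
         * a zeroed mem_desc (see the mm_gk20a.c guard below). */
        gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &desc->mem);
}

Centralizing this state in one mem_desc is what allows the per-call-site pages/iova/size/attrs bookkeeping in gr_gk20a.c, platform_gk20a_tegra.c and gr_vgpu.c to be dropped in the hunks that follow.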
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 1e33a970..86a069b2 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -713,8 +713,8 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c,
713 713
714 gk20a_dbg_fn(""); 714 gk20a_dbg_fn("");
715 715
716 ctx_ptr = vmap(ch_ctx->gr_ctx->pages, 716 ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
717 PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT, 717 PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
718 0, pgprot_writecombine(PAGE_KERNEL)); 718 0, pgprot_writecombine(PAGE_KERNEL));
719 if (!ctx_ptr) 719 if (!ctx_ptr)
720 return -ENOMEM; 720 return -ENOMEM;
@@ -857,7 +857,7 @@ static int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
857 (u64_hi32(ch_ctx->global_ctx_buffer_va[PAGEPOOL_VA]) << 857 (u64_hi32(ch_ctx->global_ctx_buffer_va[PAGEPOOL_VA]) <<
858 (32 - gr_scc_pagepool_base_addr_39_8_align_bits_v())); 858 (32 - gr_scc_pagepool_base_addr_39_8_align_bits_v()));
859 859
860 size = gr->global_ctx_buffer[PAGEPOOL].size / 860 size = gr->global_ctx_buffer[PAGEPOOL].mem.size /
861 gr_scc_pagepool_total_pages_byte_granularity_v(); 861 gr_scc_pagepool_total_pages_byte_granularity_v();
862 862
863 if (size == g->ops.gr.pagepool_default_size(g)) 863 if (size == g->ops.gr.pagepool_default_size(g))
@@ -1490,14 +1490,14 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
1490 if (err) 1490 if (err)
1491 goto clean_up; 1491 goto clean_up;
1492 1492
1493 gold_ptr = vmap(gr->global_ctx_buffer[GOLDEN_CTX].pages, 1493 gold_ptr = vmap(gr->global_ctx_buffer[GOLDEN_CTX].mem.pages,
1494 PAGE_ALIGN(gr->global_ctx_buffer[GOLDEN_CTX].size) >> 1494 PAGE_ALIGN(gr->global_ctx_buffer[GOLDEN_CTX].mem.size) >>
1495 PAGE_SHIFT, 0, pgprot_writecombine(PAGE_KERNEL)); 1495 PAGE_SHIFT, 0, pgprot_writecombine(PAGE_KERNEL));
1496 if (!gold_ptr) 1496 if (!gold_ptr)
1497 goto clean_up; 1497 goto clean_up;
1498 1498
1499 ctx_ptr = vmap(ch_ctx->gr_ctx->pages, 1499 ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
1500 PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT, 1500 PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
1501 0, pgprot_writecombine(PAGE_KERNEL)); 1501 0, pgprot_writecombine(PAGE_KERNEL));
1502 if (!ctx_ptr) 1502 if (!ctx_ptr)
1503 goto clean_up; 1503 goto clean_up;
@@ -1536,7 +1536,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
1536 gk20a_mem_rd32(gold_ptr, i); 1536 gk20a_mem_rd32(gold_ptr, i);
1537 } 1537 }
1538 1538
1539 gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->gpu_va); 1539 gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
1540 1540
1541 gr->ctx_vars.golden_image_initialized = true; 1541 gr->ctx_vars.golden_image_initialized = true;
1542 1542
@@ -1570,8 +1570,8 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
1570 Flush and invalidate before cpu update. */ 1570 Flush and invalidate before cpu update. */
1571 g->ops.mm.l2_flush(g, true); 1571 g->ops.mm.l2_flush(g, true);
1572 1572
1573 ctx_ptr = vmap(ch_ctx->gr_ctx->pages, 1573 ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
1574 PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT, 1574 PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
1575 0, pgprot_writecombine(PAGE_KERNEL)); 1575 0, pgprot_writecombine(PAGE_KERNEL));
1576 if (!ctx_ptr) 1576 if (!ctx_ptr)
1577 return -ENOMEM; 1577 return -ENOMEM;
@@ -1610,8 +1610,8 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
1610 Flush and invalidate before cpu update. */ 1610 Flush and invalidate before cpu update. */
1611 g->ops.mm.l2_flush(g, true); 1611 g->ops.mm.l2_flush(g, true);
1612 1612
1613 ctx_ptr = vmap(ch_ctx->gr_ctx->pages, 1613 ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
1614 PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT, 1614 PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
1615 0, pgprot_writecombine(PAGE_KERNEL)); 1615 0, pgprot_writecombine(PAGE_KERNEL));
1616 if (!ctx_ptr) 1616 if (!ctx_ptr)
1617 return -ENOMEM; 1617 return -ENOMEM;
@@ -2207,50 +2207,26 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2207 return 0; 2207 return 0;
2208} 2208}
2209 2209
2210static void gk20a_gr_destroy_ctx_buffer(struct platform_device *pdev, 2210static void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
2211 struct gr_ctx_buffer_desc *desc) 2211 struct gr_ctx_buffer_desc *desc)
2212{ 2212{
2213 struct device *dev = &pdev->dev;
2214 if (!desc) 2213 if (!desc)
2215 return; 2214 return;
2216 if (desc->sgt) { 2215 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &desc->mem);
2217 gk20a_free_sgtable(&desc->sgt);
2218 desc->sgt = NULL;
2219 }
2220 if (desc->pages) {
2221 dma_free_attrs(dev, desc->size, desc->pages,
2222 desc->iova, &desc->attrs);
2223 desc->pages = NULL;
2224 }
2225} 2216}
2226 2217
2227static int gk20a_gr_alloc_ctx_buffer(struct platform_device *pdev, 2218static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
2228 struct gr_ctx_buffer_desc *desc, 2219 struct gr_ctx_buffer_desc *desc,
2229 size_t size) 2220 size_t size)
2230{ 2221{
2231 struct device *dev = &pdev->dev;
2232 DEFINE_DMA_ATTRS(attrs);
2233 dma_addr_t iova;
2234 int err = 0; 2222 int err = 0;
2235 2223
2236 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); 2224 err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
2237 2225 size, &desc->mem);
2238 desc->pages = dma_alloc_attrs(&pdev->dev, size, &iova, 2226 if (err)
2239 GFP_KERNEL, &attrs); 2227 return err;
2240 if (!desc->pages)
2241 return -ENOMEM;
2242 2228
2243 desc->iova = iova;
2244 desc->size = size;
2245 desc->attrs = attrs;
2246 desc->destroy = gk20a_gr_destroy_ctx_buffer; 2229 desc->destroy = gk20a_gr_destroy_ctx_buffer;
2247 err = gk20a_get_sgtable_from_pages(&pdev->dev, &desc->sgt, desc->pages,
2248 desc->iova, desc->size);
2249 if (err) {
2250 dma_free_attrs(dev, desc->size, desc->pages,
2251 desc->iova, &desc->attrs);
2252 memset(desc, 0, sizeof(*desc));
2253 }
2254 2230
2255 return err; 2231 return err;
2256} 2232}
@@ -2274,7 +2250,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2274 2250
2275 gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size); 2251 gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
2276 2252
2277 err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[CIRCULAR], 2253 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR],
2278 cb_buffer_size); 2254 cb_buffer_size);
2279 if (err) 2255 if (err)
2280 goto clean_up; 2256 goto clean_up;
@@ -2286,7 +2262,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2286 2262
2287 gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size); 2263 gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
2288 2264
2289 err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[PAGEPOOL], 2265 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL],
2290 pagepool_buffer_size); 2266 pagepool_buffer_size);
2291 if (err) 2267 if (err)
2292 goto clean_up; 2268 goto clean_up;
@@ -2298,7 +2274,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2298 2274
2299 gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size); 2275 gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
2300 2276
2301 err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[ATTRIBUTE], 2277 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE],
2302 attr_buffer_size); 2278 attr_buffer_size);
2303 if (err) 2279 if (err)
2304 goto clean_up; 2280 goto clean_up;
@@ -2314,7 +2290,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2314 gk20a_dbg_info("golden_image_size : %d", 2290 gk20a_dbg_info("golden_image_size : %d",
2315 gr->ctx_vars.golden_image_size); 2291 gr->ctx_vars.golden_image_size);
2316 2292
2317 err = gk20a_gr_alloc_ctx_buffer(pdev, 2293 err = gk20a_gr_alloc_ctx_buffer(g,
2318 &gr->global_ctx_buffer[GOLDEN_CTX], 2294 &gr->global_ctx_buffer[GOLDEN_CTX],
2319 gr->ctx_vars.golden_image_size); 2295 gr->ctx_vars.golden_image_size);
2320 if (err) 2296 if (err)
@@ -2323,7 +2299,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2323 gk20a_dbg_info("priv_access_map_size : %d", 2299 gk20a_dbg_info("priv_access_map_size : %d",
2324 gr->ctx_vars.priv_access_map_size); 2300 gr->ctx_vars.priv_access_map_size);
2325 2301
2326 err = gk20a_gr_alloc_ctx_buffer(pdev, 2302 err = gk20a_gr_alloc_ctx_buffer(g,
2327 &gr->global_ctx_buffer[PRIV_ACCESS_MAP], 2303 &gr->global_ctx_buffer[PRIV_ACCESS_MAP],
2328 gr->ctx_vars.priv_access_map_size); 2304 gr->ctx_vars.priv_access_map_size);
2329 2305
@@ -2337,7 +2313,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2337 gk20a_err(dev_from_gk20a(g), "fail"); 2313 gk20a_err(dev_from_gk20a(g), "fail");
2338 for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) { 2314 for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
2339 if (gr->global_ctx_buffer[i].destroy) { 2315 if (gr->global_ctx_buffer[i].destroy) {
2340 gr->global_ctx_buffer[i].destroy(pdev, 2316 gr->global_ctx_buffer[i].destroy(g,
2341 &gr->global_ctx_buffer[i]); 2317 &gr->global_ctx_buffer[i]);
2342 } 2318 }
2343 } 2319 }
@@ -2346,7 +2322,6 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2346 2322
2347static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g) 2323static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
2348{ 2324{
2349 struct platform_device *pdev = g->dev;
2350 struct gr_gk20a *gr = &g->gr; 2325 struct gr_gk20a *gr = &g->gr;
2351 DEFINE_DMA_ATTRS(attrs); 2326 DEFINE_DMA_ATTRS(attrs);
2352 u32 i; 2327 u32 i;
@@ -2354,7 +2329,7 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
2354 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); 2329 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
2355 2330
2356 for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) { 2331 for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
2357 gr->global_ctx_buffer[i].destroy(pdev, 2332 gr->global_ctx_buffer[i].destroy(g,
2358 &gr->global_ctx_buffer[i]); 2333 &gr->global_ctx_buffer[i]);
2359 } 2334 }
2360 2335
@@ -2375,12 +2350,12 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2375 gk20a_dbg_fn(""); 2350 gk20a_dbg_fn("");
2376 2351
2377 /* Circular Buffer */ 2352 /* Circular Buffer */
2378 if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].sgt == NULL)) { 2353 if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].mem.sgt == NULL)) {
2379 sgt = gr->global_ctx_buffer[CIRCULAR].sgt; 2354 sgt = gr->global_ctx_buffer[CIRCULAR].mem.sgt;
2380 size = gr->global_ctx_buffer[CIRCULAR].size; 2355 size = gr->global_ctx_buffer[CIRCULAR].mem.size;
2381 } else { 2356 } else {
2382 sgt = gr->global_ctx_buffer[CIRCULAR_VPR].sgt; 2357 sgt = gr->global_ctx_buffer[CIRCULAR_VPR].mem.sgt;
2383 size = gr->global_ctx_buffer[CIRCULAR_VPR].size; 2358 size = gr->global_ctx_buffer[CIRCULAR_VPR].mem.size;
2384 } 2359 }
2385 2360
2386 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 2361 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
@@ -2392,12 +2367,12 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2392 g_bfr_size[CIRCULAR_VA] = size; 2367 g_bfr_size[CIRCULAR_VA] = size;
2393 2368
2394 /* Attribute Buffer */ 2369 /* Attribute Buffer */
2395 if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].sgt == NULL)) { 2370 if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.sgt == NULL)) {
2396 sgt = gr->global_ctx_buffer[ATTRIBUTE].sgt; 2371 sgt = gr->global_ctx_buffer[ATTRIBUTE].mem.sgt;
2397 size = gr->global_ctx_buffer[ATTRIBUTE].size; 2372 size = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
2398 } else { 2373 } else {
2399 sgt = gr->global_ctx_buffer[ATTRIBUTE_VPR].sgt; 2374 sgt = gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.sgt;
2400 size = gr->global_ctx_buffer[ATTRIBUTE_VPR].size; 2375 size = gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.size;
2401 } 2376 }
2402 2377
2403 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 2378 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
@@ -2409,12 +2384,12 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2409 g_bfr_size[ATTRIBUTE_VA] = size; 2384 g_bfr_size[ATTRIBUTE_VA] = size;
2410 2385
2411 /* Page Pool */ 2386 /* Page Pool */
2412 if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].sgt == NULL)) { 2387 if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].mem.sgt == NULL)) {
2413 sgt = gr->global_ctx_buffer[PAGEPOOL].sgt; 2388 sgt = gr->global_ctx_buffer[PAGEPOOL].mem.sgt;
2414 size = gr->global_ctx_buffer[PAGEPOOL].size; 2389 size = gr->global_ctx_buffer[PAGEPOOL].mem.size;
2415 } else { 2390 } else {
2416 sgt = gr->global_ctx_buffer[PAGEPOOL_VPR].sgt; 2391 sgt = gr->global_ctx_buffer[PAGEPOOL_VPR].mem.sgt;
2417 size = gr->global_ctx_buffer[PAGEPOOL_VPR].size; 2392 size = gr->global_ctx_buffer[PAGEPOOL_VPR].mem.size;
2418 } 2393 }
2419 2394
2420 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 2395 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
@@ -2426,8 +2401,8 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2426 g_bfr_size[PAGEPOOL_VA] = size; 2401 g_bfr_size[PAGEPOOL_VA] = size;
2427 2402
2428 /* Golden Image */ 2403 /* Golden Image */
2429 sgt = gr->global_ctx_buffer[GOLDEN_CTX].sgt; 2404 sgt = gr->global_ctx_buffer[GOLDEN_CTX].mem.sgt;
2430 size = gr->global_ctx_buffer[GOLDEN_CTX].size; 2405 size = gr->global_ctx_buffer[GOLDEN_CTX].mem.size;
2431 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0, 2406 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
2432 gk20a_mem_flag_none); 2407 gk20a_mem_flag_none);
2433 if (!gpu_va) 2408 if (!gpu_va)
@@ -2436,8 +2411,8 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2436 g_bfr_size[GOLDEN_CTX_VA] = size; 2411 g_bfr_size[GOLDEN_CTX_VA] = size;
2437 2412
2438 /* Priv register Access Map */ 2413 /* Priv register Access Map */
2439 sgt = gr->global_ctx_buffer[PRIV_ACCESS_MAP].sgt; 2414 sgt = gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.sgt;
2440 size = gr->global_ctx_buffer[PRIV_ACCESS_MAP].size; 2415 size = gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
2441 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0, 2416 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
2442 gk20a_mem_flag_none); 2417 gk20a_mem_flag_none);
2443 if (!gpu_va) 2418 if (!gpu_va)
@@ -2452,7 +2427,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2452 for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) { 2427 for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
2453 if (g_bfr_va[i]) { 2428 if (g_bfr_va[i]) {
2454 gk20a_gmmu_unmap(ch_vm, g_bfr_va[i], 2429 gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
2455 gr->global_ctx_buffer[i].size, 2430 gr->global_ctx_buffer[i].mem.size,
2456 gk20a_mem_flag_none); 2431 gk20a_mem_flag_none);
2457 g_bfr_va[i] = 0; 2432 g_bfr_va[i] = 0;
2458 } 2433 }
@@ -2488,11 +2463,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
2488{ 2463{
2489 struct gr_ctx_desc *gr_ctx = NULL; 2464 struct gr_ctx_desc *gr_ctx = NULL;
2490 struct gr_gk20a *gr = &g->gr; 2465 struct gr_gk20a *gr = &g->gr;
2491 struct device *d = dev_from_gk20a(g);
2492 struct sg_table *sgt;
2493 DEFINE_DMA_ATTRS(attrs);
2494 int err = 0; 2466 int err = 0;
2495 dma_addr_t iova;
2496 2467
2497 gk20a_dbg_fn(""); 2468 gk20a_dbg_fn("");
2498 2469
@@ -2507,40 +2478,24 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
2507 if (!gr_ctx) 2478 if (!gr_ctx)
2508 return -ENOMEM; 2479 return -ENOMEM;
2509 2480
2510 gr_ctx->size = gr->ctx_vars.buffer_total_size; 2481 err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
2511 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); 2482 gr->ctx_vars.buffer_total_size,
2512 gr_ctx->pages = dma_alloc_attrs(d, gr_ctx->size, 2483 &gr_ctx->mem);
2513 &iova, GFP_KERNEL, &attrs);
2514 if (!gr_ctx->pages) {
2515 err = -ENOMEM;
2516 goto err_free_ctx;
2517 }
2518
2519 gr_ctx->iova = iova;
2520 err = gk20a_get_sgtable_from_pages(d, &sgt, gr_ctx->pages,
2521 gr_ctx->iova, gr_ctx->size);
2522 if (err) 2484 if (err)
2523 goto err_free; 2485 goto err_free_ctx;
2524
2525 gr_ctx->gpu_va = gk20a_gmmu_map(vm, &sgt, gr_ctx->size,
2526 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
2527 gk20a_mem_flag_none);
2528 if (!gr_ctx->gpu_va)
2529 goto err_free_sgt;
2530 2486
2531 gk20a_free_sgtable(&sgt); 2487 gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm, &gr_ctx->mem.sgt, gr_ctx->mem.size,
2488 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
2489 gk20a_mem_flag_none);
2490 if (!gr_ctx->mem.gpu_va)
2491 goto err_free_mem;
2532 2492
2533 *__gr_ctx = gr_ctx; 2493 *__gr_ctx = gr_ctx;
2534 2494
2535 return 0; 2495 return 0;
2536 2496
2537 err_free_sgt: 2497 err_free_mem:
2538 gk20a_free_sgtable(&sgt); 2498 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &gr_ctx->mem);
2539 err_free:
2540 dma_free_attrs(d, gr_ctx->size,
2541 gr_ctx->pages, gr_ctx->iova, &attrs);
2542 gr_ctx->pages = NULL;
2543 gr_ctx->iova = 0;
2544 err_free_ctx: 2499 err_free_ctx:
2545 kfree(gr_ctx); 2500 kfree(gr_ctx);
2546 gr_ctx = NULL; 2501 gr_ctx = NULL;
@@ -2582,21 +2537,14 @@ static int gr_gk20a_alloc_channel_gr_ctx(struct gk20a *g,
2582void gr_gk20a_free_gr_ctx(struct gk20a *g, 2537void gr_gk20a_free_gr_ctx(struct gk20a *g,
2583 struct vm_gk20a *vm, struct gr_ctx_desc *gr_ctx) 2538 struct vm_gk20a *vm, struct gr_ctx_desc *gr_ctx)
2584{ 2539{
2585 struct device *d = dev_from_gk20a(g);
2586 DEFINE_DMA_ATTRS(attrs);
2587
2588 gk20a_dbg_fn(""); 2540 gk20a_dbg_fn("");
2589 2541
2590 if (!gr_ctx || !gr_ctx->gpu_va) 2542 if (!gr_ctx || !gr_ctx->mem.gpu_va)
2591 return; 2543 return;
2592 2544
2593 gk20a_gmmu_unmap(vm, gr_ctx->gpu_va, 2545 gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
2594 gr_ctx->size, gk20a_mem_flag_none); 2546 gr_ctx->mem.size, gk20a_mem_flag_none);
2595 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); 2547 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &gr_ctx->mem);
2596 dma_free_attrs(d, gr_ctx->size,
2597 gr_ctx->pages, gr_ctx->iova, &attrs);
2598 gr_ctx->pages = NULL;
2599 gr_ctx->iova = 0;
2600 kfree(gr_ctx); 2548 kfree(gr_ctx);
2601} 2549}
2602 2550
@@ -2801,7 +2749,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
2801 } 2749 }
2802 2750
2803 /* commit gr ctx buffer */ 2751 /* commit gr ctx buffer */
2804 err = gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->gpu_va); 2752 err = gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
2805 if (err) { 2753 if (err) {
2806 gk20a_err(dev_from_gk20a(g), 2754 gk20a_err(dev_from_gk20a(g),
2807 "fail to commit gr ctx buffer"); 2755 "fail to commit gr ctx buffer");
@@ -4449,8 +4397,8 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
4449 DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size, 4397 DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size,
4450 PAGE_SIZE); 4398 PAGE_SIZE);
4451 4399
4452 data = vmap(gr->global_ctx_buffer[PRIV_ACCESS_MAP].pages, 4400 data = vmap(gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.pages,
4453 PAGE_ALIGN(gr->global_ctx_buffer[PRIV_ACCESS_MAP].size) >> 4401 PAGE_ALIGN(gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size) >>
4454 PAGE_SHIFT, 0, pgprot_writecombine(PAGE_KERNEL)); 4402 PAGE_SHIFT, 0, pgprot_writecombine(PAGE_KERNEL));
4455 if (!data) { 4403 if (!data) {
4456 gk20a_err(dev_from_gk20a(g), 4404 gk20a_err(dev_from_gk20a(g),
@@ -6851,8 +6799,8 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
6851 6799
6852 /* would have been a variant of gr_gk20a_apply_instmem_overrides */ 6800 /* would have been a variant of gr_gk20a_apply_instmem_overrides */
6853 /* recoded in-place instead.*/ 6801 /* recoded in-place instead.*/
6854 ctx_ptr = vmap(ch_ctx->gr_ctx->pages, 6802 ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
6855 PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT, 6803 PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
6856 0, pgprot_writecombine(PAGE_KERNEL)); 6804 0, pgprot_writecombine(PAGE_KERNEL));
6857 if (!ctx_ptr) { 6805 if (!ctx_ptr) {
6858 err = -ENOMEM; 6806 err = -ENOMEM;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index e2e5fdd7..c86fee45 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1581,7 +1581,7 @@ void gk20a_gmmu_free_attr(struct gk20a *g, enum dma_attr attr,
1581{ 1581{
1582 struct device *d = dev_from_gk20a(g); 1582 struct device *d = dev_from_gk20a(g);
1583 1583
1584 if (mem->cpu_va) { 1584 if (mem->cpu_va || mem->pages) {
1585 if (attr) { 1585 if (attr) {
1586 DEFINE_DMA_ATTRS(attrs); 1586 DEFINE_DMA_ATTRS(attrs);
1587 dma_set_attr(attr, &attrs); 1587 dma_set_attr(attr, &attrs);
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index b510b472..91659537 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -82,15 +82,10 @@ struct zcull_ctx_desc {
82 u32 ctx_sw_mode; 82 u32 ctx_sw_mode;
83}; 83};
84 84
85struct gr_ctx_buffer_desc; 85struct gk20a;
86struct platform_device;
87struct gr_ctx_buffer_desc { 86struct gr_ctx_buffer_desc {
88 void (*destroy)(struct platform_device *, struct gr_ctx_buffer_desc *); 87 void (*destroy)(struct gk20a *, struct gr_ctx_buffer_desc *);
89 struct sg_table *sgt; 88 struct mem_desc mem;
90 struct page **pages;
91 size_t size;
92 u64 iova;
93 struct dma_attrs attrs;
94 void *priv; 89 void *priv;
95}; 90};
96 91
@@ -99,10 +94,8 @@ struct gr_ctx_buffer_desc {
99#endif 94#endif
100 95
101struct gr_ctx_desc { 96struct gr_ctx_desc {
102 struct page **pages; 97 struct mem_desc mem;
103 u64 iova; 98
104 size_t size;
105 u64 gpu_va;
106 int preempt_mode; 99 int preempt_mode;
107#ifdef CONFIG_ARCH_TEGRA_18x_SOC 100#ifdef CONFIG_ARCH_TEGRA_18x_SOC
108 struct gr_ctx_desc_t18x t18x; 101 struct gr_ctx_desc_t18x t18x;
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
index fea2c774..126f9633 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
@@ -80,19 +80,18 @@ static int gk20a_tegra_secure_page_alloc(struct platform_device *pdev)
80 return 0; 80 return 0;
81} 81}
82 82
83static void gk20a_tegra_secure_destroy(struct platform_device *pdev, 83static void gk20a_tegra_secure_destroy(struct gk20a *g,
84 struct gr_ctx_buffer_desc *desc) 84 struct gr_ctx_buffer_desc *desc)
85{ 85{
86 if (desc->sgt) { 86 DEFINE_DMA_ATTRS(attrs);
87 gk20a_free_sgtable(&desc->sgt);
88 desc->sgt = NULL;
89 }
90 87
91 if (desc->iova) { 88 if (desc->mem.sgt) {
92 dma_free_attrs(&tegra_vpr_dev, desc->size, 89 phys_addr_t pa = sg_phys(desc->mem.sgt->sgl);
93 (void *)(uintptr_t)desc->iova, 90 dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
94 desc->iova, &desc->attrs); 91 (void *)(uintptr_t)pa,
95 desc->iova = 0; 92 pa, &attrs);
93 gk20a_free_sgtable(&desc->mem.sgt);
94 desc->mem.sgt = NULL;
96 } 95 }
97} 96}
98 97
@@ -116,9 +115,7 @@ static int gk20a_tegra_secure_alloc(struct platform_device *pdev,
116 if (dma_mapping_error(&tegra_vpr_dev, iova)) 115 if (dma_mapping_error(&tegra_vpr_dev, iova))
117 return -ENOMEM; 116 return -ENOMEM;
118 117
119 desc->iova = iova; 118 desc->mem.size = size;
120 desc->size = size;
121 desc->attrs = attrs;
122 desc->destroy = gk20a_tegra_secure_destroy; 119 desc->destroy = gk20a_tegra_secure_destroy;
123 120
124 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 121 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
@@ -136,16 +133,15 @@ static int gk20a_tegra_secure_alloc(struct platform_device *pdev,
136 /* This bypasses SMMU for VPR during gmmu_map. */ 133 /* This bypasses SMMU for VPR during gmmu_map. */
137 sg_dma_address(sgt->sgl) = 0; 134 sg_dma_address(sgt->sgl) = 0;
138 135
139 desc->sgt = sgt; 136 desc->mem.sgt = sgt;
140 137
141 return err; 138 return err;
142 139
143fail_sgt: 140fail_sgt:
144 kfree(sgt); 141 kfree(sgt);
145fail: 142fail:
146 dma_free_attrs(&tegra_vpr_dev, desc->size, 143 dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
147 (void *)(uintptr_t)&desc->iova, 144 (void *)(uintptr_t)iova, iova, &attrs);
148 desc->iova, &desc->attrs);
149 return err; 145 return err;
150} 146}
151 147
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index c7479078..e318f002 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -978,8 +978,8 @@ static int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
978 if (!ch_ctx || !ch_ctx->gr_ctx || c->vpr) 978 if (!ch_ctx || !ch_ctx->gr_ctx || c->vpr)
979 return -EINVAL; 979 return -EINVAL;
980 980
981 ctx_ptr = vmap(ch_ctx->gr_ctx->pages, 981 ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
982 PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT, 982 PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
983 0, pgprot_writecombine(PAGE_KERNEL)); 983 0, pgprot_writecombine(PAGE_KERNEL));
984 if (!ctx_ptr) 984 if (!ctx_ptr)
985 return -ENOMEM; 985 return -ENOMEM;
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 60880f6d..fd8bb81b 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -107,17 +107,17 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
107 attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); 107 attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
108 108
109 gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size); 109 gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
110 gr->global_ctx_buffer[CIRCULAR].size = cb_buffer_size; 110 gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;
111 111
112 gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size); 112 gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
113 gr->global_ctx_buffer[PAGEPOOL].size = pagepool_buffer_size; 113 gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
114 114
115 gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size); 115 gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
116 gr->global_ctx_buffer[ATTRIBUTE].size = attr_buffer_size; 116 gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
117 117
118 gk20a_dbg_info("priv access map size : %d", 118 gk20a_dbg_info("priv access map size : %d",
119 gr->ctx_vars.priv_access_map_size); 119 gr->ctx_vars.priv_access_map_size);
120 gr->global_ctx_buffer[PRIV_ACCESS_MAP].size = 120 gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
121 gr->ctx_vars.priv_access_map_size; 121 gr->ctx_vars.priv_access_map_size;
122 122
123 return 0; 123 return 0;
@@ -143,38 +143,38 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
143 143
144 /* Circular Buffer */ 144 /* Circular Buffer */
145 gpu_va = gk20a_vm_alloc_va(ch_vm, 145 gpu_va = gk20a_vm_alloc_va(ch_vm,
146 gr->global_ctx_buffer[CIRCULAR].size, 0); 146 gr->global_ctx_buffer[CIRCULAR].mem.size, 0);
147 147
148 if (!gpu_va) 148 if (!gpu_va)
149 goto clean_up; 149 goto clean_up;
150 g_bfr_va[CIRCULAR_VA] = gpu_va; 150 g_bfr_va[CIRCULAR_VA] = gpu_va;
151 g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].size; 151 g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;
152 152
153 /* Attribute Buffer */ 153 /* Attribute Buffer */
154 gpu_va = gk20a_vm_alloc_va(ch_vm, 154 gpu_va = gk20a_vm_alloc_va(ch_vm,
155 gr->global_ctx_buffer[ATTRIBUTE].size, 0); 155 gr->global_ctx_buffer[ATTRIBUTE].mem.size, 0);
156 156
157 if (!gpu_va) 157 if (!gpu_va)
158 goto clean_up; 158 goto clean_up;
159 g_bfr_va[ATTRIBUTE_VA] = gpu_va; 159 g_bfr_va[ATTRIBUTE_VA] = gpu_va;
160 g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].size; 160 g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
161 161
162 /* Page Pool */ 162 /* Page Pool */
163 gpu_va = gk20a_vm_alloc_va(ch_vm, 163 gpu_va = gk20a_vm_alloc_va(ch_vm,
164 gr->global_ctx_buffer[PAGEPOOL].size, 0); 164 gr->global_ctx_buffer[PAGEPOOL].mem.size, 0);
165 if (!gpu_va) 165 if (!gpu_va)
166 goto clean_up; 166 goto clean_up;
167 g_bfr_va[PAGEPOOL_VA] = gpu_va; 167 g_bfr_va[PAGEPOOL_VA] = gpu_va;
168 g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].size; 168 g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;
169 169
170 /* Priv register Access Map */ 170 /* Priv register Access Map */
171 gpu_va = gk20a_vm_alloc_va(ch_vm, 171 gpu_va = gk20a_vm_alloc_va(ch_vm,
172 gr->global_ctx_buffer[PRIV_ACCESS_MAP].size, 0); 172 gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size, 0);
173 if (!gpu_va) 173 if (!gpu_va)
174 goto clean_up; 174 goto clean_up;
175 g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va; 175 g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
176 g_bfr_size[PRIV_ACCESS_MAP_VA] = 176 g_bfr_size[PRIV_ACCESS_MAP_VA] =
177 gr->global_ctx_buffer[PRIV_ACCESS_MAP].size; 177 gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
178 178
179 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX; 179 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
180 msg.handle = platform->virt_handle; 180 msg.handle = platform->virt_handle;
@@ -257,10 +257,10 @@ static int vgpu_gr_alloc_channel_gr_ctx(struct gk20a *g,
257 if (!gr_ctx) 257 if (!gr_ctx)
258 return -ENOMEM; 258 return -ENOMEM;
259 259
260 gr_ctx->size = gr->ctx_vars.buffer_total_size; 260 gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
261 gr_ctx->gpu_va = gk20a_vm_alloc_va(ch_vm, gr_ctx->size, 0); 261 gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm, gr_ctx->mem.size, 0);
262 262
263 if (!gr_ctx->gpu_va) { 263 if (!gr_ctx->mem.gpu_va) {
264 kfree(gr_ctx); 264 kfree(gr_ctx);
265 return -ENOMEM; 265 return -ENOMEM;
266 } 266 }
@@ -268,13 +268,14 @@ static int vgpu_gr_alloc_channel_gr_ctx(struct gk20a *g,
268 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_CTX; 268 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_CTX;
269 msg.handle = platform->virt_handle; 269 msg.handle = platform->virt_handle;
270 p->handle = c->virt_ctx; 270 p->handle = c->virt_ctx;
271 p->gr_ctx_va = gr_ctx->gpu_va; 271 p->gr_ctx_va = gr_ctx->mem.gpu_va;
272 p->class_num = c->obj_class; 272 p->class_num = c->obj_class;
273 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); 273 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
274 274
275 if (err || msg.ret) { 275 if (err || msg.ret) {
276 kfree(gr_ctx); 276 kfree(gr_ctx);
277 gk20a_vm_free_va(ch_vm, gr_ctx->gpu_va, gr_ctx->size, 0); 277 gk20a_vm_free_va(ch_vm, gr_ctx->mem.gpu_va,
278 gr_ctx->mem.size, 0);
278 err = -ENOMEM; 279 err = -ENOMEM;
279 } else 280 } else
280 c->ch_ctx.gr_ctx = gr_ctx; 281 c->ch_ctx.gr_ctx = gr_ctx;
@@ -290,7 +291,7 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
290 291
291 gk20a_dbg_fn(""); 292 gk20a_dbg_fn("");
292 293
293 if (ch_ctx->gr_ctx && ch_ctx->gr_ctx->gpu_va) { 294 if (ch_ctx->gr_ctx && ch_ctx->gr_ctx->mem.gpu_va) {
294 struct tegra_vgpu_cmd_msg msg; 295 struct tegra_vgpu_cmd_msg msg;
295 struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx; 296 struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
296 int err; 297 int err;
@@ -301,9 +302,9 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
301 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); 302 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
302 WARN_ON(err || msg.ret); 303 WARN_ON(err || msg.ret);
303 304
304 gk20a_vm_free_va(ch_vm, ch_ctx->gr_ctx->gpu_va, 305 gk20a_vm_free_va(ch_vm, ch_ctx->gr_ctx->mem.gpu_va,
305 ch_ctx->gr_ctx->size, 0); 306 ch_ctx->gr_ctx->mem.size, 0);
306 ch_ctx->gr_ctx->gpu_va = 0; 307 ch_ctx->gr_ctx->mem.gpu_va = 0;
307 kfree(ch_ctx->gr_ctx); 308 kfree(ch_ctx->gr_ctx);
308 } 309 }
309} 310}
@@ -429,7 +430,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
429 } 430 }
430 431
431 /* commit gr ctx buffer */ 432 /* commit gr ctx buffer */
432 err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->gpu_va); 433 err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
433 if (err) { 434 if (err) {
434 gk20a_err(dev_from_gk20a(g), 435 gk20a_err(dev_from_gk20a(g),
435 "fail to commit gr ctx buffer"); 436 "fail to commit gr ctx buffer");