author	Alex Waterman <alexw@nvidia.com>	2017-04-06 18:30:01 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-20 19:14:32 -0400
commit	e32f62fadfcde413bcd9b5af61ad884e27ba2bf1 (patch)
tree	eff606a0826841eae6ade5906acd9da589d1179a /drivers
parent	52bd58b560d0b3b49c03ef5c2637b67adeac8193 (diff)
gpu: nvgpu: Move Linux nvgpu_mem fields
Hide the Linux-specific nvgpu_mem fields so that, in subsequent patches, core
code can use mem_desc instead of struct sg_table directly. Routines for
accessing system-specific fields will be added as needed.

This is the first step in a fairly major overhaul of the GMMU mapping
routines. There are numerous issues with the current design (or lack
thereof): massively coupled code, system dependencies, disorganization, etc.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I2e7d3ae3a07468cfc17c1c642d28ed1b0952474d
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464076
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
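In short: the Linux-specific backing-store fields (pages, sgt, flags) move out
of struct nvgpu_mem into a nested struct nvgpu_mem_priv, and every call site
switches from mem->sgt to mem->priv.sgt. A minimal sketch of the new layout and
access pattern follows; the struct is taken from the new
include/nvgpu/linux/nvgpu_mem.h added by this patch, while the helper function
around it is purely illustrative and not part of the change:

	/* New Linux-only container for nvgpu_mem backing-store state. */
	struct nvgpu_mem_priv {
		struct page **pages;	/* sysmem only */
		struct sg_table *sgt;
		unsigned long flags;	/* NVGPU_DMA_* allocation flags */
	};

	/*
	 * Illustrative (hypothetical) helper: code that previously read
	 * mem->sgt now reaches the scatter-gather table through
	 * mem->priv.sgt.
	 */
	static u64 example_sysmem_iova(struct gk20a *g, struct nvgpu_mem *mem)
	{
		return g->ops.mm.get_iova_addr(g, mem->priv.sgt->sgl, 0);
	}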
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/dma.c	60
-rw-r--r--	drivers/gpu/nvgpu/common/linux/nvgpu_mem.c	2
-rw-r--r--	drivers/gpu/nvgpu/common/pramin.c	2
-rw-r--r--	drivers/gpu/nvgpu/common/semaphore.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/cde_gk20a.c	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fb_gk20a.c	7
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	33
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ltc_common.c	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	53
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.h	2
-rw-r--r--	drivers/gpu/nvgpu/gm20b/acr_gm20b.c	5
-rw-r--r--	drivers/gpu/nvgpu/gp10b/gr_gp10b.c	2
-rw-r--r--	drivers/gpu/nvgpu/gp10b/mm_gp10b.c	2
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/linux/nvgpu_mem.h	29
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h	48
-rw-r--r--	drivers/gpu/nvgpu/tegra/linux/platform_gk20a_tegra.c	10
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fifo_vgpu.c	4
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gr_vgpu.c	2
19 files changed, 160 insertions, 113 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index 2a75ad13..832d0f47 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -107,10 +107,10 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
 
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
-		mem->pages = dma_alloc_attrs(d,
+		mem->priv.pages = dma_alloc_attrs(d,
 				size, &iova, GFP_KERNEL,
 				__DMA_ATTR(dma_attrs));
-		if (!mem->pages)
+		if (!mem->priv.pages)
 			return -ENOMEM;
 	} else {
 		mem->cpu_va = dma_alloc_attrs(d,
@@ -126,10 +126,12 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	}
 
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
-		err = gk20a_get_sgtable_from_pages(d, &mem->sgt, mem->pages,
+		err = gk20a_get_sgtable_from_pages(d, &mem->priv.sgt,
+						   mem->priv.pages,
 						   iova, size);
 	else {
-		err = gk20a_get_sgtable(d, &mem->sgt, mem->cpu_va, iova, size);
+		err = gk20a_get_sgtable(d, &mem->priv.sgt, mem->cpu_va,
+					iova, size);
 		memset(mem->cpu_va, 0, size);
 	}
 	if (err)
@@ -137,7 +139,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 
 	mem->size = size;
 	mem->aperture = APERTURE_SYSMEM;
-	mem->flags = flags;
+	mem->priv.flags = flags;
 
 	gk20a_dbg_fn("done");
 
@@ -146,7 +148,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 fail_free:
 	dma_free_coherent(d, size, mem->cpu_va, iova);
 	mem->cpu_va = NULL;
-	mem->sgt = NULL;
+	mem->priv.sgt = NULL;
 	return err;
 }
 
@@ -204,23 +206,23 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	else
 		mem->fixed = false;
 
-	mem->sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
-	if (!mem->sgt) {
+	mem->priv.sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
+	if (!mem->priv.sgt) {
 		err = -ENOMEM;
 		goto fail_physfree;
 	}
 
-	err = sg_alloc_table(mem->sgt, 1, GFP_KERNEL);
+	err = sg_alloc_table(mem->priv.sgt, 1, GFP_KERNEL);
 	if (err)
 		goto fail_kfree;
 
-	set_vidmem_page_alloc(mem->sgt->sgl, addr);
-	sg_set_page(mem->sgt->sgl, NULL, size, 0);
+	set_vidmem_page_alloc(mem->priv.sgt->sgl, addr);
+	sg_set_page(mem->priv.sgt->sgl, NULL, size, 0);
 
 	mem->size = size;
 	mem->aperture = APERTURE_VIDMEM;
 	mem->allocator = vidmem_alloc;
-	mem->flags = flags;
+	mem->priv.flags = flags;
 
 	nvgpu_init_list_node(&mem->clear_list_entry);
 
@@ -229,7 +231,7 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	return 0;
 
 fail_kfree:
-	nvgpu_kfree(g, mem->sgt);
+	nvgpu_kfree(g, mem->priv.sgt);
 fail_physfree:
 	nvgpu_free(&g->mm.vidmem.allocator, addr);
 	return err;
@@ -283,7 +285,7 @@ int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 	if (err)
 		return err;
 
-	mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
+	mem->gpu_va = gk20a_gmmu_map(vm, &mem->priv.sgt, size, 0,
 				     gk20a_mem_flag_none, false,
 				     mem->aperture);
 	if (!mem->gpu_va) {
@@ -313,7 +315,7 @@ int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 	if (err)
 		return err;
 
-	mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
+	mem->gpu_va = gk20a_gmmu_map(vm, &mem->priv.sgt, size, 0,
 				     gk20a_mem_flag_none, false,
 				     mem->aperture);
 	if (!mem->gpu_va) {
@@ -332,31 +334,31 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
 
-	if (mem->cpu_va || mem->pages) {
-		if (mem->flags) {
+	if (mem->cpu_va || mem->priv.pages) {
+		if (mem->priv.flags) {
 			DEFINE_DMA_ATTRS(dma_attrs);
 
-			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->flags);
+			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->priv.flags);
 
-			if (mem->flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
-				dma_free_attrs(d, mem->size, mem->pages,
-					sg_dma_address(mem->sgt->sgl),
+			if (mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
+				dma_free_attrs(d, mem->size, mem->priv.pages,
+					sg_dma_address(mem->priv.sgt->sgl),
 					__DMA_ATTR(dma_attrs));
 			} else {
 				dma_free_attrs(d, mem->size, mem->cpu_va,
-					sg_dma_address(mem->sgt->sgl),
+					sg_dma_address(mem->priv.sgt->sgl),
 					__DMA_ATTR(dma_attrs));
 			}
 		} else {
 			dma_free_coherent(d, mem->size, mem->cpu_va,
-				sg_dma_address(mem->sgt->sgl));
+				sg_dma_address(mem->priv.sgt->sgl));
 		}
 		mem->cpu_va = NULL;
-		mem->pages = NULL;
+		mem->priv.pages = NULL;
 	}
 
-	if (mem->sgt)
-		gk20a_free_sgtable(g, &mem->sgt);
+	if (mem->priv.sgt)
+		gk20a_free_sgtable(g, &mem->priv.sgt);
 
 	mem->size = 0;
 	mem->aperture = APERTURE_INVALID;
@@ -368,7 +370,7 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	bool was_empty;
 
 	/* Sanity check - only this supported when allocating. */
-	WARN_ON(mem->flags != NVGPU_DMA_NO_KERNEL_MAPPING);
+	WARN_ON(mem->priv.flags != NVGPU_DMA_NO_KERNEL_MAPPING);
 
 	if (mem->user_mem) {
 		nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
@@ -385,8 +387,8 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	} else {
 		nvgpu_memset(g, mem, 0, 0, mem->size);
 		nvgpu_free(mem->allocator,
-			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
-		gk20a_free_sgtable(g, &mem->sgt);
+			   (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
+		gk20a_free_sgtable(g, &mem->priv.sgt);
 
 		mem->size = 0;
 		mem->aperture = APERTURE_INVALID;
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index eb214aad..bb19dd61 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -57,7 +57,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 		return -EBUSY;
 	}
 
-	cpu_va = vmap(mem->pages,
+	cpu_va = vmap(mem->priv.pages,
 			PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
 			0, pgprot_writecombine(PAGE_KERNEL));
 
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index 378711fc..688e5ce8 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -87,7 +87,7 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 	struct page_alloc_chunk *chunk = NULL;
 	u32 byteoff, start_reg, until_end, n;
 
-	alloc = get_vidmem_page_alloc(mem->sgt->sgl);
+	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 	nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
 			page_alloc_chunk, list_entry) {
 		if (offset >= chunk->length)
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 9e437410..bf7b6348 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -60,7 +60,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 	if (ret)
 		goto out;
 
-	sea->ro_sg_table = sea->sea_mem.sgt;
+	sea->ro_sg_table = sea->sea_mem.priv.sgt;
 	sea->size = SEMAPHORE_POOL_COUNT;
 	sea->map_size = SEMAPHORE_POOL_COUNT * PAGE_SIZE;
 
@@ -154,7 +154,7 @@ struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
 
 	page_idx = (unsigned long)ret;
 
-	p->page = sea->sea_mem.pages[page_idx];
+	p->page = sea->sea_mem.priv.pages[page_idx];
 	p->ro_sg_table = sea->ro_sg_table;
 	p->page_idx = page_idx;
 	p->sema_sea = sea;
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 18432c55..391f6612 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -1239,7 +1239,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	}
 
 	/* map backing store to gpu virtual space */
-	vaddr = gk20a_gmmu_map(ch->vm, &gr->compbit_store.mem.sgt,
+	vaddr = gk20a_gmmu_map(ch->vm, &gr->compbit_store.mem.priv.sgt,
 			       g->gr.compbit_store.mem.size,
 			       NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 			       gk20a_mem_flag_read_only,
diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
index 214014ce..4a76bd6b 100644
--- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
@@ -44,9 +44,10 @@ void fb_gk20a_reset(struct gk20a *g)
 
 void gk20a_fb_init_hw(struct gk20a *g)
 {
-	gk20a_writel(g, fb_niso_flush_sysmem_addr_r(),
-		     g->ops.mm.get_iova_addr(g, g->mm.sysmem_flush.sgt->sgl, 0)
-		     >> 8);
+	u32 addr = g->ops.mm.get_iova_addr(g,
+			g->mm.sysmem_flush.priv.sgt->sgl, 0) >> 8;
+
+	gk20a_writel(g, fb_niso_flush_sysmem_addr_r(), addr);
 }
 
 static void gk20a_fb_set_mmu_page_size(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 12bb3688..314d4551 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -954,7 +954,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 
 	for (chid = 0; chid < f->num_channels; chid++) {
 		f->channel[chid].userd_iova =
-			g->ops.mm.get_iova_addr(g, f->userd.sgt->sgl, 0)
+			g->ops.mm.get_iova_addr(g, f->userd.priv.sgt->sgl, 0)
 			+ chid * f->userd_entry_size;
 		f->channel[chid].userd_gpu_va =
 			f->userd.gpu_va + chid * f->userd_entry_size;
@@ -3148,7 +3148,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	new_buf = !runlist->cur_buffer;
 
 	runlist_iova = g->ops.mm.get_iova_addr(
-			g, runlist->mem[new_buf].sgt->sgl, 0);
+			g, runlist->mem[new_buf].priv.sgt->sgl, 0);
 
 	gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx",
 		runlist_id, (u64)runlist_iova);
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 22093a34..f47d3b12 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1943,7 +1943,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	}
 
 	pm_ctx->mem.gpu_va = gk20a_gmmu_map(c->vm,
-					&pm_ctx->mem.sgt,
+					&pm_ctx->mem.priv.sgt,
 					pm_ctx->mem.size,
 					NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 					gk20a_mem_flag_none, true,
@@ -2205,7 +2205,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
 
 	/* Map ucode surface to GMMU */
 	ucode_info->surface_desc.gpu_va = gk20a_gmmu_map(vm,
-					&ucode_info->surface_desc.sgt,
+					&ucode_info->surface_desc.priv.sgt,
 					ucode_info->surface_desc.size,
 					0, /* flags */
 					gk20a_mem_flag_read_only,
@@ -2823,13 +2823,14 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	/* Circular Buffer */
-	if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].mem.sgt == NULL)) {
+	if (!c->vpr ||
+	    (gr->global_ctx_buffer[CIRCULAR_VPR].mem.priv.sgt == NULL)) {
 		mem = &gr->global_ctx_buffer[CIRCULAR].mem;
 	} else {
 		mem = &gr->global_ctx_buffer[CIRCULAR_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
@@ -2838,13 +2839,14 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[CIRCULAR_VA] = mem->size;
 
 	/* Attribute Buffer */
-	if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.sgt == NULL)) {
+	if (!c->vpr ||
+	    (gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.priv.sgt == NULL)) {
 		mem = &gr->global_ctx_buffer[ATTRIBUTE].mem;
 	} else {
 		mem = &gr->global_ctx_buffer[ATTRIBUTE_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 				gk20a_mem_flag_none, false, mem->aperture);
 	if (!gpu_va)
@@ -2853,13 +2855,14 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[ATTRIBUTE_VA] = mem->size;
 
 	/* Page Pool */
-	if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].mem.sgt == NULL)) {
+	if (!c->vpr ||
+	    (gr->global_ctx_buffer[PAGEPOOL_VPR].mem.priv.sgt == NULL)) {
 		mem = &gr->global_ctx_buffer[PAGEPOOL].mem;
 	} else {
 		mem = &gr->global_ctx_buffer[PAGEPOOL_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
@@ -2869,7 +2872,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 
 	/* Golden Image */
 	mem = &gr->global_ctx_buffer[GOLDEN_CTX].mem;
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size, 0,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size, 0,
 				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2878,7 +2881,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 
 	/* Priv register Access Map */
 	mem = &gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem;
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size, 0,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size, 0,
 				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2950,7 +2953,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 		goto err_free_ctx;
 
 	gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm,
-				&gr_ctx->mem.sgt,
+				&gr_ctx->mem.priv.sgt,
 				gr_ctx->mem.size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE,
 				gk20a_mem_flag_none, true,
@@ -3196,7 +3199,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 	}
 
 	/* allocate patch buffer */
-	if (ch_ctx->patch_ctx.mem.sgt == NULL) {
+	if (ch_ctx->patch_ctx.mem.priv.sgt == NULL) {
 		err = gr_gk20a_alloc_channel_patch_ctx(g, c);
 		if (err) {
 			nvgpu_err(g,
@@ -4735,7 +4738,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	/* init mmu debug buffer */
-	addr = g->ops.mm.get_iova_addr(g, gr->mmu_wr_mem.sgt->sgl, 0);
+	addr = g->ops.mm.get_iova_addr(g, gr->mmu_wr_mem.priv.sgt->sgl, 0);
 	addr >>= fb_mmu_debug_wr_addr_alignment_v();
 
 	gk20a_writel(g, fb_mmu_debug_wr_r(),
@@ -4745,7 +4748,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 		     fb_mmu_debug_wr_vol_false_f() |
 		     fb_mmu_debug_wr_addr_f(addr));
 
-	addr = g->ops.mm.get_iova_addr(g, gr->mmu_rd_mem.sgt->sgl, 0);
+	addr = g->ops.mm.get_iova_addr(g, gr->mmu_rd_mem.priv.sgt->sgl, 0);
 	addr >>= fb_mmu_debug_rd_addr_alignment_v();
 
 	gk20a_writel(g, fb_mmu_debug_rd_r(),
@@ -8405,7 +8408,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 		}
 		if (!pm_ctx_ready) {
 			/* Make sure ctx buffer was initialized */
-			if (!ch_ctx->pm_ctx.mem.pages) {
+			if (!ch_ctx->pm_ctx.mem.priv.pages) {
 				nvgpu_err(g,
 					"Invalid ctx buffer");
 				err = -EINVAL;
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
index b92dda6d..1958c11c 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_common.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c
@@ -96,7 +96,7 @@ static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 		compbit_store_iova = gk20a_mem_phys(&gr->compbit_store.mem);
 	else
 		compbit_store_iova = g->ops.mm.get_iova_addr(g,
-				gr->compbit_store.mem.sgt->sgl, 0);
+				gr->compbit_store.mem.priv.sgt->sgl, 0);
 
 	compbit_base_post_divide64 = compbit_store_iova >>
 		ltc_ltcs_ltss_cbc_base_alignment_shift_v();
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 1db52c85..69e00c5e 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -817,27 +817,28 @@ static int alloc_gmmu_phys_pages(struct vm_gk20a *vm, u32 order,
 		gk20a_dbg(gpu_dbg_pte, "alloc_pages failed");
 		goto err_out;
 	}
-	entry->mem.sgt = nvgpu_kzalloc(g, sizeof(*entry->mem.sgt));
-	if (!entry->mem.sgt) {
+	entry->mem.priv.sgt = nvgpu_kzalloc(g, sizeof(*entry->mem.priv.sgt));
+	if (!entry->mem.priv.sgt) {
 		gk20a_dbg(gpu_dbg_pte, "cannot allocate sg table");
 		goto err_alloced;
 	}
-	err = sg_alloc_table(entry->mem.sgt, 1, GFP_KERNEL);
+	err = sg_alloc_table(entry->mem.priv.sgt, 1, GFP_KERNEL);
 	if (err) {
 		gk20a_dbg(gpu_dbg_pte, "sg_alloc_table failed");
 		goto err_sg_table;
 	}
-	sg_set_page(entry->mem.sgt->sgl, pages, len, 0);
+	sg_set_page(entry->mem.priv.sgt->sgl, pages, len, 0);
 	entry->mem.cpu_va = page_address(pages);
 	memset(entry->mem.cpu_va, 0, len);
 	entry->mem.size = len;
 	entry->mem.aperture = APERTURE_SYSMEM;
-	FLUSH_CPU_DCACHE(entry->mem.cpu_va, sg_phys(entry->mem.sgt->sgl), len);
+	FLUSH_CPU_DCACHE(entry->mem.cpu_va,
+			 sg_phys(entry->mem.priv.sgt->sgl), len);
 
 	return 0;
 
 err_sg_table:
-	nvgpu_kfree(vm->mm->g, entry->mem.sgt);
+	nvgpu_kfree(vm->mm->g, entry->mem.priv.sgt);
 err_alloced:
 	__free_pages(pages, order);
 err_out:
@@ -854,9 +855,9 @@ static void free_gmmu_phys_pages(struct vm_gk20a *vm,
 	free_pages((unsigned long)entry->mem.cpu_va, get_order(entry->mem.size));
 	entry->mem.cpu_va = NULL;
 
-	sg_free_table(entry->mem.sgt);
-	nvgpu_kfree(vm->mm->g, entry->mem.sgt);
-	entry->mem.sgt = NULL;
+	sg_free_table(entry->mem.priv.sgt);
+	nvgpu_kfree(vm->mm->g, entry->mem.priv.sgt);
+	entry->mem.priv.sgt = NULL;
 	entry->mem.size = 0;
 	entry->mem.aperture = APERTURE_INVALID;
 }
@@ -864,16 +865,16 @@ static void free_gmmu_phys_pages(struct vm_gk20a *vm,
 static int map_gmmu_phys_pages(struct gk20a_mm_entry *entry)
 {
 	FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-			 sg_phys(entry->mem.sgt->sgl),
-			 entry->mem.sgt->sgl->length);
+			 sg_phys(entry->mem.priv.sgt->sgl),
+			 entry->mem.priv.sgt->sgl->length);
 	return 0;
 }
 
 static void unmap_gmmu_phys_pages(struct gk20a_mm_entry *entry)
 {
 	FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-			 sg_phys(entry->mem.sgt->sgl),
-			 entry->mem.sgt->sgl->length);
+			 sg_phys(entry->mem.priv.sgt->sgl),
+			 entry->mem.priv.sgt->sgl->length);
 }
 
 static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
@@ -941,7 +942,7 @@ int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 			return 0;
 
 		FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-				 sg_phys(entry->mem.sgt->sgl),
+				 sg_phys(entry->mem.priv.sgt->sgl),
 				 entry->mem.size);
 	} else {
 		int err = nvgpu_mem_begin(g, &entry->mem);
@@ -967,7 +968,7 @@ void unmap_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 			return;
 
 		FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-				 sg_phys(entry->mem.sgt->sgl),
+				 sg_phys(entry->mem.priv.sgt->sgl),
 				 entry->mem.size);
 	} else {
 		nvgpu_mem_end(g, &entry->mem);
@@ -1028,9 +1029,9 @@ static int gk20a_zalloc_gmmu_page_table(struct vm_gk20a *vm,
 
 	gk20a_dbg(gpu_dbg_pte, "entry = 0x%p, addr=%08llx, size %d, woff %x",
 		  entry,
-		  (entry->mem.sgt && entry->mem.aperture == APERTURE_SYSMEM) ?
-		  g->ops.mm.get_iova_addr(g, entry->mem.sgt->sgl, 0)
-		  : 0,
+		  (entry->mem.priv.sgt &&
+		   entry->mem.aperture == APERTURE_SYSMEM) ?
+		  g->ops.mm.get_iova_addr(g, entry->mem.priv.sgt->sgl, 0) : 0,
 		  order, entry->woffset);
 	if (err)
 		return err;
@@ -1726,7 +1727,7 @@ static struct sg_table *gk20a_vidbuf_map_dma_buf(
 {
 	struct gk20a_vidmem_buf *buf = attach->dmabuf->priv;
 
-	return buf->mem->sgt;
+	return buf->mem->priv.sgt;
 }
 
 static void gk20a_vidbuf_unmap_dma_buf(struct dma_buf_attachment *attach,
@@ -2398,7 +2399,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 		g->ops.mm.gmmu_map(
 			vm,
 			!fixed_mapping ? 0 : *compbits_win_gva, /* va */
-			g->gr.compbit_store.mem.sgt,
+			g->gr.compbit_store.mem.priv.sgt,
 			cacheline_offset_start, /* sg offset */
 			mapped_buffer->ctag_map_win_size, /* size */
 			small_pgsz_index,
@@ -2518,7 +2519,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 	if (g->mm.vidmem.ce_ctx_id == (u32)~0)
 		return -EINVAL;
 
-	alloc = get_vidmem_page_alloc(mem->sgt->sgl);
+	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 
 	nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
 			page_alloc_chunk, list_entry) {
@@ -2580,14 +2581,14 @@ u64 gk20a_mem_get_base_addr(struct gk20a *g, struct nvgpu_mem *mem,
 	u64 addr;
 
 	if (mem->aperture == APERTURE_VIDMEM) {
-		alloc = get_vidmem_page_alloc(mem->sgt->sgl);
+		alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 
 		/* This API should not be used with > 1 chunks */
 		WARN_ON(alloc->nr_chunks != 1);
 
 		addr = alloc->base;
 	} else {
-		addr = g->ops.mm.get_iova_addr(g, mem->sgt->sgl, flags);
+		addr = g->ops.mm.get_iova_addr(g, mem->priv.sgt->sgl, flags);
 	}
 
 	return addr;
@@ -2619,8 +2620,8 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
 	while ((mem = get_pending_mem_desc(mm)) != NULL) {
 		gk20a_gmmu_clear_vidmem_mem(g, mem);
 		nvgpu_free(mem->allocator,
-			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
-		gk20a_free_sgtable(g, &mem->sgt);
+			   (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
+		gk20a_free_sgtable(g, &mem->priv.sgt);
 
 		WARN_ON(atomic64_sub_return(mem->size,
 					&g->mm.vidmem.bytes_pending) < 0);
@@ -2774,7 +2775,7 @@ u64 gk20a_pde_addr(struct gk20a *g, struct gk20a_mm_entry *entry)
 	u64 base;
 
 	if (g->mm.has_physical_mode)
-		base = sg_phys(entry->mem.sgt->sgl);
+		base = sg_phys(entry->mem.priv.sgt->sgl);
 	else
 		base = gk20a_mem_get_base_addr(g, &entry->mem, 0);
 
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 7fac811e..94dc0b6f 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -539,7 +539,7 @@ static inline phys_addr_t gk20a_mem_phys(struct nvgpu_mem *mem)
 {
 	/* FIXME: the sgt/sgl may get null if this is accessed e.g. in an isr
 	 * during channel deletion - attempt to fix at least null derefs */
-	struct sg_table *sgt = mem->sgt;
+	struct sg_table *sgt = mem->priv.sgt;
 
 	if (sgt) {
 		struct scatterlist *sgl = sgt->sgl;
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index b6afa748..7f0edbb2 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -1075,7 +1075,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 	u32 *acr_ucode_header_t210_load;
 	u32 *acr_ucode_data_t210_load;
 
-	start = g->ops.mm.get_iova_addr(g, acr->ucode_blob.sgt->sgl, 0);
+	start = g->ops.mm.get_iova_addr(g, acr->ucode_blob.priv.sgt->sgl, 0);
 	size = acr->ucode_blob.size;
 
 	gm20b_dbg_pmu("");
@@ -1419,7 +1419,8 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		goto err_done;
 	}
 
-	acr->hsbl_ucode.gpu_va = gk20a_gmmu_map(vm, &acr->hsbl_ucode.sgt,
+	acr->hsbl_ucode.gpu_va = gk20a_gmmu_map(vm,
+			&acr->hsbl_ucode.priv.sgt,
 			bl_sz,
 			0, /* flags */
 			gk20a_mem_flag_read_only, false,
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 24e7ffad..7ae6abc2 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -842,7 +842,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 		return err;
 
 	mem->gpu_va = gk20a_gmmu_map(vm,
-			&mem->sgt,
+			&mem->priv.sgt,
 			size,
 			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 			gk20a_mem_flag_none,
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index c2e0fddf..c5149f22 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -41,7 +41,7 @@ static int gp10b_init_mm_setup_hw(struct gk20a *g)
 	g->ops.fb.set_mmu_page_size(g);
 
 	gk20a_writel(g, fb_niso_flush_sysmem_addr_r(),
-		(g->ops.mm.get_iova_addr(g, g->mm.sysmem_flush.sgt->sgl, 0)
+		(g->ops.mm.get_iova_addr(g, g->mm.sysmem_flush.priv.sgt->sgl, 0)
 		>> 8ULL));
 
 	g->ops.bus.bar1_bind(g, inst_block);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/linux/nvgpu_mem.h
new file mode 100644
index 00000000..8b1e646e
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/nvgpu_mem.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVGPU_LINUX_NVGPU_MEM_H__
+#define __NVGPU_LINUX_NVGPU_MEM_H__
+
+struct page;
+struct sg_table;
+
+struct nvgpu_mem_priv {
+	struct page **pages;
+	struct sg_table *sgt;
+	unsigned long flags;
+};
+
+#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
index ae5dcc6e..1590ee7a 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
@@ -17,10 +17,11 @@
 #ifndef __NVGPU_NVGPU_MEM_H__
 #define __NVGPU_NVGPU_MEM_H__
 
-#include <linux/types.h>
-
+#include <nvgpu/types.h>
 #include <nvgpu/list.h>
 
+#include <nvgpu/linux/nvgpu_mem.h>
+
 struct page;
 struct sg_table;
 
@@ -39,18 +40,32 @@ enum nvgpu_aperture {
 };
 
 struct nvgpu_mem {
-	void *cpu_va; /* sysmem only */
-	struct page **pages; /* sysmem only */
-	struct sg_table *sgt;
+	/*
+	 * Populated for all nvgpu_mem structs - vidmem or system.
+	 */
 	enum nvgpu_aperture aperture;
 	size_t size;
 	u64 gpu_va;
-	bool fixed; /* vidmem only */
-	bool user_mem; /* vidmem only */
-	struct nvgpu_allocator *allocator; /* vidmem only */
-	struct nvgpu_list_node clear_list_entry; /* vidmem only */
-	bool skip_wmb;
-	unsigned long flags;
+	bool skip_wmb;
+
+	/*
+	 * Only populated for a sysmem allocation.
+	 */
+	void *cpu_va;
+
+	/*
+	 * Fields only populated for vidmem allocations.
+	 */
+	bool fixed;
+	bool user_mem;
+	struct nvgpu_allocator *allocator;
+	struct nvgpu_list_node clear_list_entry;
+
+	/*
+	 * This is defined by the system specific header. It can be empty if
+	 * there's no system specific stuff for a given system.
+	 */
+	struct nvgpu_mem_priv priv;
 };
 
 static inline struct nvgpu_mem *
@@ -61,11 +76,6 @@ nvgpu_mem_from_clear_list_entry(struct nvgpu_list_node *node)
 			clear_list_entry));
 };
 
-struct nvgpu_mem_sub {
-	u32 offset;
-	u32 size;
-};
-
 static inline const char *nvgpu_aperture_str(enum nvgpu_aperture aperture)
 {
 	switch (aperture) {
diff --git a/drivers/gpu/nvgpu/tegra/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/tegra/linux/platform_gk20a_tegra.c
index 7ce66707..852dcdf2 100644
--- a/drivers/gpu/nvgpu/tegra/linux/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/tegra/linux/platform_gk20a_tegra.c
@@ -138,13 +138,13 @@ static void gk20a_tegra_secure_destroy(struct gk20a *g,
 {
 	DEFINE_DMA_ATTRS(attrs);
 
-	if (desc->mem.sgt) {
-		phys_addr_t pa = sg_phys(desc->mem.sgt->sgl);
+	if (desc->mem.priv.sgt) {
+		phys_addr_t pa = sg_phys(desc->mem.priv.sgt->sgl);
 		dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
 			(void *)(uintptr_t)pa,
 			pa, __DMA_ATTR(attrs));
-		gk20a_free_sgtable(g, &desc->mem.sgt);
-		desc->mem.sgt = NULL;
+		gk20a_free_sgtable(g, &desc->mem.priv.sgt);
+		desc->mem.priv.sgt = NULL;
 	}
 }
 
@@ -184,7 +184,7 @@ int gk20a_tegra_secure_alloc(struct device *dev,
 
 	desc->destroy = gk20a_tegra_secure_destroy;
 
-	desc->mem.sgt = sgt;
+	desc->mem.priv.sgt = sgt;
 	desc->mem.size = size;
 	desc->mem.aperture = APERTURE_SYSMEM;
 
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 67def777..9122e48b 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -270,7 +270,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 	}
 
 	/* bar1 va */
-	f->userd.gpu_va = vgpu_bar1_map(g, &f->userd.sgt, f->userd.size);
+	f->userd.gpu_va = vgpu_bar1_map(g, &f->userd.priv.sgt, f->userd.size);
 	if (!f->userd.gpu_va) {
 		dev_err(d, "gmmu mapping failed\n");
 		goto clean_up;
@@ -304,7 +304,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 
 	for (chid = 0; chid < f->num_channels; chid++) {
 		f->channel[chid].userd_iova =
-			g->ops.mm.get_iova_addr(g, f->userd.sgt->sgl, 0)
+			g->ops.mm.get_iova_addr(g, f->userd.priv.sgt->sgl, 0)
 			+ chid * f->userd_entry_size;
 		f->channel[chid].userd_gpu_va =
 			f->userd.gpu_va + chid * f->userd_entry_size;
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 702ae97b..2a674feb 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -558,7 +558,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	}
 
 	/* allocate patch buffer */
-	if (ch_ctx->patch_ctx.mem.pages == NULL) {
+	if (ch_ctx->patch_ctx.mem.priv.pages == NULL) {
 		err = vgpu_gr_alloc_channel_patch_ctx(g, c);
 		if (err) {
 			nvgpu_err(g, "fail to allocate patch buffer");
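After this change, a typical allocation-plus-map path in the Linux driver reads
roughly as below. This is a hedged sketch assembled from the call sites touched
above (nvgpu_dma_alloc_flags_sys() and gk20a_gmmu_map()); the wrapper function
itself is hypothetical and error handling is abbreviated:

	static int example_alloc_and_map(struct gk20a *g, struct vm_gk20a *vm,
					 size_t size, struct nvgpu_mem *mem)
	{
		int err;

		/* Fills mem->priv.sgt / mem->priv.pages behind the scenes. */
		err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
						size, mem);
		if (err)
			return err;

		/* Core code now passes the Linux-private sg_table explicitly. */
		mem->gpu_va = gk20a_gmmu_map(vm, &mem->priv.sgt, size, 0,
					     gk20a_mem_flag_none, false,
					     mem->aperture);
		if (!mem->gpu_va)
			return -ENOMEM;	/* free with the matching DMA-free path */

		return 0;
	}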