author     Alex Waterman <alexw@nvidia.com>                     2017-04-06 18:30:01 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-04-20 19:14:32 -0400
commit     e32f62fadfcde413bcd9b5af61ad884e27ba2bf1 (patch)
tree       eff606a0826841eae6ade5906acd9da589d1179a /drivers/gpu/nvgpu/gk20a
parent     52bd58b560d0b3b49c03ef5c2637b67adeac8193 (diff)
gpu: nvgpu: Move Linux nvgpu_mem fields
Hide the Linux specific nvgpu_mem fields so that, in subsequent patches, core
code can use mem_desc instead of struct sg_table directly. Routines for
accessing system specific fields will be added as needed.

This is the first step in a fairly major overhaul of the GMMU mapping
routines. There are numerous issues with the current design (or lack
thereof): massively coupled code, system dependencies, disorganization, etc.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I2e7d3ae3a07468cfc17c1c642d28ed1b0952474d
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464076
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
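For orientation, here is a minimal sketch of the layout this change moves toward,
inferred from the call sites in the diff below. Only the priv.sgt and priv.pages
members are confirmed by the hunks; the nvgpu_mem_priv type name, the surrounding
field names and types, and the accessor are illustrative assumptions, not the
actual headers.

    /* Linux-specific backing state, hidden from core code (illustrative sketch). */
    struct nvgpu_mem_priv {
            struct sg_table *sgt;   /* scatter/gather table from the Linux DMA API */
            struct page **pages;    /* backing pages, when allocated that way */
    };

    struct nvgpu_mem {
            u32 aperture;           /* APERTURE_SYSMEM / APERTURE_VIDMEM / APERTURE_INVALID */
            size_t size;            /* size of the allocation in bytes */
            u64 gpu_va;             /* GMMU virtual address once mapped */
            void *cpu_va;           /* kernel mapping, when present */
            struct nvgpu_mem_priv priv;     /* OS (Linux) specific fields live here */
    };

    /* One possible accessor of the kind the message anticipates (illustrative). */
    static inline struct sg_table *nvgpu_mem_sgt(struct nvgpu_mem *mem)
    {
            return mem->priv.sgt;
    }

Every hunk below performs exactly this substitution: mem.sgt becomes mem.priv.sgt
and mem.pages becomes mem.priv.pages, with a few lines re-wrapped where the longer
name pushes them past the kernel's line-length limit.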
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/cde_gk20a.c   |  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fb_gk20a.c    |  7
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c  |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c    | 33
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ltc_common.c  |  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c    | 53
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h    |  2
7 files changed, 54 insertions, 49 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 18432c55..391f6612 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -1239,7 +1239,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	}
 
 	/* map backing store to gpu virtual space */
-	vaddr = gk20a_gmmu_map(ch->vm, &gr->compbit_store.mem.sgt,
+	vaddr = gk20a_gmmu_map(ch->vm, &gr->compbit_store.mem.priv.sgt,
 			       g->gr.compbit_store.mem.size,
 			       NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 			       gk20a_mem_flag_read_only,
diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
index 214014ce..4a76bd6b 100644
--- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
@@ -44,9 +44,10 @@ void fb_gk20a_reset(struct gk20a *g)
 
 void gk20a_fb_init_hw(struct gk20a *g)
 {
-	gk20a_writel(g, fb_niso_flush_sysmem_addr_r(),
-		g->ops.mm.get_iova_addr(g, g->mm.sysmem_flush.sgt->sgl, 0)
-		>> 8);
+	u32 addr = g->ops.mm.get_iova_addr(g,
+			g->mm.sysmem_flush.priv.sgt->sgl, 0) >> 8;
+
+	gk20a_writel(g, fb_niso_flush_sysmem_addr_r(), addr);
 }
 
 static void gk20a_fb_set_mmu_page_size(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 12bb3688..314d4551 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -954,7 +954,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 
 	for (chid = 0; chid < f->num_channels; chid++) {
 		f->channel[chid].userd_iova =
-			g->ops.mm.get_iova_addr(g, f->userd.sgt->sgl, 0)
+			g->ops.mm.get_iova_addr(g, f->userd.priv.sgt->sgl, 0)
 			+ chid * f->userd_entry_size;
 		f->channel[chid].userd_gpu_va =
 			f->userd.gpu_va + chid * f->userd_entry_size;
@@ -3148,7 +3148,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	new_buf = !runlist->cur_buffer;
 
 	runlist_iova = g->ops.mm.get_iova_addr(
-			g, runlist->mem[new_buf].sgt->sgl, 0);
+			g, runlist->mem[new_buf].priv.sgt->sgl, 0);
 
 	gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx",
 		runlist_id, (u64)runlist_iova);
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 22093a34..f47d3b12 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1943,7 +1943,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	}
 
 	pm_ctx->mem.gpu_va = gk20a_gmmu_map(c->vm,
-					&pm_ctx->mem.sgt,
+					&pm_ctx->mem.priv.sgt,
 					pm_ctx->mem.size,
 					NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 					gk20a_mem_flag_none, true,
@@ -2205,7 +2205,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
 
 	/* Map ucode surface to GMMU */
 	ucode_info->surface_desc.gpu_va = gk20a_gmmu_map(vm,
-					&ucode_info->surface_desc.sgt,
+					&ucode_info->surface_desc.priv.sgt,
 					ucode_info->surface_desc.size,
 					0, /* flags */
 					gk20a_mem_flag_read_only,
@@ -2823,13 +2823,14 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	/* Circular Buffer */
-	if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].mem.sgt == NULL)) {
+	if (!c->vpr ||
+	    (gr->global_ctx_buffer[CIRCULAR_VPR].mem.priv.sgt == NULL)) {
 		mem = &gr->global_ctx_buffer[CIRCULAR].mem;
 	} else {
 		mem = &gr->global_ctx_buffer[CIRCULAR_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
@@ -2838,13 +2839,14 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[CIRCULAR_VA] = mem->size;
 
 	/* Attribute Buffer */
-	if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.sgt == NULL)) {
+	if (!c->vpr ||
+	    (gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.priv.sgt == NULL)) {
 		mem = &gr->global_ctx_buffer[ATTRIBUTE].mem;
 	} else {
 		mem = &gr->global_ctx_buffer[ATTRIBUTE_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 				gk20a_mem_flag_none, false, mem->aperture);
 	if (!gpu_va)
@@ -2853,13 +2855,14 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[ATTRIBUTE_VA] = mem->size;
 
 	/* Page Pool */
-	if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].mem.sgt == NULL)) {
+	if (!c->vpr ||
+	    (gr->global_ctx_buffer[PAGEPOOL_VPR].mem.priv.sgt == NULL)) {
 		mem = &gr->global_ctx_buffer[PAGEPOOL].mem;
 	} else {
 		mem = &gr->global_ctx_buffer[PAGEPOOL_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
@@ -2869,7 +2872,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 
 	/* Golden Image */
 	mem = &gr->global_ctx_buffer[GOLDEN_CTX].mem;
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size, 0,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size, 0,
 				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2878,7 +2881,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 
 	/* Priv register Access Map */
 	mem = &gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem;
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size, 0,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size, 0,
 				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2950,7 +2953,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 		goto err_free_ctx;
 
 	gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm,
-				&gr_ctx->mem.sgt,
+				&gr_ctx->mem.priv.sgt,
 				gr_ctx->mem.size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE,
 				gk20a_mem_flag_none, true,
@@ -3196,7 +3199,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 	}
 
 	/* allocate patch buffer */
-	if (ch_ctx->patch_ctx.mem.sgt == NULL) {
+	if (ch_ctx->patch_ctx.mem.priv.sgt == NULL) {
 		err = gr_gk20a_alloc_channel_patch_ctx(g, c);
 		if (err) {
 			nvgpu_err(g,
@@ -4735,7 +4738,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	/* init mmu debug buffer */
-	addr = g->ops.mm.get_iova_addr(g, gr->mmu_wr_mem.sgt->sgl, 0);
+	addr = g->ops.mm.get_iova_addr(g, gr->mmu_wr_mem.priv.sgt->sgl, 0);
 	addr >>= fb_mmu_debug_wr_addr_alignment_v();
 
 	gk20a_writel(g, fb_mmu_debug_wr_r(),
@@ -4745,7 +4748,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 		fb_mmu_debug_wr_vol_false_f() |
 		fb_mmu_debug_wr_addr_f(addr));
 
-	addr = g->ops.mm.get_iova_addr(g, gr->mmu_rd_mem.sgt->sgl, 0);
+	addr = g->ops.mm.get_iova_addr(g, gr->mmu_rd_mem.priv.sgt->sgl, 0);
 	addr >>= fb_mmu_debug_rd_addr_alignment_v();
 
 	gk20a_writel(g, fb_mmu_debug_rd_r(),
@@ -8405,7 +8408,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	}
 	if (!pm_ctx_ready) {
 		/* Make sure ctx buffer was initialized */
-		if (!ch_ctx->pm_ctx.mem.pages) {
+		if (!ch_ctx->pm_ctx.mem.priv.pages) {
 			nvgpu_err(g,
 				"Invalid ctx buffer");
 			err = -EINVAL;
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
index b92dda6d..1958c11c 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_common.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c
@@ -96,7 +96,7 @@ static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 		compbit_store_iova = gk20a_mem_phys(&gr->compbit_store.mem);
 	else
 		compbit_store_iova = g->ops.mm.get_iova_addr(g,
-				gr->compbit_store.mem.sgt->sgl, 0);
+				gr->compbit_store.mem.priv.sgt->sgl, 0);
 
 	compbit_base_post_divide64 = compbit_store_iova >>
 		ltc_ltcs_ltss_cbc_base_alignment_shift_v();
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 1db52c85..69e00c5e 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -817,27 +817,28 @@ static int alloc_gmmu_phys_pages(struct vm_gk20a *vm, u32 order,
 		gk20a_dbg(gpu_dbg_pte, "alloc_pages failed");
 		goto err_out;
 	}
-	entry->mem.sgt = nvgpu_kzalloc(g, sizeof(*entry->mem.sgt));
-	if (!entry->mem.sgt) {
+	entry->mem.priv.sgt = nvgpu_kzalloc(g, sizeof(*entry->mem.priv.sgt));
+	if (!entry->mem.priv.sgt) {
 		gk20a_dbg(gpu_dbg_pte, "cannot allocate sg table");
 		goto err_alloced;
 	}
-	err = sg_alloc_table(entry->mem.sgt, 1, GFP_KERNEL);
+	err = sg_alloc_table(entry->mem.priv.sgt, 1, GFP_KERNEL);
 	if (err) {
 		gk20a_dbg(gpu_dbg_pte, "sg_alloc_table failed");
 		goto err_sg_table;
 	}
-	sg_set_page(entry->mem.sgt->sgl, pages, len, 0);
+	sg_set_page(entry->mem.priv.sgt->sgl, pages, len, 0);
 	entry->mem.cpu_va = page_address(pages);
 	memset(entry->mem.cpu_va, 0, len);
 	entry->mem.size = len;
 	entry->mem.aperture = APERTURE_SYSMEM;
-	FLUSH_CPU_DCACHE(entry->mem.cpu_va, sg_phys(entry->mem.sgt->sgl), len);
+	FLUSH_CPU_DCACHE(entry->mem.cpu_va,
+			 sg_phys(entry->mem.priv.sgt->sgl), len);
 
 	return 0;
 
 err_sg_table:
-	nvgpu_kfree(vm->mm->g, entry->mem.sgt);
+	nvgpu_kfree(vm->mm->g, entry->mem.priv.sgt);
 err_alloced:
 	__free_pages(pages, order);
 err_out:
@@ -854,9 +855,9 @@ static void free_gmmu_phys_pages(struct vm_gk20a *vm,
 	free_pages((unsigned long)entry->mem.cpu_va, get_order(entry->mem.size));
 	entry->mem.cpu_va = NULL;
 
-	sg_free_table(entry->mem.sgt);
-	nvgpu_kfree(vm->mm->g, entry->mem.sgt);
-	entry->mem.sgt = NULL;
+	sg_free_table(entry->mem.priv.sgt);
+	nvgpu_kfree(vm->mm->g, entry->mem.priv.sgt);
+	entry->mem.priv.sgt = NULL;
 	entry->mem.size = 0;
 	entry->mem.aperture = APERTURE_INVALID;
 }
@@ -864,16 +865,16 @@ static void free_gmmu_phys_pages(struct vm_gk20a *vm,
 static int map_gmmu_phys_pages(struct gk20a_mm_entry *entry)
 {
 	FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-			 sg_phys(entry->mem.sgt->sgl),
-			 entry->mem.sgt->sgl->length);
+			 sg_phys(entry->mem.priv.sgt->sgl),
+			 entry->mem.priv.sgt->sgl->length);
 	return 0;
 }
 
 static void unmap_gmmu_phys_pages(struct gk20a_mm_entry *entry)
 {
 	FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-			 sg_phys(entry->mem.sgt->sgl),
-			 entry->mem.sgt->sgl->length);
+			 sg_phys(entry->mem.priv.sgt->sgl),
+			 entry->mem.priv.sgt->sgl->length);
 }
 
 static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
@@ -941,7 +942,7 @@ int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 			return 0;
 
 		FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-				 sg_phys(entry->mem.sgt->sgl),
+				 sg_phys(entry->mem.priv.sgt->sgl),
 				 entry->mem.size);
 	} else {
 		int err = nvgpu_mem_begin(g, &entry->mem);
@@ -967,7 +968,7 @@ void unmap_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 			return;
 
 		FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-				 sg_phys(entry->mem.sgt->sgl),
+				 sg_phys(entry->mem.priv.sgt->sgl),
 				 entry->mem.size);
 	} else {
 		nvgpu_mem_end(g, &entry->mem);
@@ -1028,9 +1029,9 @@ static int gk20a_zalloc_gmmu_page_table(struct vm_gk20a *vm,
 
 	gk20a_dbg(gpu_dbg_pte, "entry = 0x%p, addr=%08llx, size %d, woff %x",
 		  entry,
-		  (entry->mem.sgt && entry->mem.aperture == APERTURE_SYSMEM) ?
-		  g->ops.mm.get_iova_addr(g, entry->mem.sgt->sgl, 0)
-		  : 0,
+		  (entry->mem.priv.sgt &&
+		   entry->mem.aperture == APERTURE_SYSMEM) ?
+		  g->ops.mm.get_iova_addr(g, entry->mem.priv.sgt->sgl, 0) : 0,
 		  order, entry->woffset);
 	if (err)
 		return err;
@@ -1726,7 +1727,7 @@ static struct sg_table *gk20a_vidbuf_map_dma_buf(
 {
 	struct gk20a_vidmem_buf *buf = attach->dmabuf->priv;
 
-	return buf->mem->sgt;
+	return buf->mem->priv.sgt;
 }
 
 static void gk20a_vidbuf_unmap_dma_buf(struct dma_buf_attachment *attach,
@@ -2398,7 +2399,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 	g->ops.mm.gmmu_map(
 			vm,
 			!fixed_mapping ? 0 : *compbits_win_gva, /* va */
-			g->gr.compbit_store.mem.sgt,
+			g->gr.compbit_store.mem.priv.sgt,
 			cacheline_offset_start, /* sg offset */
 			mapped_buffer->ctag_map_win_size, /* size */
 			small_pgsz_index,
@@ -2518,7 +2519,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 	if (g->mm.vidmem.ce_ctx_id == (u32)~0)
 		return -EINVAL;
 
-	alloc = get_vidmem_page_alloc(mem->sgt->sgl);
+	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 
 	nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
 			page_alloc_chunk, list_entry) {
@@ -2580,14 +2581,14 @@ u64 gk20a_mem_get_base_addr(struct gk20a *g, struct nvgpu_mem *mem,
 	u64 addr;
 
 	if (mem->aperture == APERTURE_VIDMEM) {
-		alloc = get_vidmem_page_alloc(mem->sgt->sgl);
+		alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 
 		/* This API should not be used with > 1 chunks */
 		WARN_ON(alloc->nr_chunks != 1);
 
 		addr = alloc->base;
 	} else {
-		addr = g->ops.mm.get_iova_addr(g, mem->sgt->sgl, flags);
+		addr = g->ops.mm.get_iova_addr(g, mem->priv.sgt->sgl, flags);
 	}
 
 	return addr;
@@ -2619,8 +2620,8 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
 	while ((mem = get_pending_mem_desc(mm)) != NULL) {
 		gk20a_gmmu_clear_vidmem_mem(g, mem);
 		nvgpu_free(mem->allocator,
-			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
-		gk20a_free_sgtable(g, &mem->sgt);
+			   (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
+		gk20a_free_sgtable(g, &mem->priv.sgt);
 
 		WARN_ON(atomic64_sub_return(mem->size,
 				&g->mm.vidmem.bytes_pending) < 0);
@@ -2774,7 +2775,7 @@ u64 gk20a_pde_addr(struct gk20a *g, struct gk20a_mm_entry *entry)
 	u64 base;
 
 	if (g->mm.has_physical_mode)
-		base = sg_phys(entry->mem.sgt->sgl);
+		base = sg_phys(entry->mem.priv.sgt->sgl);
 	else
 		base = gk20a_mem_get_base_addr(g, &entry->mem, 0);
 
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 7fac811e..94dc0b6f 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -539,7 +539,7 @@ static inline phys_addr_t gk20a_mem_phys(struct nvgpu_mem *mem)
 {
 	/* FIXME: the sgt/sgl may get null if this is accessed e.g. in an isr
 	 * during channel deletion - attempt to fix at least null derefs */
-	struct sg_table *sgt = mem->sgt;
+	struct sg_table *sgt = mem->priv.sgt;
 
 	if (sgt) {
 		struct scatterlist *sgl = sgt->sgl;