Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	20
1 file changed, 8 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 97b7aa80..cd34e769 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1151,7 +1151,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 	struct gk20a_fence *gk20a_fence_out = NULL;
 	struct gk20a_fence *gk20a_last_fence = NULL;
 	struct nvgpu_page_alloc *alloc = NULL;
-	struct page_alloc_chunk *chunk = NULL;
+	struct nvgpu_mem_sgl *sgl = NULL;
 	int err = 0;
 
 	if (g->mm.vidmem.ce_ctx_id == (u32)~0)
@@ -1159,16 +1159,16 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 
 	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 
-	nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
-			page_alloc_chunk, list_entry) {
+	sgl = alloc->sgl;
+	while (sgl) {
 		if (gk20a_last_fence)
 			gk20a_fence_put(gk20a_last_fence);
 
 		err = gk20a_ce_execute_ops(g,
 			g->mm.vidmem.ce_ctx_id,
 			0,
-			chunk->base,
-			chunk->length,
+			nvgpu_mem_sgl_phys(sgl),
+			nvgpu_mem_sgl_length(sgl),
 			0x00000000,
 			NVGPU_CE_DST_LOCATION_LOCAL_FB,
 			NVGPU_CE_MEMSET,
@@ -1183,6 +1183,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 		}
 
 		gk20a_last_fence = gk20a_fence_out;
+		sgl = nvgpu_mem_sgl_next(sgl);
 	}
 
 	if (gk20a_last_fence) {
@@ -1262,10 +1263,10 @@ dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr)
 	return addr;
 }
 
-u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova)
+u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, u64 iova)
 {
 	/* ensure it is not vidmem allocation */
-	WARN_ON(is_vidmem_page_alloc((u64)iova));
+	WARN_ON(is_vidmem_page_alloc(iova));
 
 	if (device_is_iommuable(dev_from_gk20a(g)) &&
 	    g->ops.mm.get_physical_addr_bits)
@@ -2167,11 +2168,6 @@ u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g)
 	return 34;
 }
 
-u64 gk20a_mm_gpu_phys_addr(struct gk20a *g, u64 phys, u32 flags)
-{
-	return phys;
-}
-
 const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
 		u32 big_page_size)
 {