Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)
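The hunks below drop per-call lookups of struct gk20a_platform in favor of state carried on struct gk20a itself (the is_fmodel flag and the gpu_characteristics flags). A minimal sketch of the is_fmodel access change, written in kernel-style C; the struct layouts are abbreviated to what this diff shows, and the helper names fmodel_before()/fmodel_after() are illustrative only (the patch open-codes these reads at each call site):

/*
 * Illustrative only: fields abbreviated to what this diff shows; the
 * real definitions live in the driver's headers.
 */
#include <linux/device.h>

struct gk20a_platform {
	bool is_fmodel;
	/* ... */
};

struct gk20a {
	struct device *dev;
	bool is_fmodel;
	/* ... */
};

/* Before: each call site fetched the platform data to read the flag. */
static bool fmodel_before(struct gk20a *g)
{
	struct gk20a_platform *platform = dev_get_drvdata(g->dev);

	return platform->is_fmodel;
}

/* After: the flag is read directly off struct gk20a. */
static bool fmodel_after(struct gk20a *g)
{
	return g->is_fmodel;
}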
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 7b08387e..9e6dc74c 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -809,7 +809,7 @@ static void gk20a_remove_mm_ce_support(struct mm_gk20a *mm)
 	struct gk20a *g = gk20a_from_mm(mm);
 
 	if (mm->vidmem.ce_ctx_id != (u32)~0)
-		gk20a_ce_delete_context(g->dev, mm->vidmem.ce_ctx_id);
+		gk20a_ce_delete_context_priv(g, mm->vidmem.ce_ctx_id);
 
 	mm->vidmem.ce_ctx_id = (u32)~0;
 
@@ -1220,11 +1220,10 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 	u32 num_pages = 1 << order;
 	u32 len = num_pages * PAGE_SIZE;
 	int err;
-	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 
 	gk20a_dbg_fn("");
 
-	if (platform->is_fmodel)
+	if (g->is_fmodel)
 		return alloc_gmmu_phys_pages(vm, order, entry);
 
 	/*
@@ -1250,7 +1249,6 @@ void free_gmmu_pages(struct vm_gk20a *vm,
 		     struct gk20a_mm_entry *entry)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
-	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 
 	gk20a_dbg_fn("");
 
@@ -1260,7 +1258,7 @@ void free_gmmu_pages(struct vm_gk20a *vm,
 	if (entry->woffset) /* fake shadow mem */
 		return;
 
-	if (platform->is_fmodel) {
+	if (g->is_fmodel) {
 		free_gmmu_phys_pages(vm, entry);
 		return;
 	}
@@ -1270,11 +1268,9 @@ void free_gmmu_pages(struct vm_gk20a *vm,
 
 int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 {
-	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
-
 	gk20a_dbg_fn("");
 
-	if (platform->is_fmodel)
+	if (g->is_fmodel)
 		return map_gmmu_phys_pages(entry);
 
 	if (IS_ENABLED(CONFIG_ARM64)) {
@@ -1296,11 +1292,9 @@ int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 
 void unmap_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 {
-	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
-
 	gk20a_dbg_fn("");
 
-	if (platform->is_fmodel) {
+	if (g->is_fmodel) {
 		unmap_gmmu_phys_pages(entry);
 		return;
 	}
@@ -4070,6 +4064,7 @@ static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm)
 	struct mapped_buffer_node *mapped_buffer;
 	struct vm_reserved_va_node *va_node, *va_node_tmp;
 	struct rb_node *node;
+	struct gk20a *g = vm->mm->g;
 
 	gk20a_dbg_fn("");
 
@@ -4078,7 +4073,7 @@ static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm)
 	 * pool involves unmapping a GMMU mapping which means aquiring the
 	 * update_gmmu_lock.
 	 */
-	if (!gk20a_platform_has_syncpoints(gk20a_from_vm(vm)->dev)) {
+	if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_HAS_SYNCPOINTS)) {
 		if (vm->sema_pool) {
 			nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
 			nvgpu_semaphore_pool_put(vm->sema_pool);
@@ -4172,7 +4167,7 @@ static int gk20a_init_sema_pool(struct vm_gk20a *vm)
 	/*
 	 * Don't waste the memory on semaphores if we don't need them.
 	 */
-	if (gk20a_platform_has_syncpoints(g->dev))
+	if (g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_HAS_SYNCPOINTS)
 		return 0;
 
 	if (vm->sema_pool)
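The last two hunks make the same move for the syncpoint check: instead of calling gk20a_platform_has_syncpoints(g->dev), the code tests NVGPU_GPU_FLAGS_HAS_SYNCPOINTS in g->gpu_characteristics.flags. A minimal sketch of the equivalent predicate, using only names visible in the diff; has_syncpoints() itself is a hypothetical helper, the patch open-codes the test at both sites:

static bool has_syncpoints(struct gk20a *g)
{
	/* Bitmask test on gpu_characteristics replaces the
	 * device-based platform query used before this change. */
	return (g->gpu_characteristics.flags &
		NVGPU_GPU_FLAGS_HAS_SYNCPOINTS) != 0;
}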