author    Terje Bergstrom <tbergstrom@nvidia.com>    2015-03-25 15:47:12 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>      2015-04-04 22:02:43 -0400
commit    87226707cfbabbf38a1a954dedd3575e67bbe48d (patch)
tree      81bc42cef59de51bfdb8228aae80f6b6807bbfbc /drivers
parent    69ef2e6fa3417f271a47d26ee3768b143cd7448f (diff)
gpu: nvgpu: Fix error paths in init vm
Error paths called the wrong cleanup sections.

Change-Id: I603af77bf8e3981c029bcf6d582882e51847f137
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/722949
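The bug class being fixed is a mislabeled exit in the kernel's usual goto-ladder cleanup idiom: each failing step jumps to the label that undoes exactly the steps that completed before it, and the labels run teardown in reverse order of setup. Below is a minimal sketch of the idiom, with hypothetical names (obj_init, register_obj) rather than the driver's actual helpers:

#include <stdlib.h>

struct obj {
	void *a;
	void *b;
};

/* hypothetical third setup step; nonzero means failure */
static int register_obj(struct obj *o)
{
	(void)o;
	return 0;
}

static int obj_init(struct obj *o)
{
	int err;

	o->a = malloc(64);		/* step 1 */
	if (!o->a)
		return -1;		/* nothing set up yet: plain return */

	o->b = malloc(64);		/* step 2 */
	if (!o->b) {
		err = -1;
		goto clean_up_a;	/* undo step 1 only */
	}

	err = register_obj(o);		/* step 3 */
	if (err)
		goto clean_up_b;	/* undo step 2, then step 1 */

	return 0;			/* success: caller owns o->a and o->b */

clean_up_b:
	free(o->b);
clean_up_a:
	free(o->a);
	return err;
}

Jumping one label too far, which is what the pre-patch code did, runs teardown for state that was never set up; jumping short of the right label leaks what was.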
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 3d2e5450..0ddd9ecb 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -2273,10 +2273,8 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 	vm->pdb.entries = kzalloc(sizeof(struct gk20a_mm_entry) *
 			(pde_hi + 1), GFP_KERNEL);
 
-	if (!vm->pdb.entries) {
-		err = -ENOMEM;
-		goto clean_up_pdes;
-	}
+	if (!vm->pdb.entries)
+		return -ENOMEM;
 
 	gk20a_dbg_info("init space for %s va_limit=0x%llx num_pdes=%d",
 			name, vm->va_limit, pde_hi + 1);
@@ -2284,7 +2282,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 	/* allocate the page table directory */
 	err = gk20a_zalloc_gmmu_page_table(vm, 0, &vm->mmu_levels[0], &vm->pdb);
 	if (err)
-		goto clean_up_ptes;
+		goto clean_up_pdes;
 
 	/* First 16GB of the address space goes towards small pages. What ever
 	 * remains is allocated to large pages. */
@@ -2308,7 +2306,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 			low_hole_pages, /*start*/
 			num_small_pages - low_hole_pages);/* length*/
 	if (err)
-		goto clean_up_map_pde;
+		goto clean_up_ptes;
 
 	if (big_pages) {
 		u32 start = (u32)(small_vma_size >>
@@ -2336,8 +2334,6 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 
 clean_up_small_allocator:
 	gk20a_allocator_destroy(&vm->vma[gmmu_page_size_small]);
-clean_up_map_pde:
-	unmap_gmmu_pages(&vm->pdb);
 clean_up_ptes:
 	free_gmmu_pages(vm, &vm->pdb);
 clean_up_pdes:
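Read bottom-up, the patched ladder unwinds in strict reverse order of setup: a failed kzalloc() returns directly since nothing needs unwinding, a failed page-table allocation jumps to clean_up_pdes, a failed small-page allocator setup jumps to clean_up_ptes, and the now-unreachable clean_up_map_pde label with its unmap_gmmu_pages() call is deleted. For illustration only, here is the corrected control flow condensed into a standalone sketch; every helper is a stub standing in for the real nvgpu call it is named after, and since the body of clean_up_pdes is cut off in the hunk above, the teardown shown for it is an assumption:

#include <stdlib.h>

/* Stand-ins for the real nvgpu steps; each returns 0 on success. */
static int alloc_pde_entries(void **entries)
{
	*entries = calloc(1, 64);	/* kzalloc() stand-in */
	return *entries ? 0 : -1;
}
static int alloc_page_table(void) { return 0; }	/* gk20a_zalloc_gmmu_page_table() stand-in */
static int init_small_vma(void)   { return 0; }	/* small-page allocator setup stand-in */
static int init_big_vma(void)     { return 0; }	/* large-page allocator setup stand-in */

static void destroy_small_vma(void) { }		/* gk20a_allocator_destroy() stand-in */
static void free_page_table(void)   { }		/* free_gmmu_pages() stand-in */

static int vm_init_sketch(void)
{
	void *entries;
	int err;

	if (alloc_pde_entries(&entries))
		return -1;			/* step 1 failed: plain return */

	err = alloc_page_table();
	if (err)
		goto clean_up_pdes;		/* undo step 1 */

	err = init_small_vma();
	if (err)
		goto clean_up_ptes;		/* undo steps 2, then 1 */

	err = init_big_vma();
	if (err)
		goto clean_up_small_allocator;	/* undo steps 3, 2, then 1 */

	return 0;

clean_up_small_allocator:
	destroy_small_vma();
clean_up_ptes:
	free_page_table();
clean_up_pdes:
	free(entries);				/* assumed teardown: not shown in the hunk above */
	return err;
}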