summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/mm.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/mm.c')
-rw-r--r-- drivers/gpu/nvgpu/common/mm/mm.c | 18
1 files changed, 9 insertions, 9 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index f97d9ebd..03325cce 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -42,7 +42,7 @@ static u32 nvgpu_vm_get_pte_size_fixed_map(struct vm_gk20a *vm,
42 struct nvgpu_vm_area *vm_area; 42 struct nvgpu_vm_area *vm_area;
43 43
44 vm_area = nvgpu_vm_area_find(vm, base); 44 vm_area = nvgpu_vm_area_find(vm, base);
45 if (!vm_area) { 45 if (vm_area == NULL) {
46 return GMMU_PAGE_SIZE_SMALL; 46 return GMMU_PAGE_SIZE_SMALL;
47 } 47 }
48 48
@@ -55,7 +55,7 @@ static u32 nvgpu_vm_get_pte_size_fixed_map(struct vm_gk20a *vm,
55static u32 nvgpu_vm_get_pte_size_split_addr(struct vm_gk20a *vm, 55static u32 nvgpu_vm_get_pte_size_split_addr(struct vm_gk20a *vm,
56 u64 base, u64 size) 56 u64 base, u64 size)
57{ 57{
58 if (!base) { 58 if (base == 0ULL) {
59 if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) { 59 if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
60 return GMMU_PAGE_SIZE_BIG; 60 return GMMU_PAGE_SIZE_BIG;
61 } 61 }
@@ -233,7 +233,7 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm)
233 true, 233 true,
234 false, 234 false,
235 "system"); 235 "system");
236 if (!mm->pmu.vm) { 236 if (mm->pmu.vm == NULL) {
237 return -ENOMEM; 237 return -ENOMEM;
238 } 238 }
239 239
@@ -275,7 +275,7 @@ static int nvgpu_init_cde_vm(struct mm_gk20a *mm)
275 NV_MM_DEFAULT_KERNEL_SIZE, 275 NV_MM_DEFAULT_KERNEL_SIZE,
276 NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE, 276 NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
277 false, false, "cde"); 277 false, false, "cde");
278 if (!mm->cde.vm) { 278 if (mm->cde.vm == NULL) {
279 return -ENOMEM; 279 return -ENOMEM;
280 } 280 }
281 return 0; 281 return 0;
@@ -291,7 +291,7 @@ static int nvgpu_init_ce_vm(struct mm_gk20a *mm)
291 NV_MM_DEFAULT_KERNEL_SIZE, 291 NV_MM_DEFAULT_KERNEL_SIZE,
292 NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE, 292 NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
293 false, false, "ce"); 293 false, false, "ce");
294 if (!mm->ce.vm) { 294 if (mm->ce.vm == NULL) {
295 return -ENOMEM; 295 return -ENOMEM;
296 } 296 }
297 return 0; 297 return 0;
@@ -386,7 +386,7 @@ static int nvgpu_init_bar1_vm(struct mm_gk20a *mm)
386 mm->bar1.aperture_size, 386 mm->bar1.aperture_size,
387 true, false, 387 true, false,
388 "bar1"); 388 "bar1");
389 if (!mm->bar1.vm) { 389 if (mm->bar1.vm == NULL) {
390 return -ENOMEM; 390 return -ENOMEM;
391 } 391 }
392 392
@@ -442,8 +442,8 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
442 * this requires fixed allocations in vidmem which must be 442 * this requires fixed allocations in vidmem which must be
443 * allocated before all other buffers 443 * allocated before all other buffers
444 */ 444 */
445 if (g->ops.pmu.alloc_blob_space 445 if (g->ops.pmu.alloc_blob_space != NULL &&
446 && !nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) { 446 !nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) {
447 err = g->ops.pmu.alloc_blob_space(g, 0, &g->acr.ucode_blob); 447 err = g->ops.pmu.alloc_blob_space(g, 0, &g->acr.ucode_blob);
448 if (err) { 448 if (err) {
449 return err; 449 return err;