Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--  drivers/gpu/nvgpu/common/falcon/falcon.c  14
-rw-r--r--  drivers/gpu/nvgpu/common/mm/comptags.c     9
-rw-r--r--  drivers/gpu/nvgpu/common/mm/mm.c          10
-rw-r--r--  drivers/gpu/nvgpu/common/mm/pd_cache.c    12
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c          23
5 files changed, 53 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon.c b/drivers/gpu/nvgpu/common/falcon/falcon.c
index 81ba5e81..6e5a477d 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon.c
@@ -397,10 +397,11 @@ int nvgpu_flcn_bl_bootstrap(struct nvgpu_falcon *flcn,
 	return status;
 }
 
-void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
+int nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
 {
 	struct nvgpu_falcon *flcn = NULL;
 	struct gpu_ops *gops = &g->ops;
+	int err = 0;
 
 	switch (flcn_id) {
 	case FALCON_ID_PMU:
@@ -431,12 +432,15 @@ void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
 		break;
 	default:
 		nvgpu_err(g, "Invalid/Unsupported falcon ID %x", flcn_id);
+		err = -ENODEV;
 		break;
 	};
 
-	/* call to HAL method to assign flcn base & ops to selected falcon */
-	if (flcn) {
-		flcn->g = g;
-		gops->falcon.falcon_hal_sw_init(flcn);
+	if (err != 0) {
+		return err;
 	}
+
+	/* call to HAL method to assign flcn base & ops to selected falcon */
+	flcn->g = g;
+	return gops->falcon.falcon_hal_sw_init(flcn);
 }
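
With nvgpu_flcn_sw_init() now returning int rather than void, existing call sites presumably have to be updated to check the result. A minimal sketch of what an updated caller could look like; the call site below is illustrative only and not part of this patch:

	/* Hypothetical call site: propagate the new error code upward. */
	int err = nvgpu_flcn_sw_init(g, FALCON_ID_PMU);
	if (err != 0) {
		nvgpu_err(g, "failed to init sw state for falcon %x: %d",
			FALCON_ID_PMU, err);
		return err;
	}
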
diff --git a/drivers/gpu/nvgpu/common/mm/comptags.c b/drivers/gpu/nvgpu/common/mm/comptags.c
index e6c99702..334236ec 100644
--- a/drivers/gpu/nvgpu/common/mm/comptags.c
+++ b/drivers/gpu/nvgpu/common/mm/comptags.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -66,7 +66,12 @@ int gk20a_comptag_allocator_init(struct gk20a *g,
 		struct gk20a_comptag_allocator *allocator,
 		unsigned long size)
 {
-	nvgpu_mutex_init(&allocator->lock);
+	int err = nvgpu_mutex_init(&allocator->lock);
+
+	if (err != 0) {
+		nvgpu_err(g, "Error in allocator.lock mutex initialization");
+		return err;
+	}
 
 	/*
 	 * 0th comptag is special and is never used. The base for this bitmap
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 6be619ed..54f621ae 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -1,4 +1,6 @@
 /*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
@@ -375,7 +377,7 @@ clean_up_vm:
 static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
-	int err;
+	int err = 0;
 
 	if (mm->sw_ready) {
 		nvgpu_log_info(g, "skip init");
@@ -383,7 +385,11 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 	}
 
 	mm->g = g;
-	nvgpu_mutex_init(&mm->l2_op_lock);
+	err = nvgpu_mutex_init(&mm->l2_op_lock);
+	if (err != 0) {
+		nvgpu_err(g, "Error in l2_op_lock mutex initialization");
+		return err;
+	}
 
 	/*TBD: make channel vm size configurable */
 	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index 335ef360..77e20c38 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -90,6 +90,8 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 {
 	struct nvgpu_pd_cache *cache;
 	u32 i;
+	int err = 0;
+
 
 	/*
 	 * This gets called from finalize_poweron() so we need to make sure we
@@ -111,9 +113,15 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 	}
 
 	cache->mem_tree = NULL;
-	g->mm.pd_cache = cache;
-	nvgpu_mutex_init(&cache->lock);
 
+	err = nvgpu_mutex_init(&cache->lock);
+	if (err != 0) {
+		nvgpu_err(g, "Error in cache.lock initialization");
+		nvgpu_kfree(g, cache);
+		return err;
+	}
+
+	g->mm.pd_cache = cache;
 	pd_dbg(g, "PD cache initialized!");
 
 	return 0;
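
Note that g->mm.pd_cache is now assigned only after nvgpu_mutex_init() succeeds, so a half-initialized cache is never published and can simply be freed on failure. A rough sketch of that allocate/init/publish ordering; the allocation call shown is assumed for illustration and is not part of this hunk:

	/* Illustrative sketch only; nvgpu_kzalloc() here is assumed context. */
	struct nvgpu_pd_cache *cache = nvgpu_kzalloc(g, sizeof(*cache));
	if (cache == NULL)
		return -ENOMEM;

	err = nvgpu_mutex_init(&cache->lock);
	if (err != 0) {
		nvgpu_kfree(g, cache);	/* not yet visible via g->mm.pd_cache */
		return err;
	}

	g->mm.pd_cache = cache;		/* publish only once fully initialized */
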
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index b364f4d6..3cb8ed60 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -284,7 +284,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 		bool userspace_managed,
 		char *name)
 {
-	int err;
+	int err = 0;
 	char alloc_name[32];
 	u64 kernel_vma_flags;
 	u64 user_vma_start, user_vma_limit;
@@ -476,8 +476,19 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 
 	vm->mapped_buffers = NULL;
 
-	nvgpu_mutex_init(&vm->syncpt_ro_map_lock);
-	nvgpu_mutex_init(&vm->update_gmmu_lock);
+	err = nvgpu_mutex_init(&vm->syncpt_ro_map_lock);
+	if (err != 0) {
+		nvgpu_err(g,
+			"Error in syncpt_ro_map_lock mutex initialization");
+		goto clean_up_allocators;
+	}
+
+	err = nvgpu_mutex_init(&vm->update_gmmu_lock);
+	if (err != 0) {
+		nvgpu_err(g, "Error in update_gmmu_lock mutex initialization");
+		goto clean_up_ro_map_lock;
+	}
+
 	nvgpu_ref_init(&vm->ref);
 	nvgpu_init_list_node(&vm->vm_area_list);
 
@@ -489,12 +500,16 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	if (vm->va_limit > 4ULL * SZ_1G) {
 		err = nvgpu_init_sema_pool(vm);
 		if (err) {
-			goto clean_up_allocators;
+			goto clean_up_gmmu_lock;
 		}
 	}
 
 	return 0;
 
+clean_up_gmmu_lock:
+	nvgpu_mutex_destroy(&vm->update_gmmu_lock);
+clean_up_ro_map_lock:
+	nvgpu_mutex_destroy(&vm->syncpt_ro_map_lock);
 clean_up_allocators:
 	if (nvgpu_alloc_initialized(&vm->kernel)) {
 		nvgpu_alloc_destroy(&vm->kernel);