summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Nicolas Benech <nbenech@nvidia.com>  2018-08-23 16:23:52 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2018-09-05 23:39:08 -0400
commit    2eface802a4aea417206bcdda689a65cf47d300b (patch)
tree      502af9d48004af4edf8f02a2a7cf751ef5a11325
parent    b44c7fdb114a63ab98fffc0f246776b56399ff64 (diff)
gpu: nvgpu: Fix mutex MISRA 17.7 violations
MISRA Rule-17.7 requires the return value of all functions to be used. Fix is either to use the return value or change the function to return void. This patch contains fix for calls to nvgpu_mutex_init and improves related error handling. JIRA NVGPU-677 Change-Id: I609fa138520cc7ccfdd5aa0e7fd28c8ca0b3a21c Signed-off-by: Nicolas Benech <nbenech@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1805598 Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Alex Waterman <alexw@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--  drivers/gpu/nvgpu/common/falcon/falcon.c      14
-rw-r--r--  drivers/gpu/nvgpu/common/mm/comptags.c         9
-rw-r--r--  drivers/gpu/nvgpu/common/mm/mm.c              10
-rw-r--r--  drivers/gpu/nvgpu/common/mm/pd_cache.c        12
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c              23
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c          13
-rw-r--r--  drivers/gpu/nvgpu/gk20a/flcn_gk20a.c          20
-rw-r--r--  drivers/gpu/nvgpu/gk20a/flcn_gk20a.h           4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c               20
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h                2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c            33
-rw-r--r--  drivers/gpu/nvgpu/gp106/flcn_gp106.c          20
-rw-r--r--  drivers/gpu/nvgpu/gp106/flcn_gp106.h           4
-rw-r--r--  drivers/gpu/nvgpu/gv100/flcn_gv100.c          22
-rw-r--r--  drivers/gpu/nvgpu/gv100/flcn_gv100.h           2
-rw-r--r--  drivers/gpu/nvgpu/gv100/nvlink_gv100.c         6
-rw-r--r--  drivers/gpu/nvgpu/gv11b/mm_gv11b.c             8
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/falcon.h      14
18 files changed, 168 insertions, 68 deletions
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon.c b/drivers/gpu/nvgpu/common/falcon/falcon.c
index 81ba5e81..6e5a477d 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon.c
@@ -397,10 +397,11 @@ int nvgpu_flcn_bl_bootstrap(struct nvgpu_falcon *flcn,
397 return status; 397 return status;
398} 398}
399 399
400void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id) 400int nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
401{ 401{
402 struct nvgpu_falcon *flcn = NULL; 402 struct nvgpu_falcon *flcn = NULL;
403 struct gpu_ops *gops = &g->ops; 403 struct gpu_ops *gops = &g->ops;
404 int err = 0;
404 405
405 switch (flcn_id) { 406 switch (flcn_id) {
406 case FALCON_ID_PMU: 407 case FALCON_ID_PMU:
@@ -431,12 +432,15 @@ void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
431 break; 432 break;
432 default: 433 default:
433 nvgpu_err(g, "Invalid/Unsupported falcon ID %x", flcn_id); 434 nvgpu_err(g, "Invalid/Unsupported falcon ID %x", flcn_id);
435 err = -ENODEV;
434 break; 436 break;
435 }; 437 };
436 438
437 /* call to HAL method to assign flcn base & ops to selected falcon */ 439 if (err != 0) {
438 if (flcn) { 440 return err;
439 flcn->g = g;
440 gops->falcon.falcon_hal_sw_init(flcn);
441 } 441 }
442
443 /* call to HAL method to assign flcn base & ops to selected falcon */
444 flcn->g = g;
445 return gops->falcon.falcon_hal_sw_init(flcn);
442} 446}
diff --git a/drivers/gpu/nvgpu/common/mm/comptags.c b/drivers/gpu/nvgpu/common/mm/comptags.c
index e6c99702..334236ec 100644
--- a/drivers/gpu/nvgpu/common/mm/comptags.c
+++ b/drivers/gpu/nvgpu/common/mm/comptags.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -66,7 +66,12 @@ int gk20a_comptag_allocator_init(struct gk20a *g,
66 struct gk20a_comptag_allocator *allocator, 66 struct gk20a_comptag_allocator *allocator,
67 unsigned long size) 67 unsigned long size)
68{ 68{
69 nvgpu_mutex_init(&allocator->lock); 69 int err = nvgpu_mutex_init(&allocator->lock);
70
71 if (err != 0) {
72 nvgpu_err(g, "Error in allocator.lock mutex initialization");
73 return err;
74 }
70 75
71 /* 76 /*
72 * 0th comptag is special and is never used. The base for this bitmap 77 * 0th comptag is special and is never used. The base for this bitmap
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 6be619ed..54f621ae 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 *
2 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
3 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
4 * to deal in the Software without restriction, including without limitation 6 * to deal in the Software without restriction, including without limitation
@@ -375,7 +377,7 @@ clean_up_vm:
375static int nvgpu_init_mm_setup_sw(struct gk20a *g) 377static int nvgpu_init_mm_setup_sw(struct gk20a *g)
376{ 378{
377 struct mm_gk20a *mm = &g->mm; 379 struct mm_gk20a *mm = &g->mm;
378 int err; 380 int err = 0;
379 381
380 if (mm->sw_ready) { 382 if (mm->sw_ready) {
381 nvgpu_log_info(g, "skip init"); 383 nvgpu_log_info(g, "skip init");
@@ -383,7 +385,11 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
383 } 385 }
384 386
385 mm->g = g; 387 mm->g = g;
386 nvgpu_mutex_init(&mm->l2_op_lock); 388 err = nvgpu_mutex_init(&mm->l2_op_lock);
389 if (err != 0) {
390 nvgpu_err(g, "Error in l2_op_lock mutex initialization");
391 return err;
392 }
387 393
388 /*TBD: make channel vm size configurable */ 394 /*TBD: make channel vm size configurable */
389 mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE - 395 mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index 335ef360..77e20c38 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -90,6 +90,8 @@ int nvgpu_pd_cache_init(struct gk20a *g)
90{ 90{
91 struct nvgpu_pd_cache *cache; 91 struct nvgpu_pd_cache *cache;
92 u32 i; 92 u32 i;
93 int err = 0;
94
93 95
94 /* 96 /*
95 * This gets called from finalize_poweron() so we need to make sure we 97 * This gets called from finalize_poweron() so we need to make sure we
@@ -111,9 +113,15 @@ int nvgpu_pd_cache_init(struct gk20a *g)
111 } 113 }
112 114
113 cache->mem_tree = NULL; 115 cache->mem_tree = NULL;
114 g->mm.pd_cache = cache;
115 nvgpu_mutex_init(&cache->lock);
116 116
117 err = nvgpu_mutex_init(&cache->lock);
118 if (err != 0) {
119 nvgpu_err(g, "Error in cache.lock initialization");
120 nvgpu_kfree(g, cache);
121 return err;
122 }
123
124 g->mm.pd_cache = cache;
117 pd_dbg(g, "PD cache initialized!"); 125 pd_dbg(g, "PD cache initialized!");
118 126
119 return 0; 127 return 0;
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index b364f4d6..3cb8ed60 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -284,7 +284,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
284 bool userspace_managed, 284 bool userspace_managed,
285 char *name) 285 char *name)
286{ 286{
287 int err; 287 int err = 0;
288 char alloc_name[32]; 288 char alloc_name[32];
289 u64 kernel_vma_flags; 289 u64 kernel_vma_flags;
290 u64 user_vma_start, user_vma_limit; 290 u64 user_vma_start, user_vma_limit;
@@ -476,8 +476,19 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
476 476
477 vm->mapped_buffers = NULL; 477 vm->mapped_buffers = NULL;
478 478
479 nvgpu_mutex_init(&vm->syncpt_ro_map_lock); 479 err = nvgpu_mutex_init(&vm->syncpt_ro_map_lock);
480 nvgpu_mutex_init(&vm->update_gmmu_lock); 480 if (err != 0) {
481 nvgpu_err(g,
482 "Error in syncpt_ro_map_lock mutex initialization");
483 goto clean_up_allocators;
484 }
485
486 err = nvgpu_mutex_init(&vm->update_gmmu_lock);
487 if (err != 0) {
488 nvgpu_err(g, "Error in update_gmmu_lock mutex initialization");
489 goto clean_up_ro_map_lock;
490 }
491
481 nvgpu_ref_init(&vm->ref); 492 nvgpu_ref_init(&vm->ref);
482 nvgpu_init_list_node(&vm->vm_area_list); 493 nvgpu_init_list_node(&vm->vm_area_list);
483 494
@@ -489,12 +500,16 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
489 if (vm->va_limit > 4ULL * SZ_1G) { 500 if (vm->va_limit > 4ULL * SZ_1G) {
490 err = nvgpu_init_sema_pool(vm); 501 err = nvgpu_init_sema_pool(vm);
491 if (err) { 502 if (err) {
492 goto clean_up_allocators; 503 goto clean_up_gmmu_lock;
493 } 504 }
494 } 505 }
495 506
496 return 0; 507 return 0;
497 508
509clean_up_gmmu_lock:
510 nvgpu_mutex_destroy(&vm->update_gmmu_lock);
511clean_up_ro_map_lock:
512 nvgpu_mutex_destroy(&vm->syncpt_ro_map_lock);
498clean_up_allocators: 513clean_up_allocators:
499 if (nvgpu_alloc_initialized(&vm->kernel)) { 514 if (nvgpu_alloc_initialized(&vm->kernel)) {
500 nvgpu_alloc_destroy(&vm->kernel); 515 nvgpu_alloc_destroy(&vm->kernel);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index f06bf1c5..9dfe3083 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -696,6 +696,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
696 u32 active_engine_id, pbdma_id, engine_id; 696 u32 active_engine_id, pbdma_id, engine_id;
697 int flags = nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ? 697 int flags = nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ?
698 NVGPU_DMA_FORCE_CONTIGUOUS : 0; 698 NVGPU_DMA_FORCE_CONTIGUOUS : 0;
699 int err = 0;
699 700
700 nvgpu_log_fn(g, " "); 701 nvgpu_log_fn(g, " ");
701 702
@@ -733,7 +734,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
733 f->num_runlist_entries, runlist_size); 734 f->num_runlist_entries, runlist_size);
734 735
735 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { 736 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
736 int err = nvgpu_dma_alloc_flags_sys(g, flags, 737 err = nvgpu_dma_alloc_flags_sys(g, flags,
737 runlist_size, 738 runlist_size,
738 &runlist->mem[i]); 739 &runlist->mem[i]);
739 if (err) { 740 if (err) {
@@ -741,7 +742,13 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
741 goto clean_up_runlist; 742 goto clean_up_runlist;
742 } 743 }
743 } 744 }
744 nvgpu_mutex_init(&runlist->runlist_lock); 745
746 err = nvgpu_mutex_init(&runlist->runlist_lock);
747 if (err != 0) {
748 nvgpu_err(g,
749 "Error in runlist_lock mutex initialization");
750 goto clean_up_runlist;
751 }
745 752
746 /* None of buffers is pinned if this value doesn't change. 753 /* None of buffers is pinned if this value doesn't change.
747 Otherwise, one of them (cur_buffer) must have been pinned. */ 754 Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -773,7 +780,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
773clean_up_runlist: 780clean_up_runlist:
774 gk20a_fifo_delete_runlist(f); 781 gk20a_fifo_delete_runlist(f);
775 nvgpu_log_fn(g, "fail"); 782 nvgpu_log_fn(g, "fail");
776 return -ENOMEM; 783 return err;
777} 784}
778 785
779u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g) 786u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
index 2f715ae1..5fa4dd53 100644
--- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
@@ -707,9 +707,10 @@ void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
707 gk20a_falcon_engine_dependency_ops(flcn); 707 gk20a_falcon_engine_dependency_ops(flcn);
708} 708}
709 709
710void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn) 710int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
711{ 711{
712 struct gk20a *g = flcn->g; 712 struct gk20a *g = flcn->g;
713 int err = 0;
713 714
714 switch (flcn->flcn_id) { 715 switch (flcn->flcn_id) {
715 case FALCON_ID_PMU: 716 case FALCON_ID_PMU:
@@ -726,28 +727,35 @@ void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
726 flcn->flcn_base = FALCON_FECS_BASE; 727 flcn->flcn_base = FALCON_FECS_BASE;
727 flcn->is_falcon_supported = true; 728 flcn->is_falcon_supported = true;
728 flcn->is_interrupt_enabled = false; 729 flcn->is_interrupt_enabled = false;
729 break; 730 break;
730 case FALCON_ID_GPCCS: 731 case FALCON_ID_GPCCS:
731 flcn->flcn_base = FALCON_GPCCS_BASE; 732 flcn->flcn_base = FALCON_GPCCS_BASE;
732 flcn->is_falcon_supported = true; 733 flcn->is_falcon_supported = true;
733 flcn->is_interrupt_enabled = false; 734 flcn->is_interrupt_enabled = false;
734 break; 735 break;
735 case FALCON_ID_NVDEC: 736 case FALCON_ID_NVDEC:
736 flcn->flcn_base = FALCON_NVDEC_BASE; 737 flcn->flcn_base = FALCON_NVDEC_BASE;
737 flcn->is_falcon_supported = false; 738 flcn->is_falcon_supported = false;
738 flcn->is_interrupt_enabled = false; 739 flcn->is_interrupt_enabled = false;
739 break; 740 break;
740 default: 741 default:
741 flcn->is_falcon_supported = false; 742 flcn->is_falcon_supported = false;
742 nvgpu_err(g, "Invalid flcn request"); 743 nvgpu_err(g, "Invalid flcn request");
744 err = -ENODEV;
743 break; 745 break;
744 } 746 }
745 747
746 if (flcn->is_falcon_supported) { 748 if (flcn->is_falcon_supported) {
747 nvgpu_mutex_init(&flcn->copy_lock); 749 err = nvgpu_mutex_init(&flcn->copy_lock);
748 gk20a_falcon_ops(flcn); 750 if (err != 0) {
751 nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
752 } else {
753 gk20a_falcon_ops(flcn);
754 }
749 } else { 755 } else {
750 nvgpu_log_info(g, "falcon 0x%x not supported on %s", 756 nvgpu_log_info(g, "falcon 0x%x not supported on %s",
751 flcn->flcn_id, g->name); 757 flcn->flcn_id, g->name);
752 } 758 }
759
760 return err;
753} 761}
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.h b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.h
index 95d46251..7f7ee89e 100644
--- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -23,7 +23,7 @@
23#define __FLCN_GK20A_H__ 23#define __FLCN_GK20A_H__
24 24
25void gk20a_falcon_ops(struct nvgpu_falcon *flcn); 25void gk20a_falcon_ops(struct nvgpu_falcon *flcn);
26void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn); 26int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
27void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn); 27void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn);
28 28
29#endif /* __FLCN_GK20A_H__ */ 29#endif /* __FLCN_GK20A_H__ */
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index f5e35927..1c34c152 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -150,7 +150,7 @@ int gk20a_prepare_poweroff(struct gk20a *g)
150 150
151int gk20a_finalize_poweron(struct gk20a *g) 151int gk20a_finalize_poweron(struct gk20a *g)
152{ 152{
153 int err; 153 int err = 0;
154#if defined(CONFIG_TEGRA_GK20A_NVHOST) 154#if defined(CONFIG_TEGRA_GK20A_NVHOST)
155 u32 nr_pages; 155 u32 nr_pages;
156#endif 156#endif
@@ -182,9 +182,21 @@ int gk20a_finalize_poweron(struct gk20a *g)
182 } 182 }
183 183
184 /* init interface layer support for PMU falcon */ 184 /* init interface layer support for PMU falcon */
185 nvgpu_flcn_sw_init(g, FALCON_ID_PMU); 185 err = nvgpu_flcn_sw_init(g, FALCON_ID_PMU);
186 nvgpu_flcn_sw_init(g, FALCON_ID_SEC2); 186 if (err != 0) {
187 nvgpu_flcn_sw_init(g, FALCON_ID_NVDEC); 187 nvgpu_err(g, "failed to sw init FALCON_ID_PMU");
188 goto done;
189 }
190 err = nvgpu_flcn_sw_init(g, FALCON_ID_SEC2);
191 if (err != 0) {
192 nvgpu_err(g, "failed to sw init FALCON_ID_SEC2");
193 goto done;
194 }
195 err = nvgpu_flcn_sw_init(g, FALCON_ID_NVDEC);
196 if (err != 0) {
197 nvgpu_err(g, "failed to sw init FALCON_ID_NVDEC");
198 goto done;
199 }
188 200
189 if (g->ops.bios.init) { 201 if (g->ops.bios.init) {
190 err = g->ops.bios.init(g); 202 err = g->ops.bios.init(g);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index be00f708..898dfec8 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -1263,7 +1263,7 @@ struct gpu_ops {
1263 u32 (*get_link_control_status)(struct gk20a *g); 1263 u32 (*get_link_control_status)(struct gk20a *g);
1264 } xve; 1264 } xve;
1265 struct { 1265 struct {
1266 void (*falcon_hal_sw_init)(struct nvgpu_falcon *flcn); 1266 int (*falcon_hal_sw_init)(struct nvgpu_falcon *flcn);
1267 } falcon; 1267 } falcon;
1268 struct { 1268 struct {
1269 void (*enable_priv_ring)(struct gk20a *g); 1269 void (*enable_priv_ring)(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 39d6879b..2969743b 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -3983,10 +3983,14 @@ static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr)
3983int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr) 3983int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
3984{ 3984{
3985 struct zbc_entry zbc_val; 3985 struct zbc_entry zbc_val;
3986 u32 i; 3986 u32 i = 0;
3987 int err; 3987 int err = 0;
3988 3988
3989 nvgpu_mutex_init(&gr->zbc_lock); 3989 err = nvgpu_mutex_init(&gr->zbc_lock);
3990 if (err != 0) {
3991 nvgpu_err(g, "Error in zbc_lock mutex initialization");
3992 return err;
3993 }
3990 3994
3991 /* load default color table */ 3995 /* load default color table */
3992 zbc_val.type = GK20A_ZBC_TYPE_COLOR; 3996 zbc_val.type = GK20A_ZBC_TYPE_COLOR;
@@ -4749,7 +4753,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
4749static int gk20a_init_gr_setup_sw(struct gk20a *g) 4753static int gk20a_init_gr_setup_sw(struct gk20a *g)
4750{ 4754{
4751 struct gr_gk20a *gr = &g->gr; 4755 struct gr_gk20a *gr = &g->gr;
4752 int err; 4756 int err = 0;
4753 4757
4754 nvgpu_log_fn(g, " "); 4758 nvgpu_log_fn(g, " ");
4755 4759
@@ -4761,7 +4765,11 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
4761 gr->g = g; 4765 gr->g = g;
4762 4766
4763#if defined(CONFIG_GK20A_CYCLE_STATS) 4767#if defined(CONFIG_GK20A_CYCLE_STATS)
4764 nvgpu_mutex_init(&g->gr.cs_lock); 4768 err = nvgpu_mutex_init(&g->gr.cs_lock);
4769 if (err != 0) {
4770 nvgpu_err(g, "Error in gr.cs_lock mutex initialization");
4771 return err;
4772 }
4765#endif 4773#endif
4766 4774
4767 err = gr_gk20a_init_gr_config(g, gr); 4775 err = gr_gk20a_init_gr_config(g, gr);
@@ -4802,7 +4810,12 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
4802 if (g->ops.gr.init_gfxp_wfi_timeout_count) 4810 if (g->ops.gr.init_gfxp_wfi_timeout_count)
4803 g->ops.gr.init_gfxp_wfi_timeout_count(g); 4811 g->ops.gr.init_gfxp_wfi_timeout_count(g);
4804 4812
4805 nvgpu_mutex_init(&gr->ctx_mutex); 4813 err = nvgpu_mutex_init(&gr->ctx_mutex);
4814 if (err != 0) {
4815 nvgpu_err(g, "Error in gr.ctx_mutex initialization");
4816 goto clean_up;
4817 }
4818
4806 nvgpu_spinlock_init(&gr->ch_tlb_lock); 4819 nvgpu_spinlock_init(&gr->ch_tlb_lock);
4807 4820
4808 gr->remove_support = gk20a_remove_gr_support; 4821 gr->remove_support = gk20a_remove_gr_support;
@@ -4869,12 +4882,16 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
4869 4882
4870int gk20a_init_gr_support(struct gk20a *g) 4883int gk20a_init_gr_support(struct gk20a *g)
4871{ 4884{
4872 u32 err; 4885 int err = 0;
4873 4886
4874 nvgpu_log_fn(g, " "); 4887 nvgpu_log_fn(g, " ");
4875 4888
4876 /* this is required before gr_gk20a_init_ctx_state */ 4889 /* this is required before gr_gk20a_init_ctx_state */
4877 nvgpu_mutex_init(&g->gr.fecs_mutex); 4890 err = nvgpu_mutex_init(&g->gr.fecs_mutex);
4891 if (err != 0) {
4892 nvgpu_err(g, "Error in gr.fecs_mutex initialization");
4893 return err;
4894 }
4878 4895
4879 err = gr_gk20a_init_ctxsw(g); 4896 err = gr_gk20a_init_ctxsw(g);
4880 if (err) 4897 if (err)
diff --git a/drivers/gpu/nvgpu/gp106/flcn_gp106.c b/drivers/gpu/nvgpu/gp106/flcn_gp106.c
index 5959086d..168d94d3 100644
--- a/drivers/gpu/nvgpu/gp106/flcn_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/flcn_gp106.c
@@ -53,9 +53,10 @@ static void gp106_falcon_ops(struct nvgpu_falcon *flcn)
53 gp106_falcon_engine_dependency_ops(flcn); 53 gp106_falcon_engine_dependency_ops(flcn);
54} 54}
55 55
56void gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn) 56int gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
57{ 57{
58 struct gk20a *g = flcn->g; 58 struct gk20a *g = flcn->g;
59 int err = 0;
59 60
60 switch (flcn->flcn_id) { 61 switch (flcn->flcn_id) {
61 case FALCON_ID_PMU: 62 case FALCON_ID_PMU:
@@ -72,28 +73,35 @@ void gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
72 flcn->flcn_base = FALCON_FECS_BASE; 73 flcn->flcn_base = FALCON_FECS_BASE;
73 flcn->is_falcon_supported = true; 74 flcn->is_falcon_supported = true;
74 flcn->is_interrupt_enabled = false; 75 flcn->is_interrupt_enabled = false;
75 break; 76 break;
76 case FALCON_ID_GPCCS: 77 case FALCON_ID_GPCCS:
77 flcn->flcn_base = FALCON_GPCCS_BASE; 78 flcn->flcn_base = FALCON_GPCCS_BASE;
78 flcn->is_falcon_supported = true; 79 flcn->is_falcon_supported = true;
79 flcn->is_interrupt_enabled = false; 80 flcn->is_interrupt_enabled = false;
80 break; 81 break;
81 case FALCON_ID_NVDEC: 82 case FALCON_ID_NVDEC:
82 flcn->flcn_base = FALCON_NVDEC_BASE; 83 flcn->flcn_base = FALCON_NVDEC_BASE;
83 flcn->is_falcon_supported = true; 84 flcn->is_falcon_supported = true;
84 flcn->is_interrupt_enabled = true; 85 flcn->is_interrupt_enabled = true;
85 break; 86 break;
86 default: 87 default:
87 flcn->is_falcon_supported = false; 88 flcn->is_falcon_supported = false;
88 nvgpu_err(g, "Invalid flcn request"); 89 nvgpu_err(g, "Invalid flcn request");
90 err = -ENODEV;
89 break; 91 break;
90 } 92 }
91 93
92 if (flcn->is_falcon_supported) { 94 if (flcn->is_falcon_supported) {
93 nvgpu_mutex_init(&flcn->copy_lock); 95 err = nvgpu_mutex_init(&flcn->copy_lock);
94 gp106_falcon_ops(flcn); 96 if (err != 0) {
97 nvgpu_err(g, "Error in copy_lock mutex initialization");
98 } else {
99 gp106_falcon_ops(flcn);
100 }
95 } else { 101 } else {
96 nvgpu_info(g, "falcon 0x%x not supported on %s", 102 nvgpu_info(g, "falcon 0x%x not supported on %s",
97 flcn->flcn_id, g->name); 103 flcn->flcn_id, g->name);
98 } 104 }
105
106 return err;
99} 107}
diff --git a/drivers/gpu/nvgpu/gp106/flcn_gp106.h b/drivers/gpu/nvgpu/gp106/flcn_gp106.h
index a558e4ef..49275234 100644
--- a/drivers/gpu/nvgpu/gp106/flcn_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/flcn_gp106.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,6 +22,6 @@
22#ifndef __FLCN_GP106_H__ 22#ifndef __FLCN_GP106_H__
23#define __FLCN_GP106_H__ 23#define __FLCN_GP106_H__
24 24
25void gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn); 25int gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
26 26
27#endif /* __FLCN_GP106_H__ */ 27#endif /* __FLCN_GP106_H__ */
diff --git a/drivers/gpu/nvgpu/gv100/flcn_gv100.c b/drivers/gpu/nvgpu/gv100/flcn_gv100.c
index 5820d6c9..5167e3f0 100644
--- a/drivers/gpu/nvgpu/gv100/flcn_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/flcn_gv100.c
@@ -29,27 +29,29 @@
29 29
30#include <nvgpu/hw/gv100/hw_falcon_gv100.h> 30#include <nvgpu/hw/gv100/hw_falcon_gv100.h>
31 31
32void gv100_falcon_hal_sw_init(struct nvgpu_falcon *flcn) 32int gv100_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
33{ 33{
34 struct gk20a *g = flcn->g; 34 struct gk20a *g = flcn->g;
35 int err = 0;
35 36
36 switch (flcn->flcn_id) { 37 if (flcn->flcn_id == FALCON_ID_MINION) {
37 case FALCON_ID_MINION:
38 flcn->flcn_base = g->nvlink.minion_base; 38 flcn->flcn_base = g->nvlink.minion_base;
39 flcn->is_falcon_supported = true; 39 flcn->is_falcon_supported = true;
40 flcn->is_interrupt_enabled = true; 40 flcn->is_interrupt_enabled = true;
41 break;
42 default:
43 break;
44 }
45 41
46 if (flcn->is_falcon_supported) { 42 err = nvgpu_mutex_init(&flcn->copy_lock);
47 nvgpu_mutex_init(&flcn->copy_lock); 43 if (err != 0) {
44 nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
45 return err;
46 }
47
48 gk20a_falcon_ops(flcn); 48 gk20a_falcon_ops(flcn);
49 } else { 49 } else {
50 /* 50 /*
51 * Fall back 51 * Fall back
52 */ 52 */
53 gp106_falcon_hal_sw_init(flcn); 53 err = gp106_falcon_hal_sw_init(flcn);
54 } 54 }
55
56 return err;
55} 57}
diff --git a/drivers/gpu/nvgpu/gv100/flcn_gv100.h b/drivers/gpu/nvgpu/gv100/flcn_gv100.h
index 869e74c8..9207519a 100644
--- a/drivers/gpu/nvgpu/gv100/flcn_gv100.h
+++ b/drivers/gpu/nvgpu/gv100/flcn_gv100.h
@@ -22,6 +22,6 @@
22#ifndef __FLCN_GV100_H__ 22#ifndef __FLCN_GV100_H__
23#define __FLCN_GV100_H__ 23#define __FLCN_GV100_H__
24 24
25void gv100_falcon_hal_sw_init(struct nvgpu_falcon *flcn); 25int gv100_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
26 26
27#endif /* __FLCN_GV100_H__ */ 27#endif /* __FLCN_GV100_H__ */
diff --git a/drivers/gpu/nvgpu/gv100/nvlink_gv100.c b/drivers/gpu/nvgpu/gv100/nvlink_gv100.c
index b39e4165..3e1b2cda 100644
--- a/drivers/gpu/nvgpu/gv100/nvlink_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/nvlink_gv100.c
@@ -2715,7 +2715,11 @@ int gv100_nvlink_early_init(struct gk20a *g)
2715 goto nvlink_init_exit; 2715 goto nvlink_init_exit;
2716 } 2716 }
2717 2717
2718 nvgpu_flcn_sw_init(g, FALCON_ID_MINION); 2718 err = nvgpu_flcn_sw_init(g, FALCON_ID_MINION);
2719 if (err != 0) {
2720 nvgpu_err(g, "failed to sw init FALCON_ID_MINION");
2721 goto nvlink_init_exit;
2722 }
2719 2723
2720 g->nvlink.discovered_links &= ~g->nvlink.link_disable_mask; 2724 g->nvlink.discovered_links &= ~g->nvlink.link_disable_mask;
2721 nvgpu_log(g, gpu_dbg_nvlink, "link_disable_mask = 0x%08x (from VBIOS)", 2725 nvgpu_log(g, gpu_dbg_nvlink, "link_disable_mask = 0x%08x (from VBIOS)",
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
index 73b7dae7..ceadc1c1 100644
--- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
@@ -167,11 +167,15 @@ static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
167 167
168static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g) 168static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
169{ 169{
170 int err; 170 int err = 0;
171 171
172 nvgpu_log_fn(g, " "); 172 nvgpu_log_fn(g, " ");
173 173
174 nvgpu_mutex_init(&g->mm.hub_isr_mutex); 174 err = nvgpu_mutex_init(&g->mm.hub_isr_mutex);
175 if (err != 0) {
176 nvgpu_err(g, "Error in hub_isr_mutex initialization");
177 return err;
178 }
175 179
176 err = gv11b_mm_mmu_fault_info_buf_init(g); 180 err = gv11b_mm_mmu_fault_info_buf_init(g);
177 181
diff --git a/drivers/gpu/nvgpu/include/nvgpu/falcon.h b/drivers/gpu/nvgpu/include/nvgpu/falcon.h
index 2920e281..55dca035 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/falcon.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/falcon.h
@@ -29,12 +29,12 @@
29/* 29/*
30 * Falcon Id Defines 30 * Falcon Id Defines
31 */ 31 */
32#define FALCON_ID_PMU (0) 32#define FALCON_ID_PMU (0U)
33#define FALCON_ID_FECS (2) 33#define FALCON_ID_FECS (2U)
34#define FALCON_ID_GPCCS (3) 34#define FALCON_ID_GPCCS (3U)
35#define FALCON_ID_NVDEC (4) 35#define FALCON_ID_NVDEC (4U)
36#define FALCON_ID_SEC2 (7) 36#define FALCON_ID_SEC2 (7U)
37#define FALCON_ID_MINION (10) 37#define FALCON_ID_MINION (10U)
38 38
39/* 39/*
40 * Falcon Base address Defines 40 * Falcon Base address Defines
@@ -317,6 +317,6 @@ int nvgpu_flcn_queue_push(struct nvgpu_falcon *flcn,
317void nvgpu_flcn_queue_free(struct nvgpu_falcon *flcn, 317void nvgpu_flcn_queue_free(struct nvgpu_falcon *flcn,
318 struct nvgpu_falcon_queue *queue); 318 struct nvgpu_falcon_queue *queue);
319 319
320void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id); 320int nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id);
321 321
322#endif /* __FALCON_H__ */ 322#endif /* __FALCON_H__ */