diff options
author | Nicolas Benech <nbenech@nvidia.com> | 2018-08-23 16:23:52 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-09-05 23:39:08 -0400 |
commit | 2eface802a4aea417206bcdda689a65cf47d300b (patch) | |
tree | 502af9d48004af4edf8f02a2a7cf751ef5a11325 /drivers/gpu/nvgpu/gk20a | |
parent | b44c7fdb114a63ab98fffc0f246776b56399ff64 (diff) |
gpu: nvgpu: Fix mutex MISRA 17.7 violations
MISRA Rule-17.7 requires the return value of all functions to be used.
Fix is either to use the return value or change the function to return
void. This patch contains fixes for calls to nvgpu_mutex_init and
improves related error handling.
JIRA NVGPU-677
Change-Id: I609fa138520cc7ccfdd5aa0e7fd28c8ca0b3a21c
Signed-off-by: Nicolas Benech <nbenech@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1805598
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 13 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/flcn_gk20a.c | 20 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/flcn_gk20a.h | 4 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.c | 20 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 2 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 33 |
6 files changed, 68 insertions, 24 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index f06bf1c5..9dfe3083 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -696,6 +696,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) | |||
696 | u32 active_engine_id, pbdma_id, engine_id; | 696 | u32 active_engine_id, pbdma_id, engine_id; |
697 | int flags = nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ? | 697 | int flags = nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ? |
698 | NVGPU_DMA_FORCE_CONTIGUOUS : 0; | 698 | NVGPU_DMA_FORCE_CONTIGUOUS : 0; |
699 | int err = 0; | ||
699 | 700 | ||
700 | nvgpu_log_fn(g, " "); | 701 | nvgpu_log_fn(g, " "); |
701 | 702 | ||
@@ -733,7 +734,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) | |||
733 | f->num_runlist_entries, runlist_size); | 734 | f->num_runlist_entries, runlist_size); |
734 | 735 | ||
735 | for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { | 736 | for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { |
736 | int err = nvgpu_dma_alloc_flags_sys(g, flags, | 737 | err = nvgpu_dma_alloc_flags_sys(g, flags, |
737 | runlist_size, | 738 | runlist_size, |
738 | &runlist->mem[i]); | 739 | &runlist->mem[i]); |
739 | if (err) { | 740 | if (err) { |
@@ -741,7 +742,13 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) | |||
741 | goto clean_up_runlist; | 742 | goto clean_up_runlist; |
742 | } | 743 | } |
743 | } | 744 | } |
744 | nvgpu_mutex_init(&runlist->runlist_lock); | 745 | |
746 | err = nvgpu_mutex_init(&runlist->runlist_lock); | ||
747 | if (err != 0) { | ||
748 | nvgpu_err(g, | ||
749 | "Error in runlist_lock mutex initialization"); | ||
750 | goto clean_up_runlist; | ||
751 | } | ||
745 | 752 | ||
746 | /* None of buffers is pinned if this value doesn't change. | 753 | /* None of buffers is pinned if this value doesn't change. |
747 | Otherwise, one of them (cur_buffer) must have been pinned. */ | 754 | Otherwise, one of them (cur_buffer) must have been pinned. */ |
@@ -773,7 +780,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) | |||
773 | clean_up_runlist: | 780 | clean_up_runlist: |
774 | gk20a_fifo_delete_runlist(f); | 781 | gk20a_fifo_delete_runlist(f); |
775 | nvgpu_log_fn(g, "fail"); | 782 | nvgpu_log_fn(g, "fail"); |
776 | return -ENOMEM; | 783 | return err; |
777 | } | 784 | } |
778 | 785 | ||
779 | u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g) | 786 | u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g) |
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c index 2f715ae1..5fa4dd53 100644 --- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c | |||
@@ -707,9 +707,10 @@ void gk20a_falcon_ops(struct nvgpu_falcon *flcn) | |||
707 | gk20a_falcon_engine_dependency_ops(flcn); | 707 | gk20a_falcon_engine_dependency_ops(flcn); |
708 | } | 708 | } |
709 | 709 | ||
710 | void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn) | 710 | int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn) |
711 | { | 711 | { |
712 | struct gk20a *g = flcn->g; | 712 | struct gk20a *g = flcn->g; |
713 | int err = 0; | ||
713 | 714 | ||
714 | switch (flcn->flcn_id) { | 715 | switch (flcn->flcn_id) { |
715 | case FALCON_ID_PMU: | 716 | case FALCON_ID_PMU: |
@@ -726,28 +727,35 @@ void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn) | |||
726 | flcn->flcn_base = FALCON_FECS_BASE; | 727 | flcn->flcn_base = FALCON_FECS_BASE; |
727 | flcn->is_falcon_supported = true; | 728 | flcn->is_falcon_supported = true; |
728 | flcn->is_interrupt_enabled = false; | 729 | flcn->is_interrupt_enabled = false; |
729 | break; | 730 | break; |
730 | case FALCON_ID_GPCCS: | 731 | case FALCON_ID_GPCCS: |
731 | flcn->flcn_base = FALCON_GPCCS_BASE; | 732 | flcn->flcn_base = FALCON_GPCCS_BASE; |
732 | flcn->is_falcon_supported = true; | 733 | flcn->is_falcon_supported = true; |
733 | flcn->is_interrupt_enabled = false; | 734 | flcn->is_interrupt_enabled = false; |
734 | break; | 735 | break; |
735 | case FALCON_ID_NVDEC: | 736 | case FALCON_ID_NVDEC: |
736 | flcn->flcn_base = FALCON_NVDEC_BASE; | 737 | flcn->flcn_base = FALCON_NVDEC_BASE; |
737 | flcn->is_falcon_supported = false; | 738 | flcn->is_falcon_supported = false; |
738 | flcn->is_interrupt_enabled = false; | 739 | flcn->is_interrupt_enabled = false; |
739 | break; | 740 | break; |
740 | default: | 741 | default: |
741 | flcn->is_falcon_supported = false; | 742 | flcn->is_falcon_supported = false; |
742 | nvgpu_err(g, "Invalid flcn request"); | 743 | nvgpu_err(g, "Invalid flcn request"); |
744 | err = -ENODEV; | ||
743 | break; | 745 | break; |
744 | } | 746 | } |
745 | 747 | ||
746 | if (flcn->is_falcon_supported) { | 748 | if (flcn->is_falcon_supported) { |
747 | nvgpu_mutex_init(&flcn->copy_lock); | 749 | err = nvgpu_mutex_init(&flcn->copy_lock); |
748 | gk20a_falcon_ops(flcn); | 750 | if (err != 0) { |
751 | nvgpu_err(g, "Error in flcn.copy_lock mutex initialization"); | ||
752 | } else { | ||
753 | gk20a_falcon_ops(flcn); | ||
754 | } | ||
749 | } else { | 755 | } else { |
750 | nvgpu_log_info(g, "falcon 0x%x not supported on %s", | 756 | nvgpu_log_info(g, "falcon 0x%x not supported on %s", |
751 | flcn->flcn_id, g->name); | 757 | flcn->flcn_id, g->name); |
752 | } | 758 | } |
759 | |||
760 | return err; | ||
753 | } | 761 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.h b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.h index 95d46251..7f7ee89e 100644 --- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. | 2 | * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
@@ -23,7 +23,7 @@ | |||
23 | #define __FLCN_GK20A_H__ | 23 | #define __FLCN_GK20A_H__ |
24 | 24 | ||
25 | void gk20a_falcon_ops(struct nvgpu_falcon *flcn); | 25 | void gk20a_falcon_ops(struct nvgpu_falcon *flcn); |
26 | void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn); | 26 | int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn); |
27 | void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn); | 27 | void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn); |
28 | 28 | ||
29 | #endif /* __FLCN_GK20A_H__ */ | 29 | #endif /* __FLCN_GK20A_H__ */ |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c index f5e35927..1c34c152 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a.c | |||
@@ -150,7 +150,7 @@ int gk20a_prepare_poweroff(struct gk20a *g) | |||
150 | 150 | ||
151 | int gk20a_finalize_poweron(struct gk20a *g) | 151 | int gk20a_finalize_poweron(struct gk20a *g) |
152 | { | 152 | { |
153 | int err; | 153 | int err = 0; |
154 | #if defined(CONFIG_TEGRA_GK20A_NVHOST) | 154 | #if defined(CONFIG_TEGRA_GK20A_NVHOST) |
155 | u32 nr_pages; | 155 | u32 nr_pages; |
156 | #endif | 156 | #endif |
@@ -182,9 +182,21 @@ int gk20a_finalize_poweron(struct gk20a *g) | |||
182 | } | 182 | } |
183 | 183 | ||
184 | /* init interface layer support for PMU falcon */ | 184 | /* init interface layer support for PMU falcon */ |
185 | nvgpu_flcn_sw_init(g, FALCON_ID_PMU); | 185 | err = nvgpu_flcn_sw_init(g, FALCON_ID_PMU); |
186 | nvgpu_flcn_sw_init(g, FALCON_ID_SEC2); | 186 | if (err != 0) { |
187 | nvgpu_flcn_sw_init(g, FALCON_ID_NVDEC); | 187 | nvgpu_err(g, "failed to sw init FALCON_ID_PMU"); |
188 | goto done; | ||
189 | } | ||
190 | err = nvgpu_flcn_sw_init(g, FALCON_ID_SEC2); | ||
191 | if (err != 0) { | ||
192 | nvgpu_err(g, "failed to sw init FALCON_ID_SEC2"); | ||
193 | goto done; | ||
194 | } | ||
195 | err = nvgpu_flcn_sw_init(g, FALCON_ID_NVDEC); | ||
196 | if (err != 0) { | ||
197 | nvgpu_err(g, "failed to sw init FALCON_ID_NVDEC"); | ||
198 | goto done; | ||
199 | } | ||
188 | 200 | ||
189 | if (g->ops.bios.init) { | 201 | if (g->ops.bios.init) { |
190 | err = g->ops.bios.init(g); | 202 | err = g->ops.bios.init(g); |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index be00f708..898dfec8 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -1263,7 +1263,7 @@ struct gpu_ops { | |||
1263 | u32 (*get_link_control_status)(struct gk20a *g); | 1263 | u32 (*get_link_control_status)(struct gk20a *g); |
1264 | } xve; | 1264 | } xve; |
1265 | struct { | 1265 | struct { |
1266 | void (*falcon_hal_sw_init)(struct nvgpu_falcon *flcn); | 1266 | int (*falcon_hal_sw_init)(struct nvgpu_falcon *flcn); |
1267 | } falcon; | 1267 | } falcon; |
1268 | struct { | 1268 | struct { |
1269 | void (*enable_priv_ring)(struct gk20a *g); | 1269 | void (*enable_priv_ring)(struct gk20a *g); |
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index 39d6879b..2969743b 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c | |||
@@ -3983,10 +3983,14 @@ static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr) | |||
3983 | int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr) | 3983 | int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr) |
3984 | { | 3984 | { |
3985 | struct zbc_entry zbc_val; | 3985 | struct zbc_entry zbc_val; |
3986 | u32 i; | 3986 | u32 i = 0; |
3987 | int err; | 3987 | int err = 0; |
3988 | 3988 | ||
3989 | nvgpu_mutex_init(&gr->zbc_lock); | 3989 | err = nvgpu_mutex_init(&gr->zbc_lock); |
3990 | if (err != 0) { | ||
3991 | nvgpu_err(g, "Error in zbc_lock mutex initialization"); | ||
3992 | return err; | ||
3993 | } | ||
3990 | 3994 | ||
3991 | /* load default color table */ | 3995 | /* load default color table */ |
3992 | zbc_val.type = GK20A_ZBC_TYPE_COLOR; | 3996 | zbc_val.type = GK20A_ZBC_TYPE_COLOR; |
@@ -4749,7 +4753,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g) | |||
4749 | static int gk20a_init_gr_setup_sw(struct gk20a *g) | 4753 | static int gk20a_init_gr_setup_sw(struct gk20a *g) |
4750 | { | 4754 | { |
4751 | struct gr_gk20a *gr = &g->gr; | 4755 | struct gr_gk20a *gr = &g->gr; |
4752 | int err; | 4756 | int err = 0; |
4753 | 4757 | ||
4754 | nvgpu_log_fn(g, " "); | 4758 | nvgpu_log_fn(g, " "); |
4755 | 4759 | ||
@@ -4761,7 +4765,11 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) | |||
4761 | gr->g = g; | 4765 | gr->g = g; |
4762 | 4766 | ||
4763 | #if defined(CONFIG_GK20A_CYCLE_STATS) | 4767 | #if defined(CONFIG_GK20A_CYCLE_STATS) |
4764 | nvgpu_mutex_init(&g->gr.cs_lock); | 4768 | err = nvgpu_mutex_init(&g->gr.cs_lock); |
4769 | if (err != 0) { | ||
4770 | nvgpu_err(g, "Error in gr.cs_lock mutex initialization"); | ||
4771 | return err; | ||
4772 | } | ||
4765 | #endif | 4773 | #endif |
4766 | 4774 | ||
4767 | err = gr_gk20a_init_gr_config(g, gr); | 4775 | err = gr_gk20a_init_gr_config(g, gr); |
@@ -4802,7 +4810,12 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) | |||
4802 | if (g->ops.gr.init_gfxp_wfi_timeout_count) | 4810 | if (g->ops.gr.init_gfxp_wfi_timeout_count) |
4803 | g->ops.gr.init_gfxp_wfi_timeout_count(g); | 4811 | g->ops.gr.init_gfxp_wfi_timeout_count(g); |
4804 | 4812 | ||
4805 | nvgpu_mutex_init(&gr->ctx_mutex); | 4813 | err = nvgpu_mutex_init(&gr->ctx_mutex); |
4814 | if (err != 0) { | ||
4815 | nvgpu_err(g, "Error in gr.ctx_mutex initialization"); | ||
4816 | goto clean_up; | ||
4817 | } | ||
4818 | |||
4806 | nvgpu_spinlock_init(&gr->ch_tlb_lock); | 4819 | nvgpu_spinlock_init(&gr->ch_tlb_lock); |
4807 | 4820 | ||
4808 | gr->remove_support = gk20a_remove_gr_support; | 4821 | gr->remove_support = gk20a_remove_gr_support; |
@@ -4869,12 +4882,16 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) | |||
4869 | 4882 | ||
4870 | int gk20a_init_gr_support(struct gk20a *g) | 4883 | int gk20a_init_gr_support(struct gk20a *g) |
4871 | { | 4884 | { |
4872 | u32 err; | 4885 | int err = 0; |
4873 | 4886 | ||
4874 | nvgpu_log_fn(g, " "); | 4887 | nvgpu_log_fn(g, " "); |
4875 | 4888 | ||
4876 | /* this is required before gr_gk20a_init_ctx_state */ | 4889 | /* this is required before gr_gk20a_init_ctx_state */ |
4877 | nvgpu_mutex_init(&g->gr.fecs_mutex); | 4890 | err = nvgpu_mutex_init(&g->gr.fecs_mutex); |
4891 | if (err != 0) { | ||
4892 | nvgpu_err(g, "Error in gr.fecs_mutex initialization"); | ||
4893 | return err; | ||
4894 | } | ||
4878 | 4895 | ||
4879 | err = gr_gk20a_init_ctxsw(g); | 4896 | err = gr_gk20a_init_ctxsw(g); |
4880 | if (err) | 4897 | if (err) |