author		Scott Long <scottl@nvidia.com>	2018-07-26 18:41:20 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-02 22:16:45 -0400
commit		93027eb2093b4d41238f814c42ad5a553d964ce5 (patch)
tree		15e094bb6f4ad8aab0785181f460db1ce24f8619
parent		7216f3dd71cc023ec4d8ef0c9a90a554c0f09362 (diff)
gpu: nvgpu: fix MISRA Rule 10.1 issues in SIM code
Fix MISRA Rule 10.1 violations in gr_gk20a_init_ctx_vars_sim(). Instead of
logically ORing the alloc_xxx_list_yyy() results into the signed err
variable, bail out immediately if an allocation request fails.

Also sync gr_gk20a_init_ctx_vars_sim() behavior with
gr_gk20a_init_ctx_vars_fw():
* return a valid errno on failure
* free any previously allocated resources on failure

JIRA NVGPU-650

Change-Id: Ie5ea78438da59896da2a9f562d01e46ffaf56dec
Signed-off-by: Scott Long <scottl@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1787042
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
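The gist of the violation and of the error-handling pattern adopted by the fix, as a minimal
standalone sketch (alloc_list() and init_lists() are hypothetical stand-ins for the nvgpu
allocators and for gr_gk20a_init_ctx_vars_sim(); the actual change is in the diff below):

#include <errno.h>
#include <stdlib.h>

/* Hypothetical allocator standing in for alloc_u32_list_gk20a() and
 * friends: returns a pointer on success, NULL on failure. */
static void *alloc_list(size_t n)
{
	return malloc(n);
}

static int init_lists(void)
{
	int err = -ENOMEM;
	void *a = NULL;
	void *b = NULL;

	/*
	 * MISRA-problematic pattern (what the old code did):
	 *
	 *     err |= !alloc_list(16U);
	 *
	 * '!' yields an essentially Boolean operand and '|=' mixes it with
	 * the essentially signed 'err', which is what Rule 10.1 flags; it
	 * also collapses every failure into 1 instead of a valid errno.
	 */

	/* Pattern used by the fix: check each allocation and bail out
	 * immediately, so 'err' already holds a valid errno and cleanup
	 * happens in one place under the fail label. */
	a = alloc_list(16U);
	if (a == NULL) {
		goto fail;
	}
	b = alloc_list(32U);
	if (b == NULL) {
		goto fail;
	}

	return 0;

fail:
	/* free anything allocated before the failure */
	free(a);
	free(b);
	return err;
}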
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c	110
1 file changed, 86 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
index 6d6352df..80252aaa 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
@@ -30,7 +30,7 @@
 
 int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
 {
-	int err = 0;
+	int err = -ENOMEM;
 	u32 i, temp;
 
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info,
@@ -39,8 +39,9 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
 	g->gr.ctx_vars.dynamic = true;
 	g->gr.netlist = GR_NETLIST_DYNAMIC;
 
-	if(!g->sim->esc_readl) {
+	if (g->sim->esc_readl == NULL) {
 		nvgpu_err(g, "Invalid pointer to query function.");
+		err = -ENOENT;
 		goto fail;
 	}
@@ -89,28 +90,69 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
 	g->sim->esc_readl(g, "GRCTX_REG_LIST_PPC_COUNT", 0,
 		&g->gr.ctx_vars.ctxsw_regs.ppc.count);
 
-	err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.fecs.inst);
-	err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.fecs.data);
-	err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.gpccs.inst);
-	err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.gpccs.data);
-	err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_bundle_init);
-	err |= !alloc_av64_list_gk20a(g, &g->gr.ctx_vars.sw_bundle64_init);
-	err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_method_init);
-	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.sw_ctx_load);
-	err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_non_ctx_load);
-	err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_veid_bundle_init);
-	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.sys);
-	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.gpc);
-	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.tpc);
-	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
-	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.ppc);
-	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.pm_sys);
-	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.pm_gpc);
-	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.pm_tpc);
-	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.etpc);
-
-	if (err)
+	if (alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.fecs.inst) == NULL) {
 		goto fail;
+	}
+	if (alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.fecs.data) == NULL) {
+		goto fail;
+	}
+	if (alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.gpccs.inst) == NULL) {
+		goto fail;
+	}
+	if (alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.gpccs.data) == NULL) {
+		goto fail;
+	}
+	if (alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_bundle_init) == NULL) {
+		goto fail;
+	}
+	if (alloc_av64_list_gk20a(g,
+			&g->gr.ctx_vars.sw_bundle64_init) == NULL) {
+		goto fail;
+	}
+	if (alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_method_init) == NULL) {
+		goto fail;
+	}
+	if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.sw_ctx_load) == NULL) {
+		goto fail;
+	}
+	if (alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_non_ctx_load) == NULL) {
+		goto fail;
+	}
+	if (alloc_av_list_gk20a(g,
+			&g->gr.ctx_vars.sw_veid_bundle_init) == NULL) {
+		goto fail;
+	}
+	if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.sys) == NULL) {
+		goto fail;
+	}
+	if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.gpc) == NULL) {
+		goto fail;
+	}
+	if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.tpc) == NULL) {
+		goto fail;
+	}
+	if (alloc_aiv_list_gk20a(g,
+			&g->gr.ctx_vars.ctxsw_regs.zcull_gpc) == NULL) {
+		goto fail;
+	}
+	if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.ppc) == NULL) {
+		goto fail;
+	}
+	if (alloc_aiv_list_gk20a(g,
+			&g->gr.ctx_vars.ctxsw_regs.pm_sys) == NULL) {
+		goto fail;
+	}
+	if (alloc_aiv_list_gk20a(g,
+			&g->gr.ctx_vars.ctxsw_regs.pm_gpc) == NULL) {
+		goto fail;
+	}
+	if (alloc_aiv_list_gk20a(g,
+			&g->gr.ctx_vars.ctxsw_regs.pm_tpc) == NULL) {
+		goto fail;
+	}
+	if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.etpc) == NULL) {
+		goto fail;
+	}
 
 	for (i = 0; i < g->gr.ctx_vars.ucode.fecs.inst.count; i++)
 		g->sim->esc_readl(g, "GRCTX_UCODE_INST_FECS",
@@ -285,6 +327,26 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
 	return 0;
 fail:
 	nvgpu_err(g, "failed querying grctx info from chiplib");
-	return err;
 
+	nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.inst.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.data.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.inst.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.data.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle_init.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle64_init.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.sw_method_init.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.sw_ctx_load.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.sw_non_ctx_load.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.sw_veid_bundle_init.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.sys.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.gpc.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.tpc.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.zcull_gpc.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.ppc.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_sys.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_gpc.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_tpc.l);
+	nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.etpc.l);
+
+	return err;
 }