author	smadhavan <smadhavan@nvidia.com>	2018-09-06 04:38:00 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-12 08:36:04 -0400
commit	c7a3b6db10900e0aabc29ca7307908875d685036 (patch)
tree	1ee88207c5149344841b1423d0cb920498f844b0 /drivers/gpu/nvgpu/pstate
parent	c615002d22b4675d08404eb7cc7087d4418eccdb (diff)
gpu: nvgpu: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in
braces, including single statement blocks. Fix errors due to single
statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I8046a09fa7ffc74c3d737ba57132a0a9ae2ff195
Signed-off-by: smadhavan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797699
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Nitin Kumbhar <nkumbhar@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/pstate')
-rw-r--r--	drivers/gpu/nvgpu/pstate/pstate.c	129
1 file changed, 86 insertions, 43 deletions
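The change is purely mechanical, so the same shape repeats at every call site in the diff below. As a minimal, self-contained sketch of the pattern (setup_step() is a hypothetical stand-in for the volt/clk/vfe/pstate setup calls, not a function from the driver):

#include <stdio.h>

/* Hypothetical stand-in for a setup routine such as volt_rail_sw_setup(). */
static int setup_step(void)
{
	return 0;	/* pretend the step succeeded */
}

int main(void)
{
	int err;

	err = setup_step();
	if (err) {	/* MISRA 15.6: braces even around a single statement */
		return err;
	}

	printf("setup ok\n");
	return 0;
}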
diff --git a/drivers/gpu/nvgpu/pstate/pstate.c b/drivers/gpu/nvgpu/pstate/pstate.c
index 9b7d9b7e..616d6747 100644
--- a/drivers/gpu/nvgpu/pstate/pstate.c
+++ b/drivers/gpu/nvgpu/pstate/pstate.c
@@ -35,8 +35,9 @@ static int pstate_sw_setup(struct gk20a *g);
 
 void gk20a_deinit_pstate_support(struct gk20a *g)
 {
-	if (g->ops.clk.mclk_deinit)
+	if (g->ops.clk.mclk_deinit) {
 		g->ops.clk.mclk_deinit(g);
+	}
 
 	nvgpu_mutex_destroy(&g->perf_pmu.pstatesobjs.pstate_mutex);
 }
@@ -49,69 +50,84 @@ int gk20a_init_pstate_support(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	err = volt_rail_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = volt_dev_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = volt_policy_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = clk_vin_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = clk_fll_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = therm_domain_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = vfe_var_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = vfe_equ_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = clk_domain_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = clk_vf_point_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = clk_prog_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = pstate_sw_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	if(g->ops.clk.support_pmgr_domain) {
 		err = pmgr_domain_sw_setup(g);
-		if (err)
+		if (err) {
 			return err;
+		}
 	}
 
 	if (g->ops.clk.support_clk_freq_controller) {
 		err = clk_freq_controller_sw_setup(g);
-		if (err)
+		if (err) {
 			return err;
+		}
 	}
 
 	if(g->ops.clk.support_lpwr_pg) {
 		err = nvgpu_lpwr_pg_setup(g);
-		if (err)
+		if (err) {
 			return err;
+		}
 	}
 
 	return err;
@@ -133,16 +149,19 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g)
 	}
 
 	err = volt_rail_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = volt_dev_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = volt_policy_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = g->ops.pmu_ver.volt.volt_send_load_cmd_to_pmu(g);
 	if (err) {
@@ -153,52 +172,64 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g)
 	}
 
 	err = therm_domain_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = vfe_var_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = vfe_equ_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = clk_domain_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = clk_prog_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = clk_vin_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = clk_fll_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = clk_vf_point_pmu_setup(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	if (g->ops.clk.support_clk_freq_controller) {
 		err = clk_freq_controller_pmu_setup(g);
-		if (err)
+		if (err) {
 			return err;
+		}
 	}
 	err = clk_pmu_vin_load(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = g->ops.pmu_ver.clk.perf_pmu_vfe_load(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
-	if (g->ops.clk.support_pmgr_domain)
+	if (g->ops.clk.support_pmgr_domain) {
 		err = pmgr_domain_pmu_setup(g);
+	}
 
 	return err;
 }
@@ -211,8 +242,9 @@ static int pstate_construct_super(struct gk20a *g, struct boardobj **ppboardobj,
 	int err;
 
 	err = boardobj_construct_super(g, ppboardobj, size, args);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	pstate = (struct pstate *)*ppboardobj;
 
@@ -239,9 +271,10 @@ static struct pstate *pstate_construct(struct gk20a *g, void *args)
 
 	if ((tmp->super.type != CTRL_PERF_PSTATE_TYPE_3X) ||
 		(pstate_construct_3x(g, (struct boardobj **)&pstate,
-			sizeof(struct pstate), args)))
+			sizeof(struct pstate), args))) {
 		nvgpu_err(g,
 			"error constructing pstate num=%u", tmp->num);
+	}
 
 	return pstate;
 }
@@ -330,8 +363,9 @@ static int parse_pstate_table_5x(struct gk20a *g,
 		((hdr->base_entry_size != VBIOS_PSTATE_BASE_ENTRY_5X_SIZE_2) &&
 		(hdr->base_entry_size != VBIOS_PSTATE_BASE_ENTRY_5X_SIZE_3)) ||
 		(hdr->clock_entry_size != VBIOS_PSTATE_CLOCK_ENTRY_5X_SIZE_6) ||
-		(hdr->clock_entry_count > CLK_SET_INFO_MAX_SIZE))
+		(hdr->clock_entry_count > CLK_SET_INFO_MAX_SIZE)) {
 		return -EINVAL;
+	}
 
 	p += hdr->header_size;
 
@@ -341,20 +375,24 @@ static int parse_pstate_table_5x(struct gk20a *g,
 	for (i = 0; i < hdr->base_entry_count; i++, p += entry_size) {
 		entry = (struct vbios_pstate_entry_5x *)p;
 
-		if (entry->pstate_level == VBIOS_PERFLEVEL_SKIP_ENTRY)
+		if (entry->pstate_level == VBIOS_PERFLEVEL_SKIP_ENTRY) {
 			continue;
+		}
 
 		err = parse_pstate_entry_5x(g, hdr, entry, &_pstate);
-		if (err)
+		if (err) {
 			goto done;
+		}
 
 		pstate = pstate_construct(g, &_pstate);
-		if (!pstate)
+		if (!pstate) {
 			goto done;
+		}
 
 		err = pstate_insert(g, pstate, i);
-		if (err)
+		if (err) {
 			goto done;
+		}
 	}
 
 done:
@@ -371,8 +409,9 @@ static int pstate_sw_setup(struct gk20a *g)
 	nvgpu_cond_init(&g->perf_pmu.pstatesobjs.pstate_notifier_wq);
 
 	err = nvgpu_mutex_init(&g->perf_pmu.pstatesobjs.pstate_mutex);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = boardobjgrpconstruct_e32(g, &g->perf_pmu.pstatesobjs.super);
 	if (err) {
@@ -401,8 +440,9 @@ static int pstate_sw_setup(struct gk20a *g)
 
 	err = parse_pstate_table_5x(g, hdr);
 done:
-	if (err)
+	if (err) {
 		nvgpu_mutex_destroy(&g->perf_pmu.pstatesobjs.pstate_mutex);
+	}
 	return err;
 }
 
@@ -418,8 +458,9 @@ struct pstate *pstate_find(struct gk20a *g, u32 num)
 		struct pstate *, pstate, i) {
 		nvgpu_log_info(g, "pstate=%p num=%u (looking for num=%u)",
 			pstate, pstate->num, num);
-		if (pstate->num == num)
+		if (pstate->num == num) {
 			return pstate;
+		}
 	}
 	return NULL;
 }
@@ -433,13 +474,15 @@ struct clk_set_info *pstate_get_clk_set_info(struct gk20a *g,
 
 	nvgpu_log_info(g, "pstate = %p", pstate);
 
-	if (!pstate)
+	if (!pstate) {
 		return NULL;
+	}
 
 	for (clkidx = 0; clkidx < pstate->clklist.num_info; clkidx++) {
 		info = &pstate->clklist.clksetinfo[clkidx];
-		if (info->clkwhich == clkwhich)
+		if (info->clkwhich == clkwhich) {
 			return info;
+		}
 	}
 	return NULL;
 }