author		smadhavan <smadhavan@nvidia.com>	2018-09-06 04:38:00 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-12 08:36:04 -0400
commit		c7a3b6db10900e0aabc29ca7307908875d685036 (patch)
tree		1ee88207c5149344841b1423d0cb920498f844b0 /drivers/gpu/nvgpu/lpwr/lpwr.c
parent		c615002d22b4675d08404eb7cc7087d4418eccdb (diff)
gpu: nvgpu: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that the body of every if-else statement be
enclosed in braces, including single-statement bodies. Fix the
violations caused by single-statement if blocks without braces by
introducing the braces.

JIRA NVGPU-671

Change-Id: I8046a09fa7ffc74c3d737ba57132a0a9ae2ff195
Signed-off-by: smadhavan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797699
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Nitin Kumbhar <nkumbhar@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
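For context, MISRA C:2012 Rule 15.6 requires the body of every selection
or iteration statement to be a compound statement (i.e. braced), which
prevents "dangling statement" bugs when a single-statement body is later
extended. The following is a minimal, self-contained sketch of the
before/after pattern this patch applies throughout lpwr.c; the function
and its parameters are hypothetical, invented purely for illustration:

/*
 * Illustrative only -- validate_table() and its arguments are
 * hypothetical and not part of the nvgpu driver.
 */
#include <errno.h>
#include <stddef.h>

static int validate_table(const unsigned int *table_ptr,
			  unsigned int entry_count,
			  unsigned int entry_count_max)
{
	/*
	 * Non-compliant with MISRA 15.6 (single-statement body, no braces):
	 *
	 *	if (table_ptr == NULL)
	 *		return -EINVAL;
	 */

	/* Compliant: every controlled body is a compound statement. */
	if (table_ptr == NULL) {
		return -EINVAL;
	}

	if (entry_count >= entry_count_max) {
		return -EINVAL;
	} else {
		return 0;
	}
}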
Diffstat (limited to 'drivers/gpu/nvgpu/lpwr/lpwr.c')
-rw-r--r--	drivers/gpu/nvgpu/lpwr/lpwr.c	79
1 file changed, 52 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/nvgpu/lpwr/lpwr.c b/drivers/gpu/nvgpu/lpwr/lpwr.c
index 3be8269a..a536bf9e 100644
--- a/drivers/gpu/nvgpu/lpwr/lpwr.c
+++ b/drivers/gpu/nvgpu/lpwr/lpwr.c
@@ -42,14 +42,16 @@ static int get_lpwr_idx_table(struct gk20a *g)
 
 	lpwr_idx_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g,
 			g->bios.perf_token, LOWPOWER_TABLE);
-	if (lpwr_idx_table_ptr == NULL)
+	if (lpwr_idx_table_ptr == NULL) {
 		return -EINVAL;
+	}
 
 	memcpy(&header, lpwr_idx_table_ptr,
 		sizeof(struct nvgpu_bios_lpwr_idx_table_1x_header));
 
-	if (header.entry_count >= LPWR_VBIOS_IDX_ENTRY_COUNT_MAX)
+	if (header.entry_count >= LPWR_VBIOS_IDX_ENTRY_COUNT_MAX) {
 		return -EINVAL;
+	}
 
 	pidx_data->base_sampling_period = (u16)header.base_sampling_period;
 
@@ -84,8 +86,9 @@ static int get_lpwr_gr_table(struct gk20a *g)
 
 	lpwr_gr_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g,
 			g->bios.perf_token, LOWPOWER_GR_TABLE);
-	if (lpwr_gr_table_ptr == NULL)
+	if (lpwr_gr_table_ptr == NULL) {
 		return -EINVAL;
+	}
 
 	memcpy(&header, lpwr_gr_table_ptr,
 		sizeof(struct nvgpu_bios_lpwr_gr_table_1x_header));
@@ -106,9 +109,10 @@ static int get_lpwr_gr_table(struct gk20a *g)
 					NVGPU_PMU_GR_FEATURE_MASK_ALL;
 
 			if (!BIOS_GET_FIELD(entry.feautre_mask,
-				NV_VBIOS_LPWR_GR_FEATURE_MASK_GR_RPPG))
+				NV_VBIOS_LPWR_GR_FEATURE_MASK_GR_RPPG)) {
 				pgr_data->entry[idx].feature_mask &=
 					~NVGPU_PMU_GR_FEATURE_MASK_RPPG;
+			}
 		}
 
 	}
@@ -128,14 +132,16 @@ static int get_lpwr_ms_table(struct gk20a *g)
 
 	lpwr_ms_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g,
 			g->bios.perf_token, LOWPOWER_MS_TABLE);
-	if (lpwr_ms_table_ptr == NULL)
+	if (lpwr_ms_table_ptr == NULL) {
 		return -EINVAL;
+	}
 
 	memcpy(&header, lpwr_ms_table_ptr,
 		sizeof(struct nvgpu_bios_lpwr_ms_table_1x_header));
 
-	if (header.entry_count >= LPWR_VBIOS_MS_ENTRY_COUNT_MAX)
+	if (header.entry_count >= LPWR_VBIOS_MS_ENTRY_COUNT_MAX) {
 		return -EINVAL;
+	}
 
 	pms_data->default_entry_idx = (u8)header.default_entry_idx;
 
@@ -157,19 +163,22 @@ static int get_lpwr_ms_table(struct gk20a *g)
 					NVGPU_PMU_MS_FEATURE_MASK_ALL;
 
 			if (!BIOS_GET_FIELD(entry.feautre_mask,
-				NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_CLOCK_GATING))
+				NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_CLOCK_GATING)) {
 				pms_data->entry[idx].feature_mask &=
 					~NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING;
+			}
 
 			if (!BIOS_GET_FIELD(entry.feautre_mask,
-				NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_SWASR))
+				NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_SWASR)) {
 				pms_data->entry[idx].feature_mask &=
 					~NVGPU_PMU_MS_FEATURE_MASK_SW_ASR;
+			}
 
 			if (!BIOS_GET_FIELD(entry.feautre_mask,
-				NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_RPPG))
+				NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_RPPG)) {
 				pms_data->entry[idx].feature_mask &=
 					~NVGPU_PMU_MS_FEATURE_MASK_RPPG;
+			}
 		}
 
 		pms_data->entry[idx].dynamic_current_logic =
@@ -189,12 +198,14 @@ u32 nvgpu_lpwr_pg_setup(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	err = get_lpwr_gr_table(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = get_lpwr_ms_table(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = get_lpwr_idx_table(g);
 
@@ -232,13 +243,15 @@ int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate)
 
 	pstate_info = pstate_get_clk_set_info(g, pstate,
 			clkwhich_mclk);
-	if (!pstate_info)
+	if (!pstate_info) {
 		return -EINVAL;
+	}
 
 	if (pstate_info->max_mhz >
-			MAX_SWASR_MCLK_FREQ_WITHOUT_WR_TRAINING_MAXWELL_MHZ)
+			MAX_SWASR_MCLK_FREQ_WITHOUT_WR_TRAINING_MAXWELL_MHZ) {
 		payload |=
 			NV_PMU_PG_PARAM_MCLK_CHANGE_GDDR5_WR_TRAINING_ENABLED;
+	}
 
 	if (payload != g->perf_pmu.lpwr.mclk_change_cache) {
 		g->perf_pmu.lpwr.mclk_change_cache = payload;
@@ -311,14 +324,16 @@ u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num)
 
 	nvgpu_log_fn(g, " ");
 
-	if (!pstate)
+	if (!pstate) {
 		return 0;
+	}
 
 	ms_idx = pidx_data->entry[pstate->lpwr_entry_idx].ms_idx;
-	if (pms_data->entry[ms_idx].ms_enabled)
+	if (pms_data->entry[ms_idx].ms_enabled) {
 		return 1;
-	else
+	} else {
 		return 0;
+	}
 }
 
 u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num)
@@ -332,14 +347,16 @@ u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num)
 
 	nvgpu_log_fn(g, " ");
 
-	if (!pstate)
+	if (!pstate) {
 		return 0;
+	}
 
 	idx = pidx_data->entry[pstate->lpwr_entry_idx].gr_idx;
-	if (pgr_data->entry[idx].gr_enabled)
+	if (pgr_data->entry[idx].gr_enabled) {
 		return 1;
-	else
+	} else {
 		return 0;
+	}
 }
 
 
@@ -353,8 +370,9 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
 
 	nvgpu_log_fn(g, " ");
 
-	if (pstate_lock)
+	if (pstate_lock) {
 		nvgpu_clk_arb_pstate_change_lock(g, true);
+	}
 	nvgpu_mutex_acquire(&pmu->pg_mutex);
 
 	present_pstate = nvgpu_clk_arb_get_current_pstate(g);
@@ -362,20 +380,23 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
 	is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
 			present_pstate);
 	if (is_mscg_supported && g->mscg_enabled) {
-		if (!pmu->mscg_stat)
+		if (!pmu->mscg_stat) {
 			pmu->mscg_stat = PMU_MSCG_ENABLED;
+		}
 	}
 
 	is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g,
 			present_pstate);
 	if (is_rppg_supported) {
-		if (g->support_pmu && g->can_elpg)
+		if (g->support_pmu && g->can_elpg) {
 			status = nvgpu_pmu_enable_elpg(g);
+		}
 	}
 
 	nvgpu_mutex_release(&pmu->pg_mutex);
-	if (pstate_lock)
+	if (pstate_lock) {
 		nvgpu_clk_arb_pstate_change_lock(g, false);
+	}
 
 	return status;
 }
@@ -390,8 +411,9 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
 
 	nvgpu_log_fn(g, " ");
 
-	if (pstate_lock)
+	if (pstate_lock) {
 		nvgpu_clk_arb_pstate_change_lock(g, true);
+	}
 	nvgpu_mutex_acquire(&pmu->pg_mutex);
 
 	present_pstate = nvgpu_clk_arb_get_current_pstate(g);
@@ -401,22 +423,25 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
 	if (is_rppg_supported) {
 		if (g->support_pmu && g->elpg_enabled) {
 			status = nvgpu_pmu_disable_elpg(g);
-			if (status)
+			if (status) {
 				goto exit_unlock;
+			}
 		}
 	}
 
 	is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
 			present_pstate);
 	if (is_mscg_supported && g->mscg_enabled) {
-		if (pmu->mscg_stat)
+		if (pmu->mscg_stat) {
 			pmu->mscg_stat = PMU_MSCG_DISABLED;
+		}
 	}
 
 exit_unlock:
 	nvgpu_mutex_release(&pmu->pg_mutex);
-	if (pstate_lock)
+	if (pstate_lock) {
 		nvgpu_clk_arb_pstate_change_lock(g, false);
+	}
 
 	nvgpu_log_fn(g, "done");
 	return status;