summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
diff options
context:
space:
mode:
authorSrirangan <smadhavan@nvidia.com>2018-08-14 05:29:27 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-08-21 18:44:28 -0400
commite988951ccab1031022ac354bbe8f53e1dc849b7a (patch)
tree7fe8d7fa8b46f501c2e1a873b84873a5173478d5 /drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
parent652da8116966af2a8438a9a9f135a11b4e5c6c7b (diff)
gpu: nvgpu: common: pmu: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces, including single-statement blocks. Fix errors due to single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I497fbdb07bb2ec5a404046f06db3c713b3859e8e
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1799525
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c')
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c39
1 file changed, 26 insertions, 13 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
index 964b1488..73893f2c 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c
@@ -65,8 +65,9 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
65 struct pmu_payload payload; 65 struct pmu_payload payload;
66 u32 seq; 66 u32 seq;
67 67
68 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) 68 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
69 return 0; 69 return 0;
70 }
70 71
71 nvgpu_log_fn(g, " "); 72 nvgpu_log_fn(g, " ");
72 73
@@ -74,9 +75,10 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
74 75
75 gk20a_pmu_init_perfmon_counter(g); 76 gk20a_pmu_init_perfmon_counter(g);
76 77
77 if (!pmu->sample_buffer) 78 if (!pmu->sample_buffer) {
78 pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, 79 pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
79 2 * sizeof(u16)); 80 2 * sizeof(u16));
81 }
80 if (!pmu->sample_buffer) { 82 if (!pmu->sample_buffer) {
81 nvgpu_err(g, "failed to allocate perfmon sample buffer"); 83 nvgpu_err(g, "failed to allocate perfmon sample buffer");
82 return -ENOMEM; 84 return -ENOMEM;
@@ -134,8 +136,9 @@ int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu)
134 struct pmu_payload payload; 136 struct pmu_payload payload;
135 u32 seq; 137 u32 seq;
136 138
137 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) 139 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
138 return 0; 140 return 0;
141 }
139 142
140 /* PERFMON Start */ 143 /* PERFMON Start */
141 memset(&cmd, 0, sizeof(struct pmu_cmd)); 144 memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -183,8 +186,9 @@ int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu)
183 struct pmu_cmd cmd; 186 struct pmu_cmd cmd;
184 u32 seq; 187 u32 seq;
185 188
186 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) 189 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
187 return 0; 190 return 0;
191 }
188 192
189 /* PERFMON Stop */ 193 /* PERFMON Stop */
190 memset(&cmd, 0, sizeof(struct pmu_cmd)); 194 memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -250,8 +254,9 @@ void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
250 254
251void nvgpu_pmu_reset_load_counters(struct gk20a *g) 255void nvgpu_pmu_reset_load_counters(struct gk20a *g)
252{ 256{
253 if (!g->power_on || gk20a_busy(g)) 257 if (!g->power_on || gk20a_busy(g)) {
254 return; 258 return;
259 }
255 260
256 gk20a_pmu_reset_idle_counter(g, 2); 261 gk20a_pmu_reset_idle_counter(g, 2);
257 gk20a_pmu_reset_idle_counter(g, 1); 262 gk20a_pmu_reset_idle_counter(g, 1);
@@ -288,8 +293,9 @@ int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
288 } 293 }
289 294
290 /* restart sampling */ 295 /* restart sampling */
291 if (pmu->perfmon_sampling_enabled) 296 if (pmu->perfmon_sampling_enabled) {
292 return g->ops.pmu.pmu_perfmon_start_sampling(&(g->pmu)); 297 return g->ops.pmu.pmu_perfmon_start_sampling(&(g->pmu));
298 }
293 299
294 return 0; 300 return 0;
295} 301}
@@ -301,8 +307,9 @@ int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu)
301 struct nv_pmu_rpc_struct_perfmon_init rpc; 307 struct nv_pmu_rpc_struct_perfmon_init rpc;
302 int status = 0; 308 int status = 0;
303 309
304 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) 310 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
305 return 0; 311 return 0;
312 }
306 313
307 nvgpu_log_fn(g, " "); 314 nvgpu_log_fn(g, " ");
308 315
@@ -348,8 +355,9 @@ int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu)
348 struct nv_pmu_rpc_struct_perfmon_start rpc; 355 struct nv_pmu_rpc_struct_perfmon_start rpc;
349 int status = 0; 356 int status = 0;
350 357
351 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) 358 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
352 return 0; 359 return 0;
360 }
353 361
354 nvgpu_log_fn(g, " "); 362 nvgpu_log_fn(g, " ");
355 363
@@ -365,8 +373,9 @@ int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu)
365 373
366 nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_START\n"); 374 nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_START\n");
367 PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, START, &rpc, 0); 375 PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, START, &rpc, 0);
368 if (status) 376 if (status) {
369 nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); 377 nvgpu_err(g, "Failed to execute RPC, status=0x%x", status);
378 }
370 379
371 return status; 380 return status;
372} 381}
@@ -377,8 +386,9 @@ int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu)
377 struct nv_pmu_rpc_struct_perfmon_stop rpc; 386 struct nv_pmu_rpc_struct_perfmon_stop rpc;
378 int status = 0; 387 int status = 0;
379 388
380 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) 389 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
381 return 0; 390 return 0;
391 }
382 392
383 nvgpu_log_fn(g, " "); 393 nvgpu_log_fn(g, " ");
384 394
@@ -386,8 +396,9 @@ int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu)
386 /* PERFMON Stop */ 396 /* PERFMON Stop */
387 nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_STOP\n"); 397 nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_STOP\n");
388 PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, STOP, &rpc, 0); 398 PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, STOP, &rpc, 0);
389 if (status) 399 if (status) {
390 nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); 400 nvgpu_err(g, "Failed to execute RPC, status=0x%x", status);
401 }
391 402
392 return status; 403 return status;
393} 404}
@@ -398,8 +409,9 @@ int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu)
398 struct nv_pmu_rpc_struct_perfmon_query rpc; 409 struct nv_pmu_rpc_struct_perfmon_query rpc;
399 int status = 0; 410 int status = 0;
400 411
401 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) 412 if (!nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
402 return 0; 413 return 0;
414 }
403 415
404 nvgpu_log_fn(g, " "); 416 nvgpu_log_fn(g, " ");
405 pmu->perfmon_query = 0; 417 pmu->perfmon_query = 0;
@@ -407,8 +419,9 @@ int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu)
407 /* PERFMON QUERY */ 419 /* PERFMON QUERY */
408 nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_QUERY\n"); 420 nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_QUERY\n");
409 PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, QUERY, &rpc, 0); 421 PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, QUERY, &rpc, 0);
410 if (status) 422 if (status) {
411 nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); 423 nvgpu_err(g, "Failed to execute RPC, status=0x%x", status);
424 }
412 425
413 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), 426 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
414 &pmu->perfmon_query, 1); 427 &pmu->perfmon_query, 1);