path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
author     Srirangan <smadhavan@nvidia.com>                     2018-08-30 01:07:55 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-09-05 07:35:54 -0400
commit     0f97bd4d44c8bcedf298f725fe0b6cfc70fa81ff (patch)
tree       469b4746ebedb5843c631c547f102f72f5850ffa /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent     97aa9f705a84186ef0f7f31487988cfd5a8a94e8 (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces, including single statement blocks. Fix errors due to single statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Icdeede22dd26fd70fae92aa791d35b115ef49e32
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797691
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
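The change is purely mechanical. As a rough illustration of the Rule 15.6 pattern applied throughout the patch (a minimal sketch with a hypothetical helper, not code taken from pmu_gk20a.c):

#include <errno.h>
#include <stdbool.h>

/* Hypothetical example, not from pmu_gk20a.c: reject calls made before init. */
static int pmu_check_ready(bool initialized)
{
	/*
	 * Non-compliant form fixed throughout this patch:
	 *
	 *	if (!initialized)
	 *		return -EINVAL;
	 *
	 * MISRA 15.6 compliant form: the controlled body is a compound
	 * statement even when it contains a single statement.
	 */
	if (!initialized) {
		return -EINVAL;
	}

	return 0;
}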
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c  103
1 file changed, 65 insertions, 38 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 64e4a567..86cb04d9 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -51,11 +51,12 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 	u32 i = 0, j = strlen(strings);
 
 	for (; i < j; i++) {
-		if (strings[i] == '%')
+		if (strings[i] == '%') {
 			if (strings[i + 1] == 'x' || strings[i + 1] == 'X') {
 				*hex_pos = i;
 				return true;
 			}
+		}
 	}
 	*hex_pos = -1;
 	return false;
@@ -72,8 +73,9 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu)
 
 	/* allocate system memory to copy pmu trace buffer */
 	tracebuffer = nvgpu_kzalloc(g, GK20A_PMU_TRACE_BUFSIZE);
-	if (tracebuffer == NULL)
+	if (tracebuffer == NULL) {
 		return;
+	}
 
 	/* read pmu traces into system memory buffer */
 	nvgpu_mem_rd_n(g, &pmu->trace_buf, 0, tracebuffer,
@@ -85,17 +87,20 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu)
 	nvgpu_err(g, "dump PMU trace buffer");
 	for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
 		for (j = 0; j < 0x40; j++) {
-			if (trace1[(i / 4) + j])
+			if (trace1[(i / 4) + j]) {
 				break;
+			}
 		}
-		if (j == 0x40)
+		if (j == 0x40) {
 			break;
+		}
 		count = scnprintf(buf, 0x40, "Index %x: ", trace1[(i / 4)]);
 		l = 0;
 		m = 0;
 		while (nvgpu_find_hex_in_string((trace+i+20+m), g, &k)) {
-			if (k >= 40)
+			if (k >= 40) {
 				break;
+			}
 			strncpy(part_str, (trace+i+20+m), k);
 			part_str[k] = '\0';
 			count += scnprintf((buf + count), 0x40, "%s0x%x",
@@ -277,8 +282,9 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 	struct pmu_mutex *mutex;
 	u32 data, owner, max_retry;
 
-	if (!pmu->initialized)
+	if (!pmu->initialized) {
 		return -EINVAL;
+	}
 
 	BUG_ON(!token);
 	BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
@@ -346,8 +352,9 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 	struct pmu_mutex *mutex;
 	u32 owner, data;
 
-	if (!pmu->initialized)
+	if (!pmu->initialized) {
 		return -EINVAL;
+	}
 
 	BUG_ON(!token);
 	BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
@@ -364,8 +371,9 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 		return -EINVAL;
 	}
 
-	if (--mutex->ref_cnt > 0)
+	if (--mutex->ref_cnt > 0) {
 		return -EBUSY;
+	}
 
 	gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
 		pwr_pmu_mutex_value_initial_lock_f());
@@ -386,32 +394,36 @@ int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 {
 	u32 queue_head_size = 0;
 
-	if (g->ops.pmu.pmu_get_queue_head_size)
+	if (g->ops.pmu.pmu_get_queue_head_size) {
 		queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
+	}
 
 	BUG_ON(!head || !queue_head_size);
 
 	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
 
-		if (queue->index >= queue_head_size)
+		if (queue->index >= queue_head_size) {
 			return -EINVAL;
+		}
 
-		if (!set)
+		if (!set) {
 			*head = pwr_pmu_queue_head_address_v(
 				gk20a_readl(g,
 					g->ops.pmu.pmu_get_queue_head(queue->index)));
-		else
+		} else {
 			gk20a_writel(g,
 				g->ops.pmu.pmu_get_queue_head(queue->index),
 				pwr_pmu_queue_head_address_f(*head));
+		}
 	} else {
-		if (!set)
+		if (!set) {
 			*head = pwr_pmu_msgq_head_val_v(
 				gk20a_readl(g, pwr_pmu_msgq_head_r()));
-		else
+		} else {
 			gk20a_writel(g,
 				pwr_pmu_msgq_head_r(),
 				pwr_pmu_msgq_head_val_f(*head));
+		}
 	}
 
 	return 0;
@@ -422,33 +434,36 @@ int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 {
 	u32 queue_tail_size = 0;
 
-	if (g->ops.pmu.pmu_get_queue_tail_size)
+	if (g->ops.pmu.pmu_get_queue_tail_size) {
 		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
+	}
 
 	BUG_ON(!tail || !queue_tail_size);
 
 	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
 
-		if (queue->index >= queue_tail_size)
+		if (queue->index >= queue_tail_size) {
 			return -EINVAL;
+		}
 
-		if (!set)
-			*tail = pwr_pmu_queue_tail_address_v(
-				gk20a_readl(g,
-					g->ops.pmu.pmu_get_queue_tail(queue->index)));
-		else
+		if (!set) {
+			*tail = pwr_pmu_queue_tail_address_v(gk20a_readl(g,
+				g->ops.pmu.pmu_get_queue_tail(queue->index)));
+		} else {
 			gk20a_writel(g,
 				g->ops.pmu.pmu_get_queue_tail(queue->index),
 				pwr_pmu_queue_tail_address_f(*tail));
+		}
 
 	} else {
-		if (!set)
+		if (!set) {
 			*tail = pwr_pmu_msgq_tail_val_v(
 				gk20a_readl(g, pwr_pmu_msgq_tail_r()));
-		else
+		} else {
 			gk20a_writel(g,
 				pwr_pmu_msgq_tail_r(),
 				pwr_pmu_msgq_tail_val_f(*tail));
+		}
 	}
 
 	return 0;
@@ -459,18 +474,20 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	u32 queue_tail_size = 0;
 
-	if (g->ops.pmu.pmu_get_queue_tail_size)
+	if (g->ops.pmu.pmu_get_queue_tail_size) {
 		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
+	}
 
 	BUG_ON(!tail || !queue_tail_size);
 
-	if (!set)
+	if (!set) {
 		*tail = pwr_pmu_msgq_tail_val_v(
 			gk20a_readl(g, pwr_pmu_msgq_tail_r()));
-	else
+	} else {
 		gk20a_writel(g,
 			pwr_pmu_msgq_tail_r(),
 			pwr_pmu_msgq_tail_val_f(*tail));
+	}
 }
 
 int gk20a_init_pmu_setup_hw1(struct gk20a *g)
@@ -519,18 +536,20 @@ bool gk20a_pmu_is_engine_in_reset(struct gk20a *g)
 
 	pmc_enable = gk20a_readl(g, mc_enable_r());
 	if (mc_enable_pwr_v(pmc_enable) ==
-		mc_enable_pwr_disabled_v())
+		mc_enable_pwr_disabled_v()) {
 		status = true;
+	}
 
 	return status;
 }
 
 int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset)
 {
-	if (do_reset)
+	if (do_reset) {
 		g->ops.mc.enable(g, mc_enable_pwr_enabled_f());
-	else
+	} else {
 		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
+	}
 
 	return 0;
 }
@@ -547,8 +566,9 @@ u32 gk20a_pmu_pg_engines_list(struct gk20a *g)
 
 u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 {
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 		return NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING;
+	}
 
 	return 0;
 }
@@ -567,8 +587,9 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 	struct pmu_cmd cmd;
 	u32 seq;
 
-	if (!pmu->pmu_ready || !entries || !pmu->zbc_ready)
+	if (!pmu->pmu_ready || !entries || !pmu->zbc_ready) {
 		return;
+	}
 
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
@@ -583,8 +604,9 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 			pmu_handle_zbc_msg, pmu, &seq, ~0);
 	pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
 			&pmu->zbc_save_done, 1);
-	if (!pmu->zbc_save_done)
+	if (!pmu->zbc_save_done) {
 		nvgpu_err(g, "ZBC save timeout");
+	}
 }
 
 int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
@@ -596,11 +618,12 @@ int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
 
 	switch (msg->msg_type) {
 	case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION:
-		if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1))
+		if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) {
 			nvgpu_clk_arb_send_thermal_alarm(pmu->g);
-		else
+		} else {
 			gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d",
 				msg->hw_slct_msg.mask);
+		}
 		break;
 	default:
 		gk20a_dbg_pmu(g, "unkown therm event received %d", msg->msg_type);
@@ -687,8 +710,9 @@ bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu)
 			pwr_falcon_irqstat_exterr_true_f() |
 			pwr_falcon_irqstat_swgen0_true_f();
 
-	if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint)
+	if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint) {
 		return true;
+	}
 
 	return false;
 }
@@ -727,9 +751,11 @@ void gk20a_pmu_isr(struct gk20a *g)
 		nvgpu_pmu_dump_falcon_stats(pmu);
 		if (gk20a_readl(g, pwr_pmu_mailbox_r
 				(PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) ==
-			PMU_MODE_MISMATCH_STATUS_VAL)
-			if (g->ops.pmu.dump_secure_fuses)
+			PMU_MODE_MISMATCH_STATUS_VAL) {
+			if (g->ops.pmu.dump_secure_fuses) {
 				g->ops.pmu.dump_secure_fuses(g);
+			}
+		}
 	}
 	if (intr & pwr_falcon_irqstat_exterr_true_f()) {
 		nvgpu_err(g,
@@ -741,8 +767,9 @@ void gk20a_pmu_isr(struct gk20a *g)
 			~pwr_falcon_exterrstat_valid_m());
 	}
 
-	if (g->ops.pmu.handle_ext_irq)
+	if (g->ops.pmu.handle_ext_irq) {
 		g->ops.pmu.handle_ext_irq(g, intr);
+	}
 
 	if (intr & pwr_falcon_irqstat_swgen0_true_f()) {
 		nvgpu_pmu_process_message(pmu);