author     Mahantesh Kumbar <mkumbar@nvidia.com>                2017-07-11 02:12:01 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-09-25 03:18:59 -0400
commit     350bb74859eb6eb0d0ba7c8e6792a0b4e48849b4 (patch)
tree       3756bbd4d958930a4d999f95b8ef90694e3cf3c1 /drivers
parent     b5556c74905974b0b19defb775f7301a341982a0 (diff)
gpu: nvgpu: PMU debug reorg
- Moved PMU debug related code to pmu_debug.c:
  Print PMU trace buffer,
  Moved PMU controller/engine status dump debug code,
  Moved ELPG stats dump code
- Removed PMU falcon controller status dump code & used
  nvgpu_flcn_dump_stats() method
- Method to print ELPG stats
- PMU HAL to print PMU engine & ELPG debug info upon error

NVGPU JIRA-96

Change-Id: Iaa3d983f1d3b78a1b051beb6c109d3da8f8c90bc
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1516640
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
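For orientation, the reorganized debug call flow can be summarized as below. This is a condensed sketch assembled from the hunks that follow, not verbatim file contents; the caller shown is the existing ELPG disallow error path in common/pmu/pmu_pg.c as updated by this change.

	/*
	 * Sketch of the new layering (condensed from the hunks below):
	 *
	 *   nvgpu_pmu_dump_falcon_stats()          common/pmu/pmu_debug.c
	 *     -> nvgpu_flcn_dump_stats()           generic falcon status dump
	 *     -> gk20a_pmu_dump_falcon_stats()     chip HAL: PMU registers
	 *          -> print_pmu_trace()            PMU F/W trace buffer
	 *     -> gk20a_fecs_dump_falcon_stats()    FECS status (PMU may crash
	 *                                          due to a FECS crash)
	 *
	 *   nvgpu_pmu_dump_elpg_stats()            common/pmu/pmu_debug.c
	 *     -> nvgpu_flcn_print_dmem()           PG stats read from PMU DMEM
	 *     -> gk20a_pmu_dump_elpg_stats()       chip HAL: idle mask/count regs
	 */

	/* Existing caller in common/pmu/pmu_pg.c, now using the common wrappers: */
	if (*ptr != PMU_ELPG_STAT_OFF) {
		nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
		nvgpu_pmu_dump_elpg_stats(pmu);
		nvgpu_pmu_dump_falcon_stats(pmu);
		ret = -EBUSY;
		goto exit_unlock;
	}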
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/Makefile.nvgpu             1
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_debug.c    48
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_pg.c        8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c        155
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.h         10
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/pmu.h        5
6 files changed, 72 insertions, 155 deletions
diff --git a/drivers/gpu/nvgpu/Makefile.nvgpu b/drivers/gpu/nvgpu/Makefile.nvgpu
index 6e475fcb..35d5109b 100644
--- a/drivers/gpu/nvgpu/Makefile.nvgpu
+++ b/drivers/gpu/nvgpu/Makefile.nvgpu
@@ -69,6 +69,7 @@ nvgpu-y := \
 	common/pmu/pmu_fw.o \
 	common/pmu/pmu_pg.o \
 	common/pmu/pmu_perfmon.o \
+	common/pmu/pmu_debug.o \
 	common/ltc.o \
 	gk20a/gk20a.o \
 	gk20a/bus_gk20a.o \
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_debug.c b/drivers/gpu/nvgpu/common/pmu/pmu_debug.c
new file mode 100644
index 00000000..744a618d
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_debug.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <nvgpu/pmu.h>
+#include <nvgpu/log.h>
+#include <nvgpu/timers.h>
+#include <nvgpu/kmem.h>
+#include <nvgpu/dma.h>
+#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
+
+#include "gk20a/gk20a.h"
+
+void nvgpu_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
+{
+	struct gk20a *g = pmu->g;
+
+	/* Print PG stats */
+	nvgpu_err(g, "Print PG stats");
+	nvgpu_flcn_print_dmem(pmu->flcn,
+		pmu->stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_GRAPHICS],
+		sizeof(struct pmu_pg_stats_v2));
+
+	gk20a_pmu_dump_elpg_stats(pmu);
+}
+
+void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
+{
+	struct gk20a *g = pmu->g;
+
+	nvgpu_flcn_dump_stats(pmu->flcn);
+	gk20a_pmu_dump_falcon_stats(pmu);
+
+	nvgpu_err(g, "pmu state: %d", pmu->pmu_state);
+	nvgpu_err(g, "elpg state: %d", pmu->elpg_stat);
+
+	/* PMU may crash due to FECS crash. Dump FECS status */
+	gk20a_fecs_dump_falcon_stats(g);
+}
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
index b435f4a7..3f74b9f7 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -265,8 +265,8 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 	if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
 		nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
 			pmu->elpg_stat);
-		pmu_dump_elpg_stats(pmu);
-		pmu_dump_falcon_stats(pmu);
+		nvgpu_pmu_dump_elpg_stats(pmu);
+		nvgpu_pmu_dump_falcon_stats(pmu);
 		ret = -EBUSY;
 		goto exit_unlock;
 	}
@@ -315,8 +315,8 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 				ptr, PMU_ELPG_STAT_OFF);
 			if (*ptr != PMU_ELPG_STAT_OFF) {
 				nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
-				pmu_dump_elpg_stats(pmu);
-				pmu_dump_falcon_stats(pmu);
+				nvgpu_pmu_dump_elpg_stats(pmu);
+				nvgpu_pmu_dump_falcon_stats(pmu);
 				ret = -EBUSY;
 				goto exit_unlock;
 			}
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 11de11de..ea0559aa 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -39,7 +39,6 @@
 #define gk20a_dbg_pmu(fmt, arg...) \
 	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
 
-
 bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 {
 	u32 i = 0, j = strlen(strings);
@@ -55,11 +54,11 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 	return false;
 }
 
-static void printtrace(struct nvgpu_pmu *pmu)
+static void print_pmu_trace(struct nvgpu_pmu *pmu)
 {
+	struct gk20a *g = pmu->g;
 	u32 i = 0, j = 0, k, l, m, count;
 	char part_str[40], buf[0x40];
-	struct gk20a *g = gk20a_from_pmu(pmu);
 	void *tracebuffer;
 	char *trace;
 	u32 *trace1;
@@ -70,13 +69,13 @@ static void printtrace(struct nvgpu_pmu *pmu)
 		return;
 
 	/* read pmu traces into system memory buffer */
-	nvgpu_mem_rd_n(g, &pmu->trace_buf,
-		0, tracebuffer, GK20A_PMU_TRACE_BUFSIZE);
+	nvgpu_mem_rd_n(g, &pmu->trace_buf, 0, tracebuffer,
+		GK20A_PMU_TRACE_BUFSIZE);
 
 	trace = (char *)tracebuffer;
 	trace1 = (u32 *)tracebuffer;
 
-	nvgpu_err(g, "Dump pmutrace");
+	nvgpu_err(g, "dump PMU trace buffer");
 	for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
 		for (j = 0; j < 0x40; j++)
 			if (trace1[(i / 4) + j])
@@ -100,6 +99,7 @@ static void printtrace(struct nvgpu_pmu *pmu)
 		scnprintf((buf + count), 0x40, "%s", (trace+i+20+m));
 		nvgpu_err(g, "%s", buf);
 	}
+
 	nvgpu_kfree(g, tracebuffer);
 }
 
@@ -597,51 +597,9 @@ int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
 	return 0;
 }
 
-void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
+void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_pg_stats stats;
-
-	nvgpu_flcn_copy_from_dmem(pmu->flcn,
-		pmu->stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_GRAPHICS],
-		(u8 *)&stats, sizeof(struct pmu_pg_stats), 0);
-
-	gk20a_dbg_pmu("pg_entry_start_timestamp : 0x%016llx",
-		stats.pg_entry_start_timestamp);
-	gk20a_dbg_pmu("pg_exit_start_timestamp : 0x%016llx",
-		stats.pg_exit_start_timestamp);
-	gk20a_dbg_pmu("pg_ingating_start_timestamp : 0x%016llx",
-		stats.pg_ingating_start_timestamp);
-	gk20a_dbg_pmu("pg_ungating_start_timestamp : 0x%016llx",
-		stats.pg_ungating_start_timestamp);
-	gk20a_dbg_pmu("pg_avg_entry_time_us : 0x%08x",
-		stats.pg_avg_entry_time_us);
-	gk20a_dbg_pmu("pg_avg_exit_time_us : 0x%08x",
-		stats.pg_avg_exit_time_us);
-	gk20a_dbg_pmu("pg_ingating_cnt : 0x%08x",
-		stats.pg_ingating_cnt);
-	gk20a_dbg_pmu("pg_ingating_time_us : 0x%08x",
-		stats.pg_ingating_time_us);
-	gk20a_dbg_pmu("pg_ungating_count : 0x%08x",
-		stats.pg_ungating_count);
-	gk20a_dbg_pmu("pg_ungating_time_us 0x%08x: ",
-		stats.pg_ungating_time_us);
-	gk20a_dbg_pmu("pg_gating_cnt : 0x%08x",
-		stats.pg_gating_cnt);
-	gk20a_dbg_pmu("pg_gating_deny_cnt : 0x%08x",
-		stats.pg_gating_deny_cnt);
-
-	/*
-	 Turn on PG_DEBUG in ucode and locate symbol "ElpgLog" offset
-	 in .nm file, e.g. 0x1000066c. use 0x66c.
-	 u32 i, val[20];
-	 nvgpu_flcn_copy_from_dmem(pmu->flcn, 0x66c,
-		(u8 *)val, sizeof(val), 0);
-	 gk20a_dbg_pmu("elpg log begin");
-	 for (i = 0; i < 20; i++)
-		gk20a_dbg_pmu("0x%08x", val[i]);
-	 gk20a_dbg_pmu("elpg log end");
-	 */
 
 	gk20a_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3)));
@@ -660,40 +618,13 @@ void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
 		gk20a_readl(g, pwr_pmu_idle_count_r(4)));
 	gk20a_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_count_r(7)));
-
-	/*
-	 TBD: script can't generate those registers correctly
-	 gk20a_dbg_pmu("pwr_pmu_idle_status_r(): 0x%08x",
-		gk20a_readl(g, pwr_pmu_idle_status_r()));
-	 gk20a_dbg_pmu("pwr_pmu_pg_ctrl_r(): 0x%08x",
-		gk20a_readl(g, pwr_pmu_pg_ctrl_r()));
-	 */
 }
 
-void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
+void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	unsigned int i;
 
-	nvgpu_err(g, "pwr_falcon_os_r : %d",
-		gk20a_readl(g, pwr_falcon_os_r()));
-	nvgpu_err(g, "pwr_falcon_cpuctl_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_cpuctl_r()));
-	nvgpu_err(g, "pwr_falcon_idlestate_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_idlestate_r()));
-	nvgpu_err(g, "pwr_falcon_mailbox0_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_mailbox0_r()));
-	nvgpu_err(g, "pwr_falcon_mailbox1_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_mailbox1_r()));
-	nvgpu_err(g, "pwr_falcon_irqstat_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_irqstat_r()));
-	nvgpu_err(g, "pwr_falcon_irqmode_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_irqmode_r()));
-	nvgpu_err(g, "pwr_falcon_irqmask_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_irqmask_r()));
-	nvgpu_err(g, "pwr_falcon_irqdest_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_irqdest_r()));
-
 	for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++)
 		nvgpu_err(g, "pwr_pmu_mailbox_r(%d) : 0x%x",
 			i, gk20a_readl(g, pwr_pmu_mailbox_r(i)));
@@ -702,14 +633,6 @@ void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
 		nvgpu_err(g, "pwr_pmu_debug_r(%d) : 0x%x",
 			i, gk20a_readl(g, pwr_pmu_debug_r(i)));
 
-	for (i = 0; i < 6/*NV_PPWR_FALCON_ICD_IDX_RSTAT__SIZE_1*/; i++) {
-		gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
-			pwr_pmu_falcon_icd_cmd_opc_rstat_f() |
-			pwr_pmu_falcon_icd_cmd_idx_f(i));
-		nvgpu_err(g, "pmu_rstat (%d) : 0x%x",
-			i, gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
-	}
-
 	i = gk20a_readl(g, pwr_pmu_bar0_error_status_r());
 	nvgpu_err(g, "pwr_pmu_bar0_error_status_r : 0x%x", i);
 	if (i != 0) {
@@ -736,62 +659,8 @@ void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
 			gk20a_readl(g, mc_enable_r()));
 	}
 
-	nvgpu_err(g, "pwr_falcon_engctl_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_engctl_r()));
-	nvgpu_err(g, "pwr_falcon_curctx_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_curctx_r()));
-	nvgpu_err(g, "pwr_falcon_nxtctx_r : 0x%x",
-		gk20a_readl(g, pwr_falcon_nxtctx_r()));
-
-	gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
-		pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
-		pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
-	nvgpu_err(g, "PMU_FALCON_REG_IMB : 0x%x",
-		gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
-
-	gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
-		pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
-		pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
-	nvgpu_err(g, "PMU_FALCON_REG_DMB : 0x%x",
-		gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
-
-	gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
-		pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
-		pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
-	nvgpu_err(g, "PMU_FALCON_REG_CSW : 0x%x",
-		gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
-
-	gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
-		pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
-		pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
-	nvgpu_err(g, "PMU_FALCON_REG_CTX : 0x%x",
-		gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
-
-	gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
-		pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
-		pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
-	nvgpu_err(g, "PMU_FALCON_REG_EXCI : 0x%x",
-		gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
-
-	for (i = 0; i < 4; i++) {
-		gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
-			pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
-			pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_PC));
-		nvgpu_err(g, "PMU_FALCON_REG_PC : 0x%x",
-			gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
-
-		gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
-			pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
-			pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_SP));
-		nvgpu_err(g, "PMU_FALCON_REG_SP : 0x%x",
-			gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
-	}
-	nvgpu_err(g, "elpg stat: %d",
-		pmu->elpg_stat);
-
-	/* PMU may crash due to FECS crash. Dump FECS status */
-	gk20a_fecs_dump_falcon_stats(g);
-	printtrace(pmu);
+	/* Print PMU F/W debug prints */
+	print_pmu_trace(pmu);
 }
 
 bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu)
@@ -840,7 +709,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 
 	if (intr & pwr_falcon_irqstat_halt_true_f()) {
 		nvgpu_err(g, "pmu halt intr not implemented");
-		pmu_dump_falcon_stats(pmu);
+		nvgpu_pmu_dump_falcon_stats(pmu);
 		if (gk20a_readl(g, pwr_pmu_mailbox_r
 				(PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) ==
 				PMU_MODE_MISMATCH_STATUS_VAL)
@@ -850,7 +719,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 	if (intr & pwr_falcon_irqstat_exterr_true_f()) {
 		nvgpu_err(g,
 			"pmu exterr intr not implemented. Clearing interrupt.");
-		pmu_dump_falcon_stats(pmu);
+		nvgpu_pmu_dump_falcon_stats(pmu);
 
 		gk20a_writel(g, pwr_falcon_exterrstat_r(),
 			gk20a_readl(g, pwr_falcon_exterrstat_r()) &
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index 3ad0c116..3992b029 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -57,12 +57,10 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g);
 void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr);
 bool gk20a_is_pmu_supported(struct gk20a *g);
 
-void pmu_copy_to_dmem(struct nvgpu_pmu *pmu,
-	u32 dst, u8 *src, u32 size, u8 port);
 int pmu_bootstrap(struct nvgpu_pmu *pmu);
 
-void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
-void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
+void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
+void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
 
 void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable);
 int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
@@ -74,8 +72,4 @@ void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 bool gk20a_pmu_is_engine_in_reset(struct gk20a *g);
 int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset);
 
-int pmu_idle(struct nvgpu_pmu *pmu);
-
-bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos);
-
 #endif /*__PMU_GK20A_H__*/
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
index 556d9f39..60ce8b96 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
@@ -449,4 +449,9 @@ int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
 int nvgpu_pmu_ap_send_command(struct gk20a *g,
 		union pmu_ap_cmd *p_ap_cmd, bool b_block);
 
+/* PMU debug */
+void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
+void nvgpu_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
+bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos);
+
 #endif /* __NVGPU_PMU_H__ */