Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c  52
1 file changed, 27 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 0531b387..400a49a3 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -39,8 +39,8 @@
 #include <nvgpu/hw/gk20a/hw_pwr_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_top_gk20a.h>
 
-#define gk20a_dbg_pmu(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gk20a_dbg_pmu(g, fmt, arg...) \
+	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
 
 bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 {
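
This first hunk is the heart of the change: gk20a_dbg_pmu() now takes the struct gk20a pointer and forwards to nvgpu_log(), so PMU debug messages can be filtered per device rather than through a global mask. A minimal before/after call-site sketch (the seq_id variable is illustrative, not part of this diff):

	/* before: no device context available to the logger */
	gk20a_dbg_pmu("sequence %d completed", seq_id);

	/* after: a struct gk20a *g must be in scope at the call site */
	gk20a_dbg_pmu(g, "sequence %d completed", seq_id);

The remaining hunks update every call site accordingly.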
@@ -139,7 +139,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 	u32 intr_mask;
 	u32 intr_dest;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true,
 		mc_intr_mask_0_pmu_enabled_f());
@@ -166,7 +166,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 			mc_intr_mask_0_pmu_enabled_f());
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 
@@ -179,7 +179,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
 	u64 addr_code, addr_data, addr_load;
 	u32 i, blocks, addr_args;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	gk20a_writel(g, pwr_falcon_itfen_r(),
 		gk20a_readl(g, pwr_falcon_itfen_r()) |
@@ -286,7 +286,7 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 
 	if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) {
 		BUG_ON(mutex->ref_cnt == 0);
-		gk20a_dbg_pmu("already acquired by owner : 0x%08x", *token);
+		gk20a_dbg_pmu(g, "already acquired by owner : 0x%08x", *token);
 		mutex->ref_cnt++;
 		return 0;
 	}
@@ -313,12 +313,12 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 
 		if (owner == data) {
 			mutex->ref_cnt = 1;
-			gk20a_dbg_pmu("mutex acquired: id=%d, token=0x%x",
+			gk20a_dbg_pmu(g, "mutex acquired: id=%d, token=0x%x",
 				mutex->index, *token);
 			*token = owner;
 			return 0;
 		} else {
-			gk20a_dbg_info("fail to acquire mutex idx=0x%08x",
+			nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x",
 				mutex->index);
 
 			data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
@@ -370,7 +370,7 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 		pwr_pmu_mutex_id_release_value_f(owner));
 	gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
 
-	gk20a_dbg_pmu("mutex released: id=%d, token=0x%x",
+	gk20a_dbg_pmu(g, "mutex released: id=%d, token=0x%x",
 		mutex->index, *token);
 
 	return 0;
@@ -475,7 +475,7 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	nvgpu_flcn_reset(pmu->flcn);
@@ -554,7 +554,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
 			void *param, u32 handle, u32 status)
 {
 	struct nvgpu_pmu *pmu = param;
-	gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE");
+	gk20a_dbg_pmu(g, "reply ZBC_TABLE_UPDATE");
 	pmu->zbc_save_done = 1;
 }
 
@@ -575,7 +575,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 
 	pmu->zbc_save_done = 0;
 
-	gk20a_dbg_pmu("cmd post ZBC_TABLE_UPDATE");
+	gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE");
 	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 			   pmu_handle_zbc_msg, pmu, &seq, ~0);
 	pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
@@ -587,18 +587,20 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
 			struct nv_pmu_therm_msg *msg)
 {
-	gk20a_dbg_fn("");
+	struct gk20a *g = gk20a_from_pmu(pmu);
+
+	nvgpu_log_fn(g, " ");
 
 	switch (msg->msg_type) {
 	case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION:
 		if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1))
 			nvgpu_clk_arb_send_thermal_alarm(pmu->g);
 		else
-			gk20a_dbg_pmu("Unwanted/Unregistered thermal event received %d",
+			gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d",
 				msg->hw_slct_msg.mask);
 		break;
 	default:
-		gk20a_dbg_pmu("unkown therm event received %d", msg->msg_type);
+		gk20a_dbg_pmu(g, "unknown therm event received %d", msg->msg_type);
 		break;
 	}
 
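
nvgpu_pmu_handle_therm_event() only receives the PMU pointer, so the hunk above first recovers the device with gk20a_from_pmu() before logging. A sketch of that idiom (the handler below is hypothetical and only illustrates the pattern):

	static void pmu_example_handler(struct nvgpu_pmu *pmu)
	{
		/* recover the owning device from the PMU object */
		struct gk20a *g = gk20a_from_pmu(pmu);

		nvgpu_log_fn(g, " ");
		gk20a_dbg_pmu(g, "pmu state: %d", pmu->pmu_state);
	}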
@@ -609,22 +611,22 @@ void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 
-	gk20a_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_supp_r(3): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3)));
-	gk20a_dbg_pmu("pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3)));
-	gk20a_dbg_pmu("pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3)));
-	gk20a_dbg_pmu("pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
 		gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0)));
-	gk20a_dbg_pmu("pwr_pmu_pg_intren_r(0): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_pg_intren_r(0): 0x%08x",
 		gk20a_readl(g, pwr_pmu_pg_intren_r(0)));
 
-	gk20a_dbg_pmu("pwr_pmu_idle_count_r(3): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(3): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_count_r(3)));
-	gk20a_dbg_pmu("pwr_pmu_idle_count_r(4): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(4): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_count_r(4)));
-	gk20a_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x",
+	gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(7): 0x%08x",
 		gk20a_readl(g, pwr_pmu_idle_count_r(7)));
 }
 
@@ -693,7 +695,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 	u32 intr, mask;
 	bool recheck = false;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	if (!pmu->isr_enabled) {
@@ -706,7 +708,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 
 	intr = gk20a_readl(g, pwr_falcon_irqstat_r());
 
-	gk20a_dbg_pmu("received falcon interrupt: 0x%08x", intr);
+	gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr);
 
 	intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask;
 	if (!intr || pmu->pmu_state == PMU_STATE_OFF) {
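
Note that the reworked macro keeps the GNU ##arg extension, which drops the trailing comma when no variadic arguments are passed, so both plain and formatted calls expand cleanly. A small sketch (expansions shown as comments, values illustrative):

	gk20a_dbg_pmu(g, "done");
	/* -> nvgpu_log(g, gpu_dbg_pmu, "done"); */

	gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr);
	/* -> nvgpu_log(g, gpu_dbg_pmu, "received falcon interrupt: 0x%08x", intr); */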