Diffstat (limited to 'drivers/gpu/nvgpu/gv11b')
-rw-r--r--	drivers/gpu/nvgpu/gv11b/acr_gv11b.c	38
-rw-r--r--	drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c	6
-rw-r--r--	drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c	4
-rw-r--r--	drivers/gpu/nvgpu/gv11b/fb_gv11b.c	2
-rw-r--r--	drivers/gpu/nvgpu/gv11b/fifo_gv11b.c	61
-rw-r--r--	drivers/gpu/nvgpu/gv11b/gr_gv11b.c	132
-rw-r--r--	drivers/gpu/nvgpu/gv11b/ltc_gv11b.c	4
-rw-r--r--	drivers/gpu/nvgpu/gv11b/mm_gv11b.c	2
-rw-r--r--	drivers/gpu/nvgpu/gv11b/pmu_gv11b.c	22
-rw-r--r--	drivers/gpu/nvgpu/gv11b/therm_gv11b.c	4
10 files changed, 139 insertions(+), 136 deletions(-)
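Across these files the patch replaces the device-agnostic gk20a_dbg*() logging helpers with the nvgpu_log*() family, which takes the struct gk20a instance as its first argument so messages can be attributed to a specific GPU. A minimal sketch of the pattern, assembled from the gv11b_dbg_pmu macro definitions and an acr_gv11b.c call site in the diff below (not additional code from the patch itself):

/* Old form: no device pointer, routed through the global gk20a_dbg(). */
#define gv11b_dbg_pmu(fmt, arg...) \
	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

/* New form: the caller passes the GPU instance 'g' and the message is
 * emitted through nvgpu_log() against that device. */
#define gv11b_dbg_pmu(g, fmt, arg...) \
	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

/* Call sites gain the extra argument, e.g. in gv11b_bootstrap_hs_flcn(): */
gv11b_dbg_pmu(g, "acr ucode blob start %llx\n", start);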
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
index 7ca8c703..673cb7f2 100644
--- a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,8 +43,8 @@
 #include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
 
 /*Defines*/
-#define gv11b_dbg_pmu(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
+#define gv11b_dbg_pmu(g, fmt, arg...) \
+	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
 
 static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
 {
@@ -60,7 +60,7 @@ int gv11b_alloc_blob_space(struct gk20a *g,
 {
 	int err;
 
-	gv11b_dbg_pmu("alloc blob space: NVGPU_DMA_FORCE_CONTIGUOUS");
+	gv11b_dbg_pmu(g, "alloc blob space: NVGPU_DMA_FORCE_CONTIGUOUS");
 	err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
 			size, mem);
 
@@ -87,10 +87,10 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
 	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
 	size = acr->ucode_blob.size;
 
-	gv11b_dbg_pmu("acr ucode blob start %llx\n", start);
-	gv11b_dbg_pmu("acr ucode blob size %x\n", size);
+	gv11b_dbg_pmu(g, "acr ucode blob start %llx\n", start);
+	gv11b_dbg_pmu(g, "acr ucode blob size %x\n", size);
 
-	gv11b_dbg_pmu("");
+	gv11b_dbg_pmu(g, " ");
 
 	if (!acr_fw) {
 		/*First time init case*/
@@ -110,17 +110,17 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
 			acr->fw_hdr->hdr_offset);
 	img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);
 
-	gv11b_dbg_pmu("sig dbg offset %u\n",
+	gv11b_dbg_pmu(g, "sig dbg offset %u\n",
 		acr->fw_hdr->sig_dbg_offset);
-	gv11b_dbg_pmu("sig dbg size %u\n", acr->fw_hdr->sig_dbg_size);
-	gv11b_dbg_pmu("sig prod offset %u\n",
+	gv11b_dbg_pmu(g, "sig dbg size %u\n", acr->fw_hdr->sig_dbg_size);
+	gv11b_dbg_pmu(g, "sig prod offset %u\n",
 		acr->fw_hdr->sig_prod_offset);
-	gv11b_dbg_pmu("sig prod size %u\n",
+	gv11b_dbg_pmu(g, "sig prod size %u\n",
 		acr->fw_hdr->sig_prod_size);
-	gv11b_dbg_pmu("patch loc %u\n", acr->fw_hdr->patch_loc);
-	gv11b_dbg_pmu("patch sig %u\n", acr->fw_hdr->patch_sig);
-	gv11b_dbg_pmu("header offset %u\n", acr->fw_hdr->hdr_offset);
-	gv11b_dbg_pmu("header size %u\n", acr->fw_hdr->hdr_size);
+	gv11b_dbg_pmu(g, "patch loc %u\n", acr->fw_hdr->patch_loc);
+	gv11b_dbg_pmu(g, "patch sig %u\n", acr->fw_hdr->patch_sig);
+	gv11b_dbg_pmu(g, "header offset %u\n", acr->fw_hdr->hdr_offset);
+	gv11b_dbg_pmu(g, "header size %u\n", acr->fw_hdr->hdr_size);
 
 	/* Lets patch the signatures first.. */
 	if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
@@ -144,7 +144,7 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
 	}
 
 	for (index = 0; index < 9; index++)
-		gv11b_dbg_pmu("acr_ucode_header_t210_load %u\n",
+		gv11b_dbg_pmu(g, "acr_ucode_header_t210_load %u\n",
 			acr_ucode_header_t210_load[index]);
 
 	acr_dmem = (u64 *)
@@ -212,7 +212,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
 	struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
 	u32 dst;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	gk20a_writel(g, pwr_falcon_itfen_r(),
 			gk20a_readl(g, pwr_falcon_itfen_r()) |
@@ -237,7 +237,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
 		(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
 		pmu_bl_gm10x_desc->bl_start_tag);
 
-	gv11b_dbg_pmu("Before starting falcon with BL\n");
+	gv11b_dbg_pmu(g, "Before starting falcon with BL\n");
 
 	virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;
 
@@ -281,7 +281,7 @@ int gv11b_init_pmu_setup_hw1(struct gk20a *g,
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	nvgpu_flcn_reset(pmu->flcn);
diff --git a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c
index bb7c37bd..b4e2cb79 100644
--- a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c
@@ -1,7 +1,7 @@
 /*
  * GV11B Cycle stats snapshots support
  *
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -148,7 +148,7 @@ int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch,
 			perf_pmasys_mem_block_target_lfb_f()));
 
 
-	gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n");
+	nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n");
 
 	return 0;
 
@@ -186,7 +186,7 @@ void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr)
 	memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	data->hw_snapshot = NULL;
 
-	gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n");
+	nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n");
 }
 
 int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending,
diff --git a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
index db09016c..5dea7654 100644
--- a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
@@ -57,7 +57,7 @@ int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size)
 	u32 inst_pa_page;
 	int err;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 	err = gk20a_busy(g);
 	if (err) {
 		nvgpu_err(g, "failed to poweron");
@@ -100,7 +100,7 @@ int gv11b_perfbuf_disable_locked(struct gk20a *g)
 {
 	int err;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 	err = gk20a_busy(g);
 	if (err) {
 		nvgpu_err(g, "failed to poweron");
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
index 30a2bca2..8bbde5c3 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -1427,7 +1427,7 @@ static int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
 	u32 reg_val;
 	struct nvgpu_timeout timeout;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&g->mm.tlb_lock);
 
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 11b393e5..932e7626 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -60,7 +60,7 @@
 
 void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
 {
-
+	struct gk20a *g = tsg->g;
 	u32 runlist_entry_0 = ram_rl_entry_type_tsg_v();
 
 	if (tsg->timeslice_timeout)
@@ -79,7 +79,7 @@ void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
 	runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid);
 	runlist[3] = 0;
 
-	gk20a_dbg_info("gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n",
+	nvgpu_log_info(g, "gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n",
 		runlist[0], runlist[1], runlist[2], runlist[3]);
 
 }
@@ -119,7 +119,7 @@ void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist)
 		ram_rl_entry_chid_f(c->chid);
 	runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi);
 
-	gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n",
+	nvgpu_log_info(g, "gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n",
 		runlist[0], runlist[1], runlist[2], runlist[3]);
 }
 
@@ -139,7 +139,7 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
 	struct nvgpu_mem *mem = &c->inst_block;
 	u32 data;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
 
@@ -211,10 +211,11 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
 
 void gv11b_ring_channel_doorbell(struct channel_gk20a *c)
 {
-	struct fifo_gk20a *f = &c->g->fifo;
+	struct gk20a *g = c->g;
+	struct fifo_gk20a *f = &g->fifo;
 	u32 hw_chid = f->channel_base + c->chid;
 
-	gk20a_dbg_info("channel ring door bell %d\n", c->chid);
+	nvgpu_log_info(g, "channel ring door bell %d\n", c->chid);
 
 	nvgpu_usermode_writel(c->g, usermode_notify_channel_pending_r(),
 		usermode_notify_channel_pending_id_f(hw_chid));
@@ -256,7 +257,7 @@ void channel_gv11b_unbind(struct channel_gk20a *ch)
 {
 	struct gk20a *g = ch->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
 		gk20a_writel(g, ccsr_channel_inst_r(ch->chid),
@@ -729,7 +730,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id,
 					timeout_rc_type);
 		if (func_ret != 0) {
-			gk20a_dbg_info("preempt timeout pbdma %d", pbdma_id);
+			nvgpu_log_info(g, "preempt timeout pbdma %d", pbdma_id);
 			ret |= func_ret;
 		}
 	}
@@ -743,7 +744,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 					timeout_rc_type);
 
 		if (func_ret != 0) {
-			gk20a_dbg_info("preempt timeout engine %d", act_eng_id);
+			nvgpu_log_info(g, "preempt timeout engine %d", act_eng_id);
 			ret |= func_ret;
 		}
 	}
@@ -812,10 +813,10 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	u32 mutex_ret = 0;
 	u32 runlist_id;
 
-	gk20a_dbg_fn("%d", tsgid);
+	nvgpu_log_fn(g, "%d", tsgid);
 
 	runlist_id = f->tsg[tsgid].runlist_id;
-	gk20a_dbg_fn("runlist_id %d", runlist_id);
+	nvgpu_log_fn(g, "runlist_id %d", runlist_id);
 
 	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
@@ -839,7 +840,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
 	u32 mutex_ret = 0;
 	u32 runlist_id;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
 		if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
@@ -910,11 +911,11 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
 		return -EINVAL;
 
 	if (runlist_id >= g->fifo.max_runlists) {
-		gk20a_dbg_info("runlist_id = %d", runlist_id);
+		nvgpu_log_info(g, "runlist_id = %d", runlist_id);
 		return -EINVAL;
 	}
 
-	gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id);
+	nvgpu_log_fn(g, "preempt id = %d, runlist_id = %d", id, runlist_id);
 
 	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
@@ -1155,7 +1156,7 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
 	unsigned int i;
 	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* enable pmc pfifo */
 	g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());
@@ -1208,11 +1209,11 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
 		gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);
 
 		intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
-		gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
+		nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
 		gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);
 
 		intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i));
-		gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
+		nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
 		gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall);
 	}
 
@@ -1246,12 +1247,12 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
 	/* clear and enable pfifo interrupt */
 	gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
 	mask = gv11b_fifo_intr_0_en_mask(g);
-	gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask);
+	nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
 	gk20a_writel(g, fifo_intr_en_0_r(), mask);
-	gk20a_dbg_info("fifo_intr_en_1 = 0x80000000");
+	nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
 	gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -1350,7 +1351,7 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
 
 		tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info);
 	}
-	gk20a_dbg_info("ctxsw timeout info: tsgid = %d", tsgid);
+	nvgpu_log_info(g, "ctxsw timeout info: tsgid = %d", tsgid);
 
 	/*
 	 * STATUS indicates whether the context request ack was eventually
@@ -1391,14 +1392,14 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
 	if (*info_status ==
 		fifo_intr_ctxsw_timeout_info_status_ack_received_v()) {
 
-		gk20a_dbg_info("ctxsw timeout info : ack received");
+		nvgpu_log_info(g, "ctxsw timeout info : ack received");
 		/* no need to recover */
 		tsgid = FIFO_INVAL_TSG_ID;
 
 	} else if (*info_status ==
 		fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) {
 
-		gk20a_dbg_info("ctxsw timeout info : dropped timeout");
+		nvgpu_log_info(g, "ctxsw timeout info : dropped timeout");
 		/* no need to recover */
 		tsgid = FIFO_INVAL_TSG_ID;
 
@@ -1429,7 +1430,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
 	timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r());
 	timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val);
 
-	gk20a_dbg_info("eng ctxsw timeout period = 0x%x", timeout_val);
+	nvgpu_log_info(g, "eng ctxsw timeout period = 0x%x", timeout_val);
 
 	for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) {
 		active_eng_id = g->fifo.active_engines_list[engine_id];
@@ -1469,7 +1470,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
 				true, true, verbose,
 				RC_TYPE_CTXSW_TIMEOUT);
 		} else {
-			gk20a_dbg_info(
+			nvgpu_log_info(g,
 				"fifo is waiting for ctx switch: "
 				"for %d ms, %s=%d", ms, "tsg", tsgid);
 		}
@@ -1490,7 +1491,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
 			pbdma_intr_0, handled, error_notifier);
 
 	if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) {
-		gk20a_dbg(gpu_dbg_intr, "clear faulted error on pbdma id %d",
+		nvgpu_log(g, gpu_dbg_intr, "clear faulted error on pbdma id %d",
 				pbdma_id);
 		gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
 		*handled |= pbdma_intr_0_clear_faulted_error_pending_f();
@@ -1498,7 +1499,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
 	}
 
 	if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) {
-		gk20a_dbg(gpu_dbg_intr, "eng reset intr on pbdma id %d",
+		nvgpu_log(g, gpu_dbg_intr, "eng reset intr on pbdma id %d",
 				pbdma_id);
 		*handled |= pbdma_intr_0_eng_reset_pending_f();
 		rc_type = RC_TYPE_PBDMA_FAULT;
@@ -1545,7 +1546,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g,
 		return RC_TYPE_NO_RC;
 
 	if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) {
-		gk20a_dbg(gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d",
+		nvgpu_log(g, gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d",
 				pbdma_id);
 		nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ",
 				pbdma_id, pbdma_intr_1);
@@ -1753,7 +1754,7 @@ void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g,
 	u64 gpu_va = gpu_va_base +
 		nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	off = cmd->off + off;
 
@@ -1792,7 +1793,7 @@ void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g,
 {
 	u32 off = cmd->off;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* semaphore_a */
 	nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004);
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index 52e442f3..536d9dcb 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -96,7 +96,7 @@ bool gr_gv11b_is_valid_class(struct gk20a *g, u32 class_num)
96 default: 96 default:
97 break; 97 break;
98 } 98 }
99 gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); 99 nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid);
100 return valid; 100 return valid;
101} 101}
102 102
@@ -190,7 +190,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
190 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(l1_tag_ecc_status); 190 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(l1_tag_ecc_status);
191 191
192 if ((l1_tag_corrected_err_count_delta > 0) || is_l1_tag_ecc_corrected_total_err_overflow) { 192 if ((l1_tag_corrected_err_count_delta > 0) || is_l1_tag_ecc_corrected_total_err_overflow) {
193 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 193 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
194 "corrected error (SBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]", 194 "corrected error (SBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]",
195 l1_tag_ecc_corrected_err_status, is_l1_tag_ecc_corrected_total_err_overflow); 195 l1_tag_ecc_corrected_err_status, is_l1_tag_ecc_corrected_total_err_overflow);
196 196
@@ -205,7 +205,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
205 0); 205 0);
206 } 206 }
207 if ((l1_tag_uncorrected_err_count_delta > 0) || is_l1_tag_ecc_uncorrected_total_err_overflow) { 207 if ((l1_tag_uncorrected_err_count_delta > 0) || is_l1_tag_ecc_uncorrected_total_err_overflow) {
208 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 208 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
209 "Uncorrected error (DBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]", 209 "Uncorrected error (DBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]",
210 l1_tag_ecc_uncorrected_err_status, is_l1_tag_ecc_uncorrected_total_err_overflow); 210 l1_tag_ecc_uncorrected_err_status, is_l1_tag_ecc_uncorrected_total_err_overflow);
211 211
@@ -282,7 +282,7 @@ static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc,
282 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(lrf_ecc_status); 282 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(lrf_ecc_status);
283 283
284 if ((lrf_corrected_err_count_delta > 0) || is_lrf_ecc_corrected_total_err_overflow) { 284 if ((lrf_corrected_err_count_delta > 0) || is_lrf_ecc_corrected_total_err_overflow) {
285 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 285 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
286 "corrected error (SBE) detected in SM LRF! err_mask [%08x] is_overf [%d]", 286 "corrected error (SBE) detected in SM LRF! err_mask [%08x] is_overf [%d]",
287 lrf_ecc_corrected_err_status, is_lrf_ecc_corrected_total_err_overflow); 287 lrf_ecc_corrected_err_status, is_lrf_ecc_corrected_total_err_overflow);
288 288
@@ -297,7 +297,7 @@ static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc,
297 0); 297 0);
298 } 298 }
299 if ((lrf_uncorrected_err_count_delta > 0) || is_lrf_ecc_uncorrected_total_err_overflow) { 299 if ((lrf_uncorrected_err_count_delta > 0) || is_lrf_ecc_uncorrected_total_err_overflow) {
300 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 300 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
301 "Uncorrected error (DBE) detected in SM LRF! err_mask [%08x] is_overf [%d]", 301 "Uncorrected error (DBE) detected in SM LRF! err_mask [%08x] is_overf [%d]",
302 lrf_ecc_uncorrected_err_status, is_lrf_ecc_uncorrected_total_err_overflow); 302 lrf_ecc_uncorrected_err_status, is_lrf_ecc_uncorrected_total_err_overflow);
303 303
@@ -441,7 +441,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
441 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(cbu_ecc_status); 441 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(cbu_ecc_status);
442 442
443 if ((cbu_corrected_err_count_delta > 0) || is_cbu_ecc_corrected_total_err_overflow) { 443 if ((cbu_corrected_err_count_delta > 0) || is_cbu_ecc_corrected_total_err_overflow) {
444 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 444 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
445 "corrected error (SBE) detected in SM CBU! err_mask [%08x] is_overf [%d]", 445 "corrected error (SBE) detected in SM CBU! err_mask [%08x] is_overf [%d]",
446 cbu_ecc_corrected_err_status, is_cbu_ecc_corrected_total_err_overflow); 446 cbu_ecc_corrected_err_status, is_cbu_ecc_corrected_total_err_overflow);
447 447
@@ -456,7 +456,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
456 0); 456 0);
457 } 457 }
458 if ((cbu_uncorrected_err_count_delta > 0) || is_cbu_ecc_uncorrected_total_err_overflow) { 458 if ((cbu_uncorrected_err_count_delta > 0) || is_cbu_ecc_uncorrected_total_err_overflow) {
459 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 459 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
460 "Uncorrected error (DBE) detected in SM CBU! err_mask [%08x] is_overf [%d]", 460 "Uncorrected error (DBE) detected in SM CBU! err_mask [%08x] is_overf [%d]",
461 cbu_ecc_uncorrected_err_status, is_cbu_ecc_uncorrected_total_err_overflow); 461 cbu_ecc_uncorrected_err_status, is_cbu_ecc_uncorrected_total_err_overflow);
462 462
@@ -521,7 +521,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
521 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(l1_data_ecc_status); 521 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(l1_data_ecc_status);
522 522
523 if ((l1_data_corrected_err_count_delta > 0) || is_l1_data_ecc_corrected_total_err_overflow) { 523 if ((l1_data_corrected_err_count_delta > 0) || is_l1_data_ecc_corrected_total_err_overflow) {
524 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 524 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
525 "corrected error (SBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]", 525 "corrected error (SBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]",
526 l1_data_ecc_corrected_err_status, is_l1_data_ecc_corrected_total_err_overflow); 526 l1_data_ecc_corrected_err_status, is_l1_data_ecc_corrected_total_err_overflow);
527 527
@@ -536,7 +536,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
536 0); 536 0);
537 } 537 }
538 if ((l1_data_uncorrected_err_count_delta > 0) || is_l1_data_ecc_uncorrected_total_err_overflow) { 538 if ((l1_data_uncorrected_err_count_delta > 0) || is_l1_data_ecc_uncorrected_total_err_overflow) {
539 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 539 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
540 "Uncorrected error (DBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]", 540 "Uncorrected error (DBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]",
541 l1_data_ecc_uncorrected_err_status, is_l1_data_ecc_uncorrected_total_err_overflow); 541 l1_data_ecc_uncorrected_err_status, is_l1_data_ecc_uncorrected_total_err_overflow);
542 542
@@ -605,7 +605,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
605 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(icache_ecc_status); 605 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(icache_ecc_status);
606 606
607 if ((icache_corrected_err_count_delta > 0) || is_icache_ecc_corrected_total_err_overflow) { 607 if ((icache_corrected_err_count_delta > 0) || is_icache_ecc_corrected_total_err_overflow) {
608 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 608 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
609 "corrected error (SBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]", 609 "corrected error (SBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]",
610 icache_ecc_corrected_err_status, is_icache_ecc_corrected_total_err_overflow); 610 icache_ecc_corrected_err_status, is_icache_ecc_corrected_total_err_overflow);
611 611
@@ -620,7 +620,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
620 0); 620 0);
621 } 621 }
622 if ((icache_uncorrected_err_count_delta > 0) || is_icache_ecc_uncorrected_total_err_overflow) { 622 if ((icache_uncorrected_err_count_delta > 0) || is_icache_ecc_uncorrected_total_err_overflow) {
623 gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, 623 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
624 "Uncorrected error (DBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]", 624 "Uncorrected error (DBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]",
625 icache_ecc_uncorrected_err_status, is_icache_ecc_uncorrected_total_err_overflow); 625 icache_ecc_uncorrected_err_status, is_icache_ecc_uncorrected_total_err_overflow);
626 626
@@ -1129,14 +1129,14 @@ static void gr_gv11b_set_coalesce_buffer_size(struct gk20a *g, u32 data)
1129{ 1129{
1130 u32 val; 1130 u32 val;
1131 1131
1132 gk20a_dbg_fn(""); 1132 nvgpu_log_fn(g, " ");
1133 1133
1134 val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); 1134 val = gk20a_readl(g, gr_gpcs_tc_debug0_r());
1135 val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), 1135 val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(),
1136 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); 1136 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data));
1137 gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); 1137 gk20a_writel(g, gr_gpcs_tc_debug0_r(), val);
1138 1138
1139 gk20a_dbg_fn("done"); 1139 nvgpu_log_fn(g, "done");
1140} 1140}
1141 1141
1142static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data) 1142static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data)
@@ -1144,7 +1144,7 @@ static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data)
1144 u32 val; 1144 u32 val;
1145 bool flag; 1145 bool flag;
1146 1146
1147 gk20a_dbg_fn(""); 1147 nvgpu_log_fn(g, " ");
1148 1148
1149 val = gk20a_readl(g, gr_gpcs_tpcs_tex_in_dbg_r()); 1149 val = gk20a_readl(g, gr_gpcs_tpcs_tex_in_dbg_r());
1150 flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) ? 1 : 0; 1150 flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) ? 1 : 0;
@@ -1190,7 +1190,7 @@ static void gr_gv11b_set_skedcheck(struct gk20a *g, u32 data)
1190 1190
1191static void gv11b_gr_set_shader_exceptions(struct gk20a *g, u32 data) 1191static void gv11b_gr_set_shader_exceptions(struct gk20a *g, u32 data)
1192{ 1192{
1193 gk20a_dbg_fn(""); 1193 nvgpu_log_fn(g, " ");
1194 1194
1195 if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { 1195 if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) {
1196 gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(), 1196 gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(),
@@ -1224,7 +1224,7 @@ static void gr_gv11b_set_shader_cut_collector(struct gk20a *g, u32 data)
1224int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr, 1224int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr,
1225 u32 class_num, u32 offset, u32 data) 1225 u32 class_num, u32 offset, u32 data)
1226{ 1226{
1227 gk20a_dbg_fn(""); 1227 nvgpu_log_fn(g, " ");
1228 1228
1229 if (class_num == VOLTA_COMPUTE_A) { 1229 if (class_num == VOLTA_COMPUTE_A) {
1230 switch (offset << 2) { 1230 switch (offset << 2) {
@@ -1315,7 +1315,7 @@ void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
1315 u32 pd_ab_max_output; 1315 u32 pd_ab_max_output;
1316 u32 alpha_cb_size = data * 4; 1316 u32 alpha_cb_size = data * 4;
1317 1317
1318 gk20a_dbg_fn(""); 1318 nvgpu_log_fn(g, " ");
1319 1319
1320 if (alpha_cb_size > gr->alpha_cb_size) 1320 if (alpha_cb_size > gr->alpha_cb_size)
1321 alpha_cb_size = gr->alpha_cb_size; 1321 alpha_cb_size = gr->alpha_cb_size;
@@ -1360,7 +1360,7 @@ void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data)
1360 u32 gpc_index, ppc_index, stride, val; 1360 u32 gpc_index, ppc_index, stride, val;
1361 u32 cb_size_steady = data * 4, cb_size; 1361 u32 cb_size_steady = data * 4, cb_size;
1362 1362
1363 gk20a_dbg_fn(""); 1363 nvgpu_log_fn(g, " ");
1364 1364
1365 if (cb_size_steady > gr->attrib_cb_size) 1365 if (cb_size_steady > gr->attrib_cb_size)
1366 cb_size_steady = gr->attrib_cb_size; 1366 cb_size_steady = gr->attrib_cb_size;
@@ -1423,8 +1423,9 @@ int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size,
1423 struct nvgpu_mem *mem) 1423 struct nvgpu_mem *mem)
1424{ 1424{
1425 int err; 1425 int err;
1426 struct gk20a *g = gk20a_from_vm(vm);
1426 1427
1427 gk20a_dbg_fn(""); 1428 nvgpu_log_fn(g, " ");
1428 1429
1429 err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); 1430 err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
1430 if (err) 1431 if (err)
@@ -1500,9 +1501,9 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g,
1500 g->gr.max_tpc_count; 1501 g->gr.max_tpc_count;
1501 attrib_cb_size = ALIGN(attrib_cb_size, 128); 1502 attrib_cb_size = ALIGN(attrib_cb_size, 128);
1502 1503
1503 gk20a_dbg_info("gfxp context spill_size=%d", spill_size); 1504 nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
1504 gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); 1505 nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
1505 gk20a_dbg_info("gfxp context attrib_cb_size=%d", 1506 nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
1506 attrib_cb_size); 1507 attrib_cb_size);
1507 1508
1508 err = gr_gp10b_alloc_buffer(vm, 1509 err = gr_gp10b_alloc_buffer(vm,
@@ -1590,7 +1591,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1590 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(); 1591 ctxsw_prog_main_image_compute_preemption_options_control_cta_f();
1591 int err; 1592 int err;
1592 1593
1593 gk20a_dbg_fn(""); 1594 nvgpu_log_fn(g, " ");
1594 1595
1595 tsg = tsg_gk20a_from_ch(c); 1596 tsg = tsg_gk20a_from_ch(c);
1596 if (!tsg) 1597 if (!tsg)
@@ -1600,7 +1601,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1600 1601
1601 if (gr_ctx->graphics_preempt_mode == 1602 if (gr_ctx->graphics_preempt_mode ==
1602 NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) { 1603 NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
1603 gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); 1604 nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option);
1604 nvgpu_mem_wr(g, mem, 1605 nvgpu_mem_wr(g, mem,
1605 ctxsw_prog_main_image_graphics_preemption_options_o(), 1606 ctxsw_prog_main_image_graphics_preemption_options_o(),
1606 gfxp_preempt_option); 1607 gfxp_preempt_option);
@@ -1608,7 +1609,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1608 1609
1609 if (gr_ctx->compute_preempt_mode == 1610 if (gr_ctx->compute_preempt_mode ==
1610 NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { 1611 NVGPU_PREEMPTION_MODE_COMPUTE_CILP) {
1611 gk20a_dbg_info("CILP: %x", cilp_preempt_option); 1612 nvgpu_log_info(g, "CILP: %x", cilp_preempt_option);
1612 nvgpu_mem_wr(g, mem, 1613 nvgpu_mem_wr(g, mem,
1613 ctxsw_prog_main_image_compute_preemption_options_o(), 1614 ctxsw_prog_main_image_compute_preemption_options_o(),
1614 cilp_preempt_option); 1615 cilp_preempt_option);
@@ -1616,7 +1617,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1616 1617
1617 if (gr_ctx->compute_preempt_mode == 1618 if (gr_ctx->compute_preempt_mode ==
1618 NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { 1619 NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
1619 gk20a_dbg_info("CTA: %x", cta_preempt_option); 1620 nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
1620 nvgpu_mem_wr(g, mem, 1621 nvgpu_mem_wr(g, mem,
1621 ctxsw_prog_main_image_compute_preemption_options_o(), 1622 ctxsw_prog_main_image_compute_preemption_options_o(),
1622 cta_preempt_option); 1623 cta_preempt_option);
@@ -1647,7 +1648,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1647 (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) << 1648 (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) <<
1648 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); 1649 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
1649 1650
1650 gk20a_dbg_info("attrib cb addr : 0x%016x", addr); 1651 nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr);
1651 g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true); 1652 g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true);
1652 1653
1653 addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >> 1654 addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >>
@@ -1698,7 +1699,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
1698 } 1699 }
1699 1700
1700out: 1701out:
1701 gk20a_dbg_fn("done"); 1702 nvgpu_log_fn(g, "done");
1702} 1703}
1703static void gr_gv11b_dump_gr_per_sm_regs(struct gk20a *g, 1704static void gr_gv11b_dump_gr_per_sm_regs(struct gk20a *g,
1704 struct gk20a_debug_output *o, 1705 struct gk20a_debug_output *o,
@@ -1949,7 +1950,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
1949 u32 activity0, activity1, activity2, activity4; 1950 u32 activity0, activity1, activity2, activity4;
1950 struct nvgpu_timeout timeout; 1951 struct nvgpu_timeout timeout;
1951 1952
1952 gk20a_dbg_fn(""); 1953 nvgpu_log_fn(g, " ");
1953 1954
1954 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); 1955 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER);
1955 1956
@@ -1974,7 +1975,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
1974 gr_activity_empty_or_preempted(activity4)); 1975 gr_activity_empty_or_preempted(activity4));
1975 1976
1976 if (!gr_enabled || (!gr_busy && !ctxsw_active)) { 1977 if (!gr_enabled || (!gr_busy && !ctxsw_active)) {
1977 gk20a_dbg_fn("done"); 1978 nvgpu_log_fn(g, "done");
1978 return 0; 1979 return 0;
1979 } 1980 }
1980 1981
@@ -2191,7 +2192,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2191 NVGPU_PREEMPTION_MODE_COMPUTE_CILP); 2192 NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
2192 } 2193 }
2193 2194
2194 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2195 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2195 "SM Exception received on gpc %d tpc %d sm %d = 0x%08x", 2196 "SM Exception received on gpc %d tpc %d sm %d = 0x%08x",
2196 gpc, tpc, sm, global_esr); 2197 gpc, tpc, sm, global_esr);
2197 2198
@@ -2210,13 +2211,13 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2210 if (warp_esr != 0 || (global_esr & global_mask) != 0) { 2211 if (warp_esr != 0 || (global_esr & global_mask) != 0) {
2211 *ignore_debugger = true; 2212 *ignore_debugger = true;
2212 2213
2213 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2214 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2214 "CILP: starting wait for LOCKED_DOWN on " 2215 "CILP: starting wait for LOCKED_DOWN on "
2215 "gpc %d tpc %d sm %d", 2216 "gpc %d tpc %d sm %d",
2216 gpc, tpc, sm); 2217 gpc, tpc, sm);
2217 2218
2218 if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { 2219 if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) {
2219 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2220 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2220 "CILP: Broadcasting STOP_TRIGGER from " 2221 "CILP: Broadcasting STOP_TRIGGER from "
2221 "gpc %d tpc %d sm %d", 2222 "gpc %d tpc %d sm %d",
2222 gpc, tpc, sm); 2223 gpc, tpc, sm);
@@ -2225,7 +2226,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2225 2226
2226 gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); 2227 gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch);
2227 } else { 2228 } else {
2228 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2229 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2229 "CILP: STOP_TRIGGER from " 2230 "CILP: STOP_TRIGGER from "
2230 "gpc %d tpc %d sm %d", 2231 "gpc %d tpc %d sm %d",
2231 gpc, tpc, sm); 2232 gpc, tpc, sm);
@@ -2238,12 +2239,12 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2238 gpc, tpc, sm); 2239 gpc, tpc, sm);
2239 g->ops.gr.clear_sm_hww(g, 2240 g->ops.gr.clear_sm_hww(g,
2240 gpc, tpc, sm, global_esr_copy); 2241 gpc, tpc, sm, global_esr_copy);
2241 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2242 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2242 "CILP: HWWs cleared for " 2243 "CILP: HWWs cleared for "
2243 "gpc %d tpc %d sm %d", 2244 "gpc %d tpc %d sm %d",
2244 gpc, tpc, sm); 2245 gpc, tpc, sm);
2245 2246
2246 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); 2247 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
2247 ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); 2248 ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
2248 if (ret) { 2249 if (ret) {
2249 nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); 2250 nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
@@ -2252,7 +2253,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2252 2253
2253 dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset); 2254 dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
2254 if (dbgr_control0 & gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) { 2255 if (dbgr_control0 & gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) {
2255 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2256 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2256 "CILP: clearing SINGLE_STEP_MODE " 2257 "CILP: clearing SINGLE_STEP_MODE "
2257 "before resume for gpc %d tpc %d sm %d", 2258 "before resume for gpc %d tpc %d sm %d",
2258 gpc, tpc, sm); 2259 gpc, tpc, sm);
@@ -2262,13 +2263,13 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
2262 gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0); 2263 gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0);
2263 } 2264 }
2264 2265
2265 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2266 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2266 "CILP: resume for gpc %d tpc %d sm %d", 2267 "CILP: resume for gpc %d tpc %d sm %d",
2267 gpc, tpc, sm); 2268 gpc, tpc, sm);
2268 g->ops.gr.resume_single_sm(g, gpc, tpc, sm); 2269 g->ops.gr.resume_single_sm(g, gpc, tpc, sm);
2269 2270
2270 *ignore_debugger = true; 2271 *ignore_debugger = true;
2271 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2272 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2272 "CILP: All done on gpc %d, tpc %d sm %d", 2273 "CILP: All done on gpc %d, tpc %d sm %d",
2273 gpc, tpc, sm); 2274 gpc, tpc, sm);
2274 } 2275 }
@@ -2388,7 +2389,7 @@ int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr)
2388 GPU_LIT_NUM_TPC_PER_GPC); 2389 GPU_LIT_NUM_TPC_PER_GPC);
2389 u32 num_tpcs = num_gpcs * num_tpc_per_gpc; 2390 u32 num_tpcs = num_gpcs * num_tpc_per_gpc;
2390 2391
2391 gk20a_dbg_fn(""); 2392 nvgpu_log_fn(g, " ");
2392 2393
2393 if (!gr->map_tiles) 2394 if (!gr->map_tiles)
2394 return -1; 2395 return -1;
@@ -2535,7 +2536,7 @@ void gr_gv11b_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
2535{ 2536{
2536 u32 val, i, j; 2537 u32 val, i, j;
2537 2538
2538 gk20a_dbg_fn(""); 2539 nvgpu_log_fn(g, " ");
2539 2540
2540 for (i = 0, j = 0; i < (zcull_num_entries / 8); i++, j += 8) { 2541 for (i = 0, j = 0; i < (zcull_num_entries / 8); i++, j += 8) {
2541 val = 2542 val =
@@ -2666,8 +2667,9 @@ int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va)
2666 u32 addr_hi; 2667 u32 addr_hi;
2667 struct ctx_header_desc *ctx; 2668 struct ctx_header_desc *ctx;
2668 int err; 2669 int err;
2670 struct gk20a *g = c->g;
2669 2671
2670 gk20a_dbg_fn(""); 2672 nvgpu_log_fn(g, " ");
2671 2673
2672 err = gv11b_alloc_subctx_header(c); 2674 err = gv11b_alloc_subctx_header(c);
2673 if (err) 2675 if (err)
@@ -2704,7 +2706,7 @@ int gr_gv11b_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c)
2704 u32 pe_vaf; 2706 u32 pe_vaf;
2705 u32 pe_vsc_vpc; 2707 u32 pe_vsc_vpc;
2706 2708
2707 gk20a_dbg_fn(""); 2709 nvgpu_log_fn(g, " ");
2708 2710
2709 pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); 2711 pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r());
2710 ds_debug = gk20a_readl(g, gr_ds_debug_r()); 2712 ds_debug = gk20a_readl(g, gr_ds_debug_r());
@@ -2814,7 +2816,7 @@ void gr_gv11b_load_tpc_mask(struct gk20a *g)
2814 } 2816 }
2815 } 2817 }
2816 2818
2817 gk20a_dbg_info("pes_tpc_mask %u\n", pes_tpc_mask); 2819 nvgpu_log_info(g, "pes_tpc_mask %u\n", pes_tpc_mask);
2818 fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc); 2820 fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc);
2819 if (g->tpc_fs_mask_user && 2821 if (g->tpc_fs_mask_user &&
2820 g->tpc_fs_mask_user != fuse_tpc_mask && 2822 g->tpc_fs_mask_user != fuse_tpc_mask &&
@@ -2860,7 +2862,7 @@ int gr_gv11b_init_fs_state(struct gk20a *g)
2860 u32 ver = g->params.gpu_arch + g->params.gpu_impl; 2862 u32 ver = g->params.gpu_arch + g->params.gpu_impl;
2861 u32 rev = g->params.gpu_rev; 2863 u32 rev = g->params.gpu_rev;
2862 2864
2863 gk20a_dbg_fn(""); 2865 nvgpu_log_fn(g, " ");
2864 2866
2865 data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r()); 2867 data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r());
2866 data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(), 2868 data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(),
@@ -2928,14 +2930,14 @@ void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
2928 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); 2930 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
2929 2931
2930 reg_val = gk20a_readl(g, gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r() + offset); 2932 reg_val = gk20a_readl(g, gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r() + offset);
2931 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2933 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2932 "sm tpc esr sm sel reg val: 0x%x", reg_val); 2934 "sm tpc esr sm sel reg val: 0x%x", reg_val);
2933 *esr_sm_sel = 0; 2935 *esr_sm_sel = 0;
2934 if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val)) 2936 if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val))
2935 *esr_sm_sel = 1; 2937 *esr_sm_sel = 1;
2936 if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val)) 2938 if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val))
2937 *esr_sm_sel |= 1 << 1; 2939 *esr_sm_sel |= 1 << 1;
2938 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 2940 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2939 "esr_sm_sel bitmask: 0x%x", *esr_sm_sel); 2941 "esr_sm_sel bitmask: 0x%x", *esr_sm_sel);
2940} 2942}
2941 2943
@@ -2954,7 +2956,7 @@ int gv11b_gr_sm_trigger_suspend(struct gk20a *g)
2954 gk20a_writel(g, 2956 gk20a_writel(g,
2955 gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0); 2957 gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);
2956 2958
2957 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 2959 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
2958 "stop trigger enable: broadcast dbgr_control0: 0x%x ", 2960 "stop trigger enable: broadcast dbgr_control0: 0x%x ",
2959 dbgr_control0); 2961 dbgr_control0);
2960 2962
@@ -3012,19 +3014,19 @@ void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
3012 3014
3013 /* Only for debug purpose */ 3015 /* Only for debug purpose */
3014 for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { 3016 for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
3015 gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n", 3017 nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n",
3016 sm_id, w_state[sm_id].valid_warps[0]); 3018 sm_id, w_state[sm_id].valid_warps[0]);
3017 gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n", 3019 nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n",
3018 sm_id, w_state[sm_id].valid_warps[1]); 3020 sm_id, w_state[sm_id].valid_warps[1]);
3019 3021
3020 gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n", 3022 nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n",
3021 sm_id, w_state[sm_id].trapped_warps[0]); 3023 sm_id, w_state[sm_id].trapped_warps[0]);
3022 gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n", 3024 nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n",
3023 sm_id, w_state[sm_id].trapped_warps[1]); 3025 sm_id, w_state[sm_id].trapped_warps[1]);
3024 3026
3025 gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n", 3027 nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n",
3026 sm_id, w_state[sm_id].paused_warps[0]); 3028 sm_id, w_state[sm_id].paused_warps[0]);
3027 gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n", 3029 nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n",
3028 sm_id, w_state[sm_id].paused_warps[1]); 3030 sm_id, w_state[sm_id].paused_warps[1]);
3029 } 3031 }
3030} 3032}
@@ -3257,7 +3259,7 @@ bool gv11b_gr_sm_debugger_attached(struct gk20a *g)
3257 */ 3259 */
3258 debugger_mode = 3260 debugger_mode =
3259 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(dbgr_control0); 3261 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(dbgr_control0);
3260 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 3262 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3261 "SM Debugger Mode: %d", debugger_mode); 3263 "SM Debugger Mode: %d", debugger_mode);
3262 if (debugger_mode == 3264 if (debugger_mode ==
3263 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v()) 3265 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v())
@@ -3576,7 +3578,7 @@ static void gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(struct gk20a *g,
3576 dbgr_status0, dbgr_control0, warps_valid, 3578 dbgr_status0, dbgr_control0, warps_valid,
3577 warps_paused, warps_trapped); 3579 warps_paused, warps_trapped);
3578 else 3580 else
3579 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 3581 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3580 "STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx " 3582 "STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx "
3581 "PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n", 3583 "PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
3582 dbgr_status0, dbgr_control0, warps_valid, 3584 dbgr_status0, dbgr_control0, warps_valid,
@@ -3598,7 +3600,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
3598 gk20a_gr_tpc_offset(g, tpc) + 3600 gk20a_gr_tpc_offset(g, tpc) +
3599 gv11b_gr_sm_offset(g, sm); 3601 gv11b_gr_sm_offset(g, sm);
3600 3602
3601 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 3603 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3602 "GPC%d TPC%d: locking down SM%d", gpc, tpc, sm); 3604 "GPC%d TPC%d: locking down SM%d", gpc, tpc, sm);
3603 3605
3604 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), 3606 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
@@ -3642,7 +3644,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
3642 } 3644 }
3643 3645
3644 if (locked_down || no_error_pending) { 3646 if (locked_down || no_error_pending) {
3645 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 3647 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3646 "GPC%d TPC%d: locked down SM%d", gpc, tpc, sm); 3648 "GPC%d TPC%d: locked down SM%d", gpc, tpc, sm);
3647 return 0; 3649 return 0;
3648 } 3650 }
@@ -3677,7 +3679,7 @@ int gv11b_gr_lock_down_sm(struct gk20a *g,
3677 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) + 3679 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) +
3678 gv11b_gr_sm_offset(g, sm); 3680 gv11b_gr_sm_offset(g, sm);
3679 3681
3680 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, 3682 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3681 "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); 3683 "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm);
3682 3684
3683 /* assert stop trigger */ 3685 /* assert stop trigger */
@@ -3699,13 +3701,13 @@ void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
3699 3701
3700 gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset, 3702 gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset,
3701 global_esr); 3703 global_esr);
3702 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 3704 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
3703 "Cleared HWW global esr, current reg val: 0x%x", 3705 "Cleared HWW global esr, current reg val: 0x%x",
3704 gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + 3706 gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() +
3705 offset)); 3707 offset));
3706 3708
3707 gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset, 0); 3709 gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset, 0);
3708 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, 3710 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
3709 "Cleared HWW warp esr, current reg val: 0x%x", 3711 "Cleared HWW warp esr, current reg val: 0x%x",
3710 gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + 3712 gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() +
3711 offset)); 3713 offset));
@@ -4440,7 +4442,7 @@ int gr_gv11b_decode_priv_addr(struct gk20a *g, u32 addr,
4440{ 4442{
4441 u32 gpc_addr; 4443 u32 gpc_addr;
4442 4444
4443 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 4445 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
4444 4446
4445 /* setup defaults */ 4447 /* setup defaults */
4446 *addr_type = CTXSW_ADDR_TYPE_SYS; 4448 *addr_type = CTXSW_ADDR_TYPE_SYS;
@@ -4591,12 +4593,12 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
4591 t = 0; 4593 t = 0;
4592 *num_registers = 0; 4594 *num_registers = 0;
4593 4595
4594 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 4596 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
4595 4597
4596 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, 4598 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
4597 &gpc_num, &tpc_num, &ppc_num, &be_num, 4599 &gpc_num, &tpc_num, &ppc_num, &be_num,
4598 &broadcast_flags); 4600 &broadcast_flags);
4599 gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); 4601 nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type);
4600 if (err) 4602 if (err)
4601 return err; 4603 return err;
4602 4604
@@ -4690,7 +4692,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
4690 } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || 4692 } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) ||
4691 (addr_type == CTXSW_ADDR_TYPE_ETPC)) && 4693 (addr_type == CTXSW_ADDR_TYPE_ETPC)) &&
4692 g->ops.gr.egpc_etpc_priv_addr_table) { 4694 g->ops.gr.egpc_etpc_priv_addr_table) {
4693 gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); 4695 nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC");
4694 g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, 4696 g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num,
4695 broadcast_flags, priv_addr_table, &t); 4697 broadcast_flags, priv_addr_table, &t);
4696 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { 4698 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) {
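The gr_gv11b.c hunks above switch the SM debug prints from gk20a_dbg() to nvgpu_log(), which takes the struct gk20a pointer plus an OR'd mask of gpu_dbg_* flags. A minimal sketch of a caller in that style follows; the helper name is hypothetical and the assumption is that nvgpu_log() filters the message against the per-device log mask, as the converted call sites suggest.

	/* Hypothetical helper, not part of the diff: logs an SM lock-down
	 * event using the device-aware nvgpu_log() with the same flag
	 * combination used in the hunks above. */
	static void gv11b_gr_log_sm_locked(struct gk20a *g,
					   u32 gpc, u32 tpc, u32 sm)
	{
		nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
			  "GPC%d TPC%d: locked down SM%d", gpc, tpc, sm);
	}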
diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
index 9f6d176e..9f9ff337 100644
--- a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
@@ -56,7 +56,7 @@ void gv11b_ltc_init_fs_state(struct gk20a *g)
56 u32 ltc_intr; 56 u32 ltc_intr;
57 u32 reg; 57 u32 reg;
58 58
59 gk20a_dbg_info("initialize gv11b l2"); 59 nvgpu_log_info(g, "initialize gv11b l2");
60 60
61 g->ops.mc.reset(g, mc_enable_pfb_enabled_f() | 61 g->ops.mc.reset(g, mc_enable_pfb_enabled_f() |
62 mc_enable_l2_enabled_f()); 62 mc_enable_l2_enabled_f());
@@ -67,7 +67,7 @@ void gv11b_ltc_init_fs_state(struct gk20a *g)
67 67
68 g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); 68 g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
69 g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r()); 69 g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
70 gk20a_dbg_info("%u ltcs out of %u", g->ltc_count, g->max_ltc_count); 70 nvgpu_log_info(g, "%u ltcs out of %u", g->ltc_count, g->max_ltc_count);
71 71
72 /* Disable LTC interrupts */ 72 /* Disable LTC interrupts */
73 reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); 73 reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
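The ltc_gv11b.c hunks make the same substitution for info-level prints. A short sketch of the resulting style, built only from accessors visible above (gk20a_readl(), top_num_ltcs_r(), pri_ringmaster_enum_ltc_r()); the wrapper function itself is a hypothetical example, not code from this change.

	/* Hypothetical helper: reads the LTC counts and reports them via
	 * the device-scoped info log. */
	static void gv11b_ltc_report_counts(struct gk20a *g)
	{
		u32 max_ltc = gk20a_readl(g, top_num_ltcs_r());
		u32 ltc = gk20a_readl(g, pri_ringmaster_enum_ltc_r());

		nvgpu_log_info(g, "%u ltcs out of %u", ltc, max_ltc);
	}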
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
index b46ecb0a..f4084ad6 100644
--- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
@@ -54,7 +54,7 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
54{ 54{
55 struct gk20a *g = gk20a_from_vm(vm); 55 struct gk20a *g = gk20a_from_vm(vm);
56 56
57 gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p", 57 nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p",
58 nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); 58 nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va);
59 59
60 g->ops.mm.init_pdb(g, inst_block, vm); 60 g->ops.mm.init_pdb(g, inst_block, vm);
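nvgpu_log_info() is variadic, so the mm_gv11b.c print above can be wrapped unchanged in a small probe; the helper name below is hypothetical, while nvgpu_inst_block_addr() and the cpu_va field are taken directly from the hunk.

	/* Hypothetical helper: dumps the instance block physical address
	 * and its kernel mapping through the per-device info log. */
	static void gv11b_mm_dump_inst_block(struct gk20a *g,
					     struct nvgpu_mem *inst_block)
	{
		nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p",
			       nvgpu_inst_block_addr(g, inst_block),
			       inst_block->cpu_va);
	}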
diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
index c1b519d0..3f0e2f22 100644
--- a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
@@ -37,8 +37,8 @@
37 37
38#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h> 38#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
39 39
40#define gv11b_dbg_pmu(fmt, arg...) \ 40#define gv11b_dbg_pmu(g, fmt, arg...) \
41 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 41 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
42 42
43#define ALIGN_4KB 12 43#define ALIGN_4KB 12
44 44
@@ -121,7 +121,7 @@ int gv11b_pmu_setup_elpg(struct gk20a *g)
121 u32 reg_writes; 121 u32 reg_writes;
122 u32 index; 122 u32 index;
123 123
124 gk20a_dbg_fn(""); 124 nvgpu_log_fn(g, " ");
125 125
126 if (g->elpg_enabled) { 126 if (g->elpg_enabled) {
127 reg_writes = ((sizeof(_pginitseq_gv11b) / 127 reg_writes = ((sizeof(_pginitseq_gv11b) /
@@ -133,7 +133,7 @@ int gv11b_pmu_setup_elpg(struct gk20a *g)
133 } 133 }
134 } 134 }
135 135
136 gk20a_dbg_fn("done"); 136 nvgpu_log_fn(g, "done");
137 return ret; 137 return ret;
138} 138}
139 139
@@ -187,7 +187,7 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
187 u64 addr_code_hi, addr_data_hi; 187 u64 addr_code_hi, addr_data_hi;
188 u32 i, blocks, addr_args; 188 u32 i, blocks, addr_args;
189 189
190 gk20a_dbg_fn(""); 190 nvgpu_log_fn(g, " ");
191 191
192 gk20a_writel(g, pwr_falcon_itfen_r(), 192 gk20a_writel(g, pwr_falcon_itfen_r(),
193 gk20a_readl(g, pwr_falcon_itfen_r()) | 193 gk20a_readl(g, pwr_falcon_itfen_r()) |
@@ -407,28 +407,28 @@ u32 gv11b_pmu_get_irqdest(struct gk20a *g)
407static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg, 407static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
408 void *param, u32 handle, u32 status) 408 void *param, u32 handle, u32 status)
409{ 409{
410 gk20a_dbg_fn(""); 410 nvgpu_log_fn(g, " ");
411 411
412 if (status != 0) { 412 if (status != 0) {
413 nvgpu_err(g, "Sub-feature mask update cmd aborted\n"); 413 nvgpu_err(g, "Sub-feature mask update cmd aborted\n");
414 return; 414 return;
415 } 415 }
416 416
417 gv11b_dbg_pmu("sub-feature mask update is acknowledged from PMU %x\n", 417 gv11b_dbg_pmu(g, "sub-feature mask update is acknowledged from PMU %x\n",
418 msg->msg.pg.msg_type); 418 msg->msg.pg.msg_type);
419} 419}
420 420
421static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg, 421static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg,
422 void *param, u32 handle, u32 status) 422 void *param, u32 handle, u32 status)
423{ 423{
424 gk20a_dbg_fn(""); 424 nvgpu_log_fn(g, " ");
425 425
426 if (status != 0) { 426 if (status != 0) {
427 nvgpu_err(g, "GR PARAM cmd aborted\n"); 427 nvgpu_err(g, "GR PARAM cmd aborted\n");
428 return; 428 return;
429 } 429 }
430 430
431 gv11b_dbg_pmu("GR PARAM is acknowledged from PMU %x\n", 431 gv11b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x\n",
432 msg->msg.pg.msg_type); 432 msg->msg.pg.msg_type);
433} 433}
434 434
@@ -450,7 +450,7 @@ int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
450 cmd.cmd.pg.gr_init_param_v1.featuremask = 450 cmd.cmd.pg.gr_init_param_v1.featuremask =
451 NVGPU_PMU_GR_FEATURE_MASK_ALL; 451 NVGPU_PMU_GR_FEATURE_MASK_ALL;
452 452
453 gv11b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n"); 453 gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n");
454 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 454 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
455 pmu_handle_pg_param_msg, pmu, &seq, ~0); 455 pmu_handle_pg_param_msg, pmu, &seq, ~0);
456 456
@@ -488,7 +488,7 @@ int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
488 NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC | 488 NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC |
489 NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG; 489 NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG;
490 490
491 gv11b_dbg_pmu("cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n"); 491 gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n");
492 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 492 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
493 pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0); 493 pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0);
494 } else 494 } else
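In pmu_gv11b.c the gv11b_dbg_pmu() wrapper itself gains the device argument and forwards to nvgpu_log() under gpu_dbg_pmu. The macro below is copied from the hunk; the caller is only an illustrative sketch with a hypothetical name.

	#define gv11b_dbg_pmu(g, fmt, arg...) \
		nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

	/* Hypothetical caller: PMU message acknowledgements now carry the
	 * struct gk20a handle through to the logging core. */
	static void gv11b_pmu_log_ack(struct gk20a *g, u32 msg_type)
	{
		gv11b_dbg_pmu(g, "acknowledged from PMU %x", msg_type);
	}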
diff --git a/drivers/gpu/nvgpu/gv11b/therm_gv11b.c b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c
index 067c464b..961ab5c0 100644
--- a/drivers/gpu/nvgpu/gv11b/therm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c
@@ -34,7 +34,7 @@ int gv11b_init_therm_setup_hw(struct gk20a *g)
34{ 34{
35 u32 v; 35 u32 v;
36 36
37 gk20a_dbg_fn(""); 37 nvgpu_log_fn(g, " ");
38 38
39 /* program NV_THERM registers */ 39 /* program NV_THERM registers */
40 gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | 40 gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
@@ -108,7 +108,7 @@ int gv11b_elcg_init_idle_filters(struct gk20a *g)
108 if (nvgpu_platform_is_simulation(g)) 108 if (nvgpu_platform_is_simulation(g))
109 return 0; 109 return 0;
110 110
111 gk20a_dbg_info("init clock/power gate reg"); 111 nvgpu_log_info(g, "init clock/power gate reg");
112 112
113 for (engine_id = 0; engine_id < f->num_engines; engine_id++) { 113 for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
114 active_engine_id = f->active_engines_list[engine_id]; 114 active_engine_id = f->active_engines_list[engine_id];