author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-03-30 10:44:03 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-10 22:04:19 -0400
commit	3ba374a5d94f8c2067731155afaf79f03e6c390c (patch)
tree	d8a2bd0d52b1e8862510aedeb7529944c0b7e28e /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent	2be51206af88aba6662cdd9de5bd6c18989bbcbd (diff)
gpu: nvgpu: gk20a: Use new error macro
gk20a_err() and gk20a_warn() require a struct device pointer, which is
not portable across operating systems. The new nvgpu_err() and
nvgpu_warn() macros take a struct gk20a pointer instead. Convert the code
to use the more portable macros.

JIRA NVGPU-16

Change-Id: Ia51f36d94c5ce57a5a0ab83b3c83a6bce09e2d5c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1331694
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
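The conversion is mechanical at each call site: drop the dev_from_gk20a(g) argument and pass the struct gk20a pointer itself, pulling the new macros in via the added <nvgpu/log.h> include. A representative before/after excerpt from this patch (just the call-site pattern, not a standalone program):

	#include <nvgpu/log.h>

	/* before: needs a Linux struct device via dev_from_gk20a() */
	gk20a_err(dev_from_gk20a(g), "failed to preempt channel/TSG\n");

	/* after: portable, takes the struct gk20a pointer directly */
	nvgpu_err(g, "failed to preempt channel/TSG\n");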
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	329
1 file changed, 163 insertions(+), 166 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index af02491e..06374fb7 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -33,6 +33,7 @@
33#include <nvgpu/kmem.h> 33#include <nvgpu/kmem.h>
34#include <nvgpu/timers.h> 34#include <nvgpu/timers.h>
35#include <nvgpu/nvgpu_common.h> 35#include <nvgpu/nvgpu_common.h>
36#include <nvgpu/log.h>
36 37
37#include "gk20a.h" 38#include "gk20a.h"
38#include "kind_gk20a.h" 39#include "kind_gk20a.h"
@@ -126,81 +127,81 @@ void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
126{ 127{
127 unsigned int i; 128 unsigned int i;
128 129
129 gk20a_err(dev_from_gk20a(g), "gr_fecs_os_r : %d", 130 nvgpu_err(g, "gr_fecs_os_r : %d",
130 gk20a_readl(g, gr_fecs_os_r())); 131 gk20a_readl(g, gr_fecs_os_r()));
131 gk20a_err(dev_from_gk20a(g), "gr_fecs_cpuctl_r : 0x%x", 132 nvgpu_err(g, "gr_fecs_cpuctl_r : 0x%x",
132 gk20a_readl(g, gr_fecs_cpuctl_r())); 133 gk20a_readl(g, gr_fecs_cpuctl_r()));
133 gk20a_err(dev_from_gk20a(g), "gr_fecs_idlestate_r : 0x%x", 134 nvgpu_err(g, "gr_fecs_idlestate_r : 0x%x",
134 gk20a_readl(g, gr_fecs_idlestate_r())); 135 gk20a_readl(g, gr_fecs_idlestate_r()));
135 gk20a_err(dev_from_gk20a(g), "gr_fecs_mailbox0_r : 0x%x", 136 nvgpu_err(g, "gr_fecs_mailbox0_r : 0x%x",
136 gk20a_readl(g, gr_fecs_mailbox0_r())); 137 gk20a_readl(g, gr_fecs_mailbox0_r()));
137 gk20a_err(dev_from_gk20a(g), "gr_fecs_mailbox1_r : 0x%x", 138 nvgpu_err(g, "gr_fecs_mailbox1_r : 0x%x",
138 gk20a_readl(g, gr_fecs_mailbox1_r())); 139 gk20a_readl(g, gr_fecs_mailbox1_r()));
139 gk20a_err(dev_from_gk20a(g), "gr_fecs_irqstat_r : 0x%x", 140 nvgpu_err(g, "gr_fecs_irqstat_r : 0x%x",
140 gk20a_readl(g, gr_fecs_irqstat_r())); 141 gk20a_readl(g, gr_fecs_irqstat_r()));
141 gk20a_err(dev_from_gk20a(g), "gr_fecs_irqmode_r : 0x%x", 142 nvgpu_err(g, "gr_fecs_irqmode_r : 0x%x",
142 gk20a_readl(g, gr_fecs_irqmode_r())); 143 gk20a_readl(g, gr_fecs_irqmode_r()));
143 gk20a_err(dev_from_gk20a(g), "gr_fecs_irqmask_r : 0x%x", 144 nvgpu_err(g, "gr_fecs_irqmask_r : 0x%x",
144 gk20a_readl(g, gr_fecs_irqmask_r())); 145 gk20a_readl(g, gr_fecs_irqmask_r()));
145 gk20a_err(dev_from_gk20a(g), "gr_fecs_irqdest_r : 0x%x", 146 nvgpu_err(g, "gr_fecs_irqdest_r : 0x%x",
146 gk20a_readl(g, gr_fecs_irqdest_r())); 147 gk20a_readl(g, gr_fecs_irqdest_r()));
147 gk20a_err(dev_from_gk20a(g), "gr_fecs_debug1_r : 0x%x", 148 nvgpu_err(g, "gr_fecs_debug1_r : 0x%x",
148 gk20a_readl(g, gr_fecs_debug1_r())); 149 gk20a_readl(g, gr_fecs_debug1_r()));
149 gk20a_err(dev_from_gk20a(g), "gr_fecs_debuginfo_r : 0x%x", 150 nvgpu_err(g, "gr_fecs_debuginfo_r : 0x%x",
150 gk20a_readl(g, gr_fecs_debuginfo_r())); 151 gk20a_readl(g, gr_fecs_debuginfo_r()));
151 152
152 for (i = 0; i < gr_fecs_ctxsw_mailbox__size_1_v(); i++) 153 for (i = 0; i < gr_fecs_ctxsw_mailbox__size_1_v(); i++)
153 gk20a_err(dev_from_gk20a(g), "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x", 154 nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
154 i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i))); 155 i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i)));
155 156
156 gk20a_err(dev_from_gk20a(g), "gr_fecs_engctl_r : 0x%x", 157 nvgpu_err(g, "gr_fecs_engctl_r : 0x%x",
157 gk20a_readl(g, gr_fecs_engctl_r())); 158 gk20a_readl(g, gr_fecs_engctl_r()));
158 gk20a_err(dev_from_gk20a(g), "gr_fecs_curctx_r : 0x%x", 159 nvgpu_err(g, "gr_fecs_curctx_r : 0x%x",
159 gk20a_readl(g, gr_fecs_curctx_r())); 160 gk20a_readl(g, gr_fecs_curctx_r()));
160 gk20a_err(dev_from_gk20a(g), "gr_fecs_nxtctx_r : 0x%x", 161 nvgpu_err(g, "gr_fecs_nxtctx_r : 0x%x",
161 gk20a_readl(g, gr_fecs_nxtctx_r())); 162 gk20a_readl(g, gr_fecs_nxtctx_r()));
162 163
163 gk20a_writel(g, gr_fecs_icd_cmd_r(), 164 gk20a_writel(g, gr_fecs_icd_cmd_r(),
164 gr_fecs_icd_cmd_opc_rreg_f() | 165 gr_fecs_icd_cmd_opc_rreg_f() |
165 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_IMB)); 166 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
166 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_IMB : 0x%x", 167 nvgpu_err(g, "FECS_FALCON_REG_IMB : 0x%x",
167 gk20a_readl(g, gr_fecs_icd_rdata_r())); 168 gk20a_readl(g, gr_fecs_icd_rdata_r()));
168 169
169 gk20a_writel(g, gr_fecs_icd_cmd_r(), 170 gk20a_writel(g, gr_fecs_icd_cmd_r(),
170 gr_fecs_icd_cmd_opc_rreg_f() | 171 gr_fecs_icd_cmd_opc_rreg_f() |
171 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_DMB)); 172 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
172 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_DMB : 0x%x", 173 nvgpu_err(g, "FECS_FALCON_REG_DMB : 0x%x",
173 gk20a_readl(g, gr_fecs_icd_rdata_r())); 174 gk20a_readl(g, gr_fecs_icd_rdata_r()));
174 175
175 gk20a_writel(g, gr_fecs_icd_cmd_r(), 176 gk20a_writel(g, gr_fecs_icd_cmd_r(),
176 gr_fecs_icd_cmd_opc_rreg_f() | 177 gr_fecs_icd_cmd_opc_rreg_f() |
177 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CSW)); 178 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
178 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_CSW : 0x%x", 179 nvgpu_err(g, "FECS_FALCON_REG_CSW : 0x%x",
179 gk20a_readl(g, gr_fecs_icd_rdata_r())); 180 gk20a_readl(g, gr_fecs_icd_rdata_r()));
180 181
181 gk20a_writel(g, gr_fecs_icd_cmd_r(), 182 gk20a_writel(g, gr_fecs_icd_cmd_r(),
182 gr_fecs_icd_cmd_opc_rreg_f() | 183 gr_fecs_icd_cmd_opc_rreg_f() |
183 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CTX)); 184 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
184 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_CTX : 0x%x", 185 nvgpu_err(g, "FECS_FALCON_REG_CTX : 0x%x",
185 gk20a_readl(g, gr_fecs_icd_rdata_r())); 186 gk20a_readl(g, gr_fecs_icd_rdata_r()));
186 187
187 gk20a_writel(g, gr_fecs_icd_cmd_r(), 188 gk20a_writel(g, gr_fecs_icd_cmd_r(),
188 gr_fecs_icd_cmd_opc_rreg_f() | 189 gr_fecs_icd_cmd_opc_rreg_f() |
189 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_EXCI)); 190 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
190 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_EXCI : 0x%x", 191 nvgpu_err(g, "FECS_FALCON_REG_EXCI : 0x%x",
191 gk20a_readl(g, gr_fecs_icd_rdata_r())); 192 gk20a_readl(g, gr_fecs_icd_rdata_r()));
192 193
193 for (i = 0; i < 4; i++) { 194 for (i = 0; i < 4; i++) {
194 gk20a_writel(g, gr_fecs_icd_cmd_r(), 195 gk20a_writel(g, gr_fecs_icd_cmd_r(),
195 gr_fecs_icd_cmd_opc_rreg_f() | 196 gr_fecs_icd_cmd_opc_rreg_f() |
196 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_PC)); 197 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_PC));
197 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_PC : 0x%x", 198 nvgpu_err(g, "FECS_FALCON_REG_PC : 0x%x",
198 gk20a_readl(g, gr_fecs_icd_rdata_r())); 199 gk20a_readl(g, gr_fecs_icd_rdata_r()));
199 200
200 gk20a_writel(g, gr_fecs_icd_cmd_r(), 201 gk20a_writel(g, gr_fecs_icd_cmd_r(),
201 gr_fecs_icd_cmd_opc_rreg_f() | 202 gr_fecs_icd_cmd_opc_rreg_f() |
202 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_SP)); 203 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_SP));
203 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_SP : 0x%x", 204 nvgpu_err(g, "FECS_FALCON_REG_SP : 0x%x",
204 gk20a_readl(g, gr_fecs_icd_rdata_r())); 205 gk20a_readl(g, gr_fecs_icd_rdata_r()));
205 } 206 }
206} 207}
@@ -373,7 +374,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms,
373 374
374 } while (!nvgpu_timeout_expired(&timeout)); 375 } while (!nvgpu_timeout_expired(&timeout));
375 376
376 gk20a_err(dev_from_gk20a(g), 377 nvgpu_err(g,
377 "timeout, ctxsw busy : %d, gr busy : %d", 378 "timeout, ctxsw busy : %d, gr busy : %d",
378 ctxsw_active, gr_busy); 379 ctxsw_active, gr_busy);
379 380
@@ -408,7 +409,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms,
408 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); 409 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
409 } while (!nvgpu_timeout_expired(&timeout)); 410 } while (!nvgpu_timeout_expired(&timeout));
410 411
411 gk20a_err(dev_from_gk20a(g), 412 nvgpu_err(g,
412 "timeout, fe busy : %x", val); 413 "timeout, fe busy : %x", val);
413 414
414 return -EAGAIN; 415 return -EAGAIN;
@@ -466,7 +467,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
466 /* do no success check */ 467 /* do no success check */
467 break; 468 break;
468 default: 469 default:
469 gk20a_err(dev_from_gk20a(g), 470 nvgpu_err(g,
470 "invalid success opcode 0x%x", opc_success); 471 "invalid success opcode 0x%x", opc_success);
471 472
472 check = WAIT_UCODE_ERROR; 473 check = WAIT_UCODE_ERROR;
@@ -498,7 +499,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
498 /* do no check on fail*/ 499 /* do no check on fail*/
499 break; 500 break;
500 default: 501 default:
501 gk20a_err(dev_from_gk20a(g), 502 nvgpu_err(g,
502 "invalid fail opcode 0x%x", opc_fail); 503 "invalid fail opcode 0x%x", opc_fail);
503 check = WAIT_UCODE_ERROR; 504 check = WAIT_UCODE_ERROR;
504 break; 505 break;
@@ -512,13 +513,13 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
512 } 513 }
513 514
514 if (check == WAIT_UCODE_TIMEOUT) { 515 if (check == WAIT_UCODE_TIMEOUT) {
515 gk20a_err(dev_from_gk20a(g), 516 nvgpu_err(g,
516 "timeout waiting on ucode response"); 517 "timeout waiting on ucode response");
517 gk20a_fecs_dump_falcon_stats(g); 518 gk20a_fecs_dump_falcon_stats(g);
518 gk20a_gr_debug_dump(g->dev); 519 gk20a_gr_debug_dump(g->dev);
519 return -1; 520 return -1;
520 } else if (check == WAIT_UCODE_ERROR) { 521 } else if (check == WAIT_UCODE_ERROR) {
521 gk20a_err(dev_from_gk20a(g), 522 nvgpu_err(g,
522 "ucode method failed on mailbox=%d value=0x%08x", 523 "ucode method failed on mailbox=%d value=0x%08x",
523 mailbox_id, reg); 524 mailbox_id, reg);
524 gk20a_fecs_dump_falcon_stats(g); 525 gk20a_fecs_dump_falcon_stats(g);
@@ -735,7 +736,7 @@ static int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
735 .cond.ok = GR_IS_UCODE_OP_AND, 736 .cond.ok = GR_IS_UCODE_OP_AND,
736 .cond.fail = GR_IS_UCODE_OP_AND}, true); 737 .cond.fail = GR_IS_UCODE_OP_AND}, true);
737 if (ret) 738 if (ret)
738 gk20a_err(dev_from_gk20a(g), 739 nvgpu_err(g,
739 "bind channel instance failed"); 740 "bind channel instance failed");
740 741
741 return ret; 742 return ret;
@@ -786,13 +787,13 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
786 787
787 ret = gk20a_disable_channel_tsg(g, c); 788 ret = gk20a_disable_channel_tsg(g, c);
788 if (ret) { 789 if (ret) {
789 gk20a_err(dev_from_gk20a(g), "failed to disable channel/TSG\n"); 790 nvgpu_err(g, "failed to disable channel/TSG\n");
790 goto clean_up; 791 goto clean_up;
791 } 792 }
792 ret = gk20a_fifo_preempt(g, c); 793 ret = gk20a_fifo_preempt(g, c);
793 if (ret) { 794 if (ret) {
794 gk20a_enable_channel_tsg(g, c); 795 gk20a_enable_channel_tsg(g, c);
795 gk20a_err(dev_from_gk20a(g), "failed to preempt channel/TSG\n"); 796 nvgpu_err(g, "failed to preempt channel/TSG\n");
796 goto clean_up; 797 goto clean_up;
797 } 798 }
798 799
@@ -1493,7 +1494,7 @@ static int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type)
1493 }, true); 1494 }, true);
1494 1495
1495 if (ret) 1496 if (ret)
1496 gk20a_err(dev_from_gk20a(g), "save context image failed"); 1497 nvgpu_err(g, "save context image failed");
1497 1498
1498 return ret; 1499 return ret;
1499} 1500}
@@ -1821,7 +1822,7 @@ restore_fe_go_idle:
1821 1822
1822clean_up: 1823clean_up:
1823 if (err) 1824 if (err)
1824 gk20a_err(dev_from_gk20a(g), "fail"); 1825 nvgpu_err(g, "fail");
1825 else 1826 else
1826 gk20a_dbg_fn("done"); 1827 gk20a_dbg_fn("done");
1827 1828
@@ -1844,7 +1845,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
1844 gk20a_dbg_fn(""); 1845 gk20a_dbg_fn("");
1845 1846
1846 if (!ch_ctx->gr_ctx) { 1847 if (!ch_ctx->gr_ctx) {
1847 gk20a_err(dev_from_gk20a(g), "no graphics context allocated"); 1848 nvgpu_err(g, "no graphics context allocated");
1848 return -EFAULT; 1849 return -EFAULT;
1849 } 1850 }
1850 1851
@@ -1852,13 +1853,13 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
1852 1853
1853 ret = gk20a_disable_channel_tsg(g, c); 1854 ret = gk20a_disable_channel_tsg(g, c);
1854 if (ret) { 1855 if (ret) {
1855 gk20a_err(dev_from_gk20a(g), "failed to disable channel/TSG\n"); 1856 nvgpu_err(g, "failed to disable channel/TSG\n");
1856 goto out; 1857 goto out;
1857 } 1858 }
1858 ret = gk20a_fifo_preempt(g, c); 1859 ret = gk20a_fifo_preempt(g, c);
1859 if (ret) { 1860 if (ret) {
1860 gk20a_enable_channel_tsg(g, c); 1861 gk20a_enable_channel_tsg(g, c);
1861 gk20a_err(dev_from_gk20a(g), "failed to preempt channel/TSG\n"); 1862 nvgpu_err(g, "failed to preempt channel/TSG\n");
1862 goto out; 1863 goto out;
1863 } 1864 }
1864 1865
@@ -1904,7 +1905,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1904 gk20a_dbg_fn(""); 1905 gk20a_dbg_fn("");
1905 1906
1906 if (!ch_ctx->gr_ctx) { 1907 if (!ch_ctx->gr_ctx) {
1907 gk20a_err(dev_from_gk20a(g), "no graphics context allocated"); 1908 nvgpu_err(g, "no graphics context allocated");
1908 return -EFAULT; 1909 return -EFAULT;
1909 } 1910 }
1910 1911
@@ -1920,14 +1921,14 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1920 1921
1921 ret = gk20a_disable_channel_tsg(g, c); 1922 ret = gk20a_disable_channel_tsg(g, c);
1922 if (ret) { 1923 if (ret) {
1923 gk20a_err(dev_from_gk20a(g), "failed to disable channel/TSG\n"); 1924 nvgpu_err(g, "failed to disable channel/TSG\n");
1924 return ret; 1925 return ret;
1925 } 1926 }
1926 1927
1927 ret = gk20a_fifo_preempt(g, c); 1928 ret = gk20a_fifo_preempt(g, c);
1928 if (ret) { 1929 if (ret) {
1929 gk20a_enable_channel_tsg(g, c); 1930 gk20a_enable_channel_tsg(g, c);
1930 gk20a_err(dev_from_gk20a(g), "failed to preempt channel/TSG\n"); 1931 nvgpu_err(g, "failed to preempt channel/TSG\n");
1931 return ret; 1932 return ret;
1932 } 1933 }
1933 1934
@@ -1944,7 +1945,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1944 &pm_ctx->mem); 1945 &pm_ctx->mem);
1945 if (ret) { 1946 if (ret) {
1946 c->g->ops.fifo.enable_channel(c); 1947 c->g->ops.fifo.enable_channel(c);
1947 gk20a_err(dev_from_gk20a(g), 1948 nvgpu_err(g,
1948 "failed to allocate pm ctxt buffer"); 1949 "failed to allocate pm ctxt buffer");
1949 return ret; 1950 return ret;
1950 } 1951 }
@@ -1956,7 +1957,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1956 gk20a_mem_flag_none, true, 1957 gk20a_mem_flag_none, true,
1957 pm_ctx->mem.aperture); 1958 pm_ctx->mem.aperture);
1958 if (!pm_ctx->mem.gpu_va) { 1959 if (!pm_ctx->mem.gpu_va) {
1959 gk20a_err(dev_from_gk20a(g), 1960 nvgpu_err(g,
1960 "failed to map pm ctxt buffer"); 1961 "failed to map pm ctxt buffer");
1961 nvgpu_dma_free(g, &pm_ctx->mem); 1962 nvgpu_dma_free(g, &pm_ctx->mem);
1962 c->g->ops.fifo.enable_channel(c); 1963 c->g->ops.fifo.enable_channel(c);
@@ -2152,7 +2153,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
2152 */ 2153 */
2153 if (ch_ctx->pm_ctx.pm_mode == ctxsw_prog_main_image_pm_mode_ctxsw_f()) { 2154 if (ch_ctx->pm_ctx.pm_mode == ctxsw_prog_main_image_pm_mode_ctxsw_f()) {
2154 if (ch_ctx->pm_ctx.mem.gpu_va == 0) { 2155 if (ch_ctx->pm_ctx.mem.gpu_va == 0) {
2155 gk20a_err(dev_from_gk20a(g), 2156 nvgpu_err(g,
2156 "context switched pm with no pm buffer!"); 2157 "context switched pm with no pm buffer!");
2157 nvgpu_mem_end(g, mem); 2158 nvgpu_mem_end(g, mem);
2158 return -EFAULT; 2159 return -EFAULT;
@@ -2201,7 +2202,6 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
2201{ 2202{
2202 struct mm_gk20a *mm = &g->mm; 2203 struct mm_gk20a *mm = &g->mm;
2203 struct vm_gk20a *vm = &mm->pmu.vm; 2204 struct vm_gk20a *vm = &mm->pmu.vm;
2204 struct device *d = dev_from_gk20a(g);
2205 struct gk20a_ctxsw_ucode_info *ucode_info = &g->ctxsw_ucode_info; 2205 struct gk20a_ctxsw_ucode_info *ucode_info = &g->ctxsw_ucode_info;
2206 int err; 2206 int err;
2207 2207
@@ -2220,7 +2220,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
2220 false, 2220 false,
2221 ucode_info->surface_desc.aperture); 2221 ucode_info->surface_desc.aperture);
2222 if (!ucode_info->surface_desc.gpu_va) { 2222 if (!ucode_info->surface_desc.gpu_va) {
2223 gk20a_err(d, "failed to update gmmu ptes\n"); 2223 nvgpu_err(g, "failed to update gmmu ptes\n");
2224 return -ENOMEM; 2224 return -ENOMEM;
2225 } 2225 }
2226 2226
@@ -2274,7 +2274,6 @@ static int gr_gk20a_copy_ctxsw_ucode_segments(
2274 2274
2275int gr_gk20a_init_ctxsw_ucode(struct gk20a *g) 2275int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
2276{ 2276{
2277 struct device *d = dev_from_gk20a(g);
2278 struct mm_gk20a *mm = &g->mm; 2277 struct mm_gk20a *mm = &g->mm;
2279 struct vm_gk20a *vm = &mm->pmu.vm; 2278 struct vm_gk20a *vm = &mm->pmu.vm;
2280 struct gk20a_ctxsw_bootloader_desc *fecs_boot_desc; 2279 struct gk20a_ctxsw_bootloader_desc *fecs_boot_desc;
@@ -2289,7 +2288,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
2289 2288
2290 fecs_fw = nvgpu_request_firmware(g, GK20A_FECS_UCODE_IMAGE, 0); 2289 fecs_fw = nvgpu_request_firmware(g, GK20A_FECS_UCODE_IMAGE, 0);
2291 if (!fecs_fw) { 2290 if (!fecs_fw) {
2292 gk20a_err(d, "failed to load fecs ucode!!"); 2291 nvgpu_err(g, "failed to load fecs ucode!!");
2293 return -ENOENT; 2292 return -ENOENT;
2294 } 2293 }
2295 2294
@@ -2300,7 +2299,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
2300 gpccs_fw = nvgpu_request_firmware(g, GK20A_GPCCS_UCODE_IMAGE, 0); 2299 gpccs_fw = nvgpu_request_firmware(g, GK20A_GPCCS_UCODE_IMAGE, 0);
2301 if (!gpccs_fw) { 2300 if (!gpccs_fw) {
2302 release_firmware(fecs_fw); 2301 release_firmware(fecs_fw);
2303 gk20a_err(d, "failed to load gpccs ucode!!"); 2302 nvgpu_err(g, "failed to load gpccs ucode!!");
2304 return -ENOENT; 2303 return -ENOENT;
2305 } 2304 }
2306 2305
@@ -2373,7 +2372,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
2373 retries--; 2372 retries--;
2374 } 2373 }
2375 if (!retries) { 2374 if (!retries) {
2376 gk20a_err(dev_from_gk20a(g), 2375 nvgpu_err(g,
2377 "arbiter idle timeout, status: %08x", 2376 "arbiter idle timeout, status: %08x",
2378 gk20a_readl(g, gr_fecs_ctxsw_status_1_r())); 2377 gk20a_readl(g, gr_fecs_ctxsw_status_1_r()));
2379 } 2378 }
@@ -2405,7 +2404,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
2405 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r()); 2404 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
2406 } 2405 }
2407 if (!retries) 2406 if (!retries)
2408 gk20a_err(dev_from_gk20a(g), "arbiter complete timeout"); 2407 nvgpu_err(g, "arbiter complete timeout");
2409 2408
2410 gk20a_writel(g, gr_fecs_current_ctx_r(), 2409 gk20a_writel(g, gr_fecs_current_ctx_r(),
2411 gr_fecs_current_ctx_ptr_f(inst_ptr >> 12) | 2410 gr_fecs_current_ctx_ptr_f(inst_ptr >> 12) |
@@ -2422,7 +2421,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
2422 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r()); 2421 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
2423 } 2422 }
2424 if (!retries) 2423 if (!retries)
2425 gk20a_err(dev_from_gk20a(g), "arbiter complete timeout"); 2424 nvgpu_err(g, "arbiter complete timeout");
2426} 2425}
2427 2426
2428void gr_gk20a_load_ctxsw_ucode_header(struct gk20a *g, u64 addr_base, 2427void gr_gk20a_load_ctxsw_ucode_header(struct gk20a *g, u64 addr_base,
@@ -2499,7 +2498,7 @@ void gr_gk20a_load_ctxsw_ucode_header(struct gk20a *g, u64 addr_base,
2499 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2498 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0);
2500 break; 2499 break;
2501 default: 2500 default:
2502 gk20a_err(dev_from_gk20a(g), 2501 nvgpu_err(g,
2503 "unknown falcon ucode boot signature 0x%08x" 2502 "unknown falcon ucode boot signature 0x%08x"
2504 " with reg_offset 0x%08x", 2503 " with reg_offset 0x%08x",
2505 segments->boot_signature, reg_offset); 2504 segments->boot_signature, reg_offset);
@@ -2631,7 +2630,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g)
2631 eUcodeHandshakeInitComplete, 2630 eUcodeHandshakeInitComplete,
2632 GR_IS_UCODE_OP_SKIP, 0, false); 2631 GR_IS_UCODE_OP_SKIP, 0, false);
2633 if (ret) { 2632 if (ret) {
2634 gk20a_err(dev_from_gk20a(g), "falcon ucode init timeout"); 2633 nvgpu_err(g, "falcon ucode init timeout");
2635 return ret; 2634 return ret;
2636 } 2635 }
2637 2636
@@ -2666,7 +2665,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2666 op.mailbox.ret = &g->gr.ctx_vars.golden_image_size; 2665 op.mailbox.ret = &g->gr.ctx_vars.golden_image_size;
2667 ret = gr_gk20a_submit_fecs_method_op(g, op, false); 2666 ret = gr_gk20a_submit_fecs_method_op(g, op, false);
2668 if (ret) { 2667 if (ret) {
2669 gk20a_err(dev_from_gk20a(g), 2668 nvgpu_err(g,
2670 "query golden image size failed"); 2669 "query golden image size failed");
2671 return ret; 2670 return ret;
2672 } 2671 }
@@ -2675,7 +2674,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2675 op.mailbox.ret = &g->gr.ctx_vars.zcull_ctxsw_image_size; 2674 op.mailbox.ret = &g->gr.ctx_vars.zcull_ctxsw_image_size;
2676 ret = gr_gk20a_submit_fecs_method_op(g, op, false); 2675 ret = gr_gk20a_submit_fecs_method_op(g, op, false);
2677 if (ret) { 2676 if (ret) {
2678 gk20a_err(dev_from_gk20a(g), 2677 nvgpu_err(g,
2679 "query zcull ctx image size failed"); 2678 "query zcull ctx image size failed");
2680 return ret; 2679 return ret;
2681 } 2680 }
@@ -2684,7 +2683,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2684 op.mailbox.ret = &g->gr.ctx_vars.pm_ctxsw_image_size; 2683 op.mailbox.ret = &g->gr.ctx_vars.pm_ctxsw_image_size;
2685 ret = gr_gk20a_submit_fecs_method_op(g, op, false); 2684 ret = gr_gk20a_submit_fecs_method_op(g, op, false);
2686 if (ret) { 2685 if (ret) {
2687 gk20a_err(dev_from_gk20a(g), 2686 nvgpu_err(g,
2688 "query pm ctx image size failed"); 2687 "query pm ctx image size failed");
2689 return ret; 2688 return ret;
2690 } 2689 }
@@ -2815,7 +2814,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2815 return 0; 2814 return 0;
2816 2815
2817 clean_up: 2816 clean_up:
2818 gk20a_err(dev_from_gk20a(g), "fail"); 2817 nvgpu_err(g, "fail");
2819 gr_gk20a_free_global_ctx_buffers(g); 2818 gr_gk20a_free_global_ctx_buffers(g);
2820 return -ENOMEM; 2819 return -ENOMEM;
2821} 2820}
@@ -2988,7 +2987,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g,
2988 int err; 2987 int err;
2989 2988
2990 if (!tsg->vm) { 2989 if (!tsg->vm) {
2991 gk20a_err(dev_from_gk20a(tsg->g), "No address space bound\n"); 2990 nvgpu_err(tsg->g, "No address space bound\n");
2992 return -ENOMEM; 2991 return -ENOMEM;
2993 } 2992 }
2994 2993
@@ -3029,7 +3028,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
3029void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg) 3028void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
3030{ 3029{
3031 if (!tsg->vm) { 3030 if (!tsg->vm) {
3032 gk20a_err(dev_from_gk20a(tsg->g), "No address space bound\n"); 3031 nvgpu_err(tsg->g, "No address space bound\n");
3033 return; 3032 return;
3034 } 3033 }
3035 tsg->g->ops.gr.free_gr_ctx(tsg->g, tsg->vm, tsg->tsg_gr_ctx); 3034 tsg->g->ops.gr.free_gr_ctx(tsg->g, tsg->vm, tsg->tsg_gr_ctx);
@@ -3139,14 +3138,14 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3139 3138
3140 /* an address space needs to have been bound at this point.*/ 3139 /* an address space needs to have been bound at this point.*/
3141 if (!gk20a_channel_as_bound(c) && !c->vm) { 3140 if (!gk20a_channel_as_bound(c) && !c->vm) {
3142 gk20a_err(dev_from_gk20a(g), 3141 nvgpu_err(g,
3143 "not bound to address space at time" 3142 "not bound to address space at time"
3144 " of grctx allocation"); 3143 " of grctx allocation");
3145 return -EINVAL; 3144 return -EINVAL;
3146 } 3145 }
3147 3146
3148 if (!g->ops.gr.is_valid_class(g, args->class_num)) { 3147 if (!g->ops.gr.is_valid_class(g, args->class_num)) {
3149 gk20a_err(dev_from_gk20a(g), 3148 nvgpu_err(g,
3150 "invalid obj class 0x%x", args->class_num); 3149 "invalid obj class 0x%x", args->class_num);
3151 err = -EINVAL; 3150 err = -EINVAL;
3152 goto out; 3151 goto out;
@@ -3163,7 +3162,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3163 args->class_num, 3162 args->class_num,
3164 args->flags); 3163 args->flags);
3165 if (err) { 3164 if (err) {
3166 gk20a_err(dev_from_gk20a(g), 3165 nvgpu_err(g,
3167 "fail to allocate gr ctx buffer"); 3166 "fail to allocate gr ctx buffer");
3168 goto out; 3167 goto out;
3169 } 3168 }
@@ -3171,7 +3170,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3171 /*TBD: needs to be more subtle about which is 3170 /*TBD: needs to be more subtle about which is
3172 * being allocated as some are allowed to be 3171 * being allocated as some are allowed to be
3173 * allocated along same channel */ 3172 * allocated along same channel */
3174 gk20a_err(dev_from_gk20a(g), 3173 nvgpu_err(g,
3175 "too many classes alloc'd on same channel"); 3174 "too many classes alloc'd on same channel");
3176 err = -EINVAL; 3175 err = -EINVAL;
3177 goto out; 3176 goto out;
@@ -3184,7 +3183,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3184 args->class_num, 3183 args->class_num,
3185 args->flags); 3184 args->flags);
3186 if (err) { 3185 if (err) {
3187 gk20a_err(dev_from_gk20a(g), 3186 nvgpu_err(g,
3188 "fail to allocate TSG gr ctx buffer"); 3187 "fail to allocate TSG gr ctx buffer");
3189 gk20a_vm_put(tsg->vm); 3188 gk20a_vm_put(tsg->vm);
3190 tsg->vm = NULL; 3189 tsg->vm = NULL;
@@ -3200,7 +3199,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3200 /* commit gr ctx buffer */ 3199 /* commit gr ctx buffer */
3201 err = g->ops.gr.commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va); 3200 err = g->ops.gr.commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
3202 if (err) { 3201 if (err) {
3203 gk20a_err(dev_from_gk20a(g), 3202 nvgpu_err(g,
3204 "fail to commit gr ctx buffer"); 3203 "fail to commit gr ctx buffer");
3205 goto out; 3204 goto out;
3206 } 3205 }
@@ -3209,7 +3208,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3209 if (ch_ctx->patch_ctx.mem.sgt == NULL) { 3208 if (ch_ctx->patch_ctx.mem.sgt == NULL) {
3210 err = gr_gk20a_alloc_channel_patch_ctx(g, c); 3209 err = gr_gk20a_alloc_channel_patch_ctx(g, c);
3211 if (err) { 3210 if (err) {
3212 gk20a_err(dev_from_gk20a(g), 3211 nvgpu_err(g,
3213 "fail to allocate patch buffer"); 3212 "fail to allocate patch buffer");
3214 goto out; 3213 goto out;
3215 } 3214 }
@@ -3219,7 +3218,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3219 if (!ch_ctx->global_ctx_buffer_mapped) { 3218 if (!ch_ctx->global_ctx_buffer_mapped) {
3220 err = gr_gk20a_map_global_ctx_buffers(g, c); 3219 err = gr_gk20a_map_global_ctx_buffers(g, c);
3221 if (err) { 3220 if (err) {
3222 gk20a_err(dev_from_gk20a(g), 3221 nvgpu_err(g,
3223 "fail to map global ctx buffer"); 3222 "fail to map global ctx buffer");
3224 goto out; 3223 goto out;
3225 } 3224 }
@@ -3237,7 +3236,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3237 if (support_gk20a_pmu(g->dev)) { 3236 if (support_gk20a_pmu(g->dev)) {
3238 err = gk20a_pmu_disable_elpg(g); 3237 err = gk20a_pmu_disable_elpg(g);
3239 if (err) { 3238 if (err) {
3240 gk20a_err(dev_from_gk20a(g), 3239 nvgpu_err(g,
3241 "failed to set disable elpg"); 3240 "failed to set disable elpg");
3242 } 3241 }
3243 } 3242 }
@@ -3278,7 +3277,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3278 lockboost, true); 3277 lockboost, true);
3279 gr_gk20a_ctx_patch_write_end(g, ch_ctx); 3278 gr_gk20a_ctx_patch_write_end(g, ch_ctx);
3280 } else { 3279 } else {
3281 gk20a_err(dev_from_gk20a(g), 3280 nvgpu_err(g,
3282 "failed to set texlock for compute class"); 3281 "failed to set texlock for compute class");
3283 } 3282 }
3284 3283
@@ -3291,7 +3290,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3291 /* init golden image, ELPG enabled after this is done */ 3290 /* init golden image, ELPG enabled after this is done */
3292 err = gr_gk20a_init_golden_ctx_image(g, c); 3291 err = gr_gk20a_init_golden_ctx_image(g, c);
3293 if (err) { 3292 if (err) {
3294 gk20a_err(dev_from_gk20a(g), 3293 nvgpu_err(g,
3295 "fail to init golden ctx image"); 3294 "fail to init golden ctx image");
3296 goto out; 3295 goto out;
3297 } 3296 }
@@ -3301,14 +3300,14 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3301 err = gr_gk20a_elpg_protected_call(g, 3300 err = gr_gk20a_elpg_protected_call(g,
3302 gr_gk20a_load_golden_ctx_image(g, c)); 3301 gr_gk20a_load_golden_ctx_image(g, c));
3303 if (err) { 3302 if (err) {
3304 gk20a_err(dev_from_gk20a(g), 3303 nvgpu_err(g,
3305 "fail to load golden ctx image"); 3304 "fail to load golden ctx image");
3306 goto out; 3305 goto out;
3307 } 3306 }
3308 if (g->ops.fecs_trace.bind_channel && !c->vpr) { 3307 if (g->ops.fecs_trace.bind_channel && !c->vpr) {
3309 err = g->ops.fecs_trace.bind_channel(g, c); 3308 err = g->ops.fecs_trace.bind_channel(g, c);
3310 if (err) { 3309 if (err) {
3311 gk20a_warn(dev_from_gk20a(g), 3310 nvgpu_warn(g,
3312 "fail to bind channel for ctxsw trace"); 3311 "fail to bind channel for ctxsw trace");
3313 } 3312 }
3314 } 3313 }
@@ -3322,7 +3321,7 @@ out:
3322 can be reused so no need to release them. 3321 can be reused so no need to release them.
3323 2. golden image init and load is a one time thing so if 3322 2. golden image init and load is a one time thing so if
3324 they pass, no need to undo. */ 3323 they pass, no need to undo. */
3325 gk20a_err(dev_from_gk20a(g), "fail"); 3324 nvgpu_err(g, "fail");
3326 return err; 3325 return err;
3327} 3326}
3328 3327
@@ -3490,7 +3489,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
3490 gr->max_zcull_per_gpc_count = nvgpu_get_litter_value(g, GPU_LIT_NUM_ZCULL_BANKS); 3489 gr->max_zcull_per_gpc_count = nvgpu_get_litter_value(g, GPU_LIT_NUM_ZCULL_BANKS);
3491 3490
3492 if (!gr->gpc_count) { 3491 if (!gr->gpc_count) {
3493 gk20a_err(dev_from_gk20a(g), "gpc_count==0!"); 3492 nvgpu_err(g, "gpc_count==0!");
3494 goto clean_up; 3493 goto clean_up;
3495 } 3494 }
3496 3495
@@ -3846,7 +3845,7 @@ clean_up:
3846 nvgpu_kfree(g, sorted_to_unsorted_gpc_map); 3845 nvgpu_kfree(g, sorted_to_unsorted_gpc_map);
3847 3846
3848 if (ret) 3847 if (ret)
3849 gk20a_err(dev_from_gk20a(g), "fail"); 3848 nvgpu_err(g, "fail");
3850 else 3849 else
3851 gk20a_dbg_fn("done"); 3850 gk20a_dbg_fn("done");
3852 3851
@@ -3936,7 +3935,7 @@ static void gr_gk20a_detect_sm_arch(struct gk20a *g)
3936 if (raw_version == gr_gpc0_tpc0_sm_arch_spa_version_smkepler_lp_v()) 3935 if (raw_version == gr_gpc0_tpc0_sm_arch_spa_version_smkepler_lp_v())
3937 version = 0x320; /* SM 3.2 */ 3936 version = 0x320; /* SM 3.2 */
3938 else 3937 else
3939 gk20a_err(dev_from_gk20a(g), "Unknown SM version 0x%x\n", 3938 nvgpu_err(g, "Unknown SM version 0x%x\n",
3940 raw_version); 3939 raw_version);
3941 3940
3942 /* on Kepler, SM version == SPA version */ 3941 /* on Kepler, SM version == SPA version */
@@ -4030,7 +4029,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
4030 4029
4031 ret = gk20a_fifo_disable_engine_activity(g, gr_info, true); 4030 ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
4032 if (ret) { 4031 if (ret) {
4033 gk20a_err(dev_from_gk20a(g), 4032 nvgpu_err(g,
4034 "failed to disable gr engine activity"); 4033 "failed to disable gr engine activity");
4035 return; 4034 return;
4036 } 4035 }
@@ -4038,7 +4037,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
4038 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), 4037 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g),
4039 GR_IDLE_CHECK_DEFAULT); 4038 GR_IDLE_CHECK_DEFAULT);
4040 if (ret) { 4039 if (ret) {
4041 gk20a_err(dev_from_gk20a(g), 4040 nvgpu_err(g,
4042 "failed to idle graphics"); 4041 "failed to idle graphics");
4043 goto clean_up; 4042 goto clean_up;
4044 } 4043 }
@@ -4049,7 +4048,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
4049clean_up: 4048clean_up:
4050 ret = gk20a_fifo_enable_engine_activity(g, gr_info); 4049 ret = gk20a_fifo_enable_engine_activity(g, gr_info);
4051 if (ret) { 4050 if (ret) {
4052 gk20a_err(dev_from_gk20a(g), 4051 nvgpu_err(g,
4053 "failed to enable gr engine activity\n"); 4052 "failed to enable gr engine activity\n");
4054 } 4053 }
4055} 4054}
@@ -4080,7 +4079,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
4080 4079
4081 if (memcmp(c_tbl->color_l2, zbc_val->color_l2, 4080 if (memcmp(c_tbl->color_l2, zbc_val->color_l2,
4082 sizeof(zbc_val->color_l2))) { 4081 sizeof(zbc_val->color_l2))) {
4083 gk20a_err(dev_from_gk20a(g), 4082 nvgpu_err(g,
4084 "zbc l2 and ds color don't match with existing entries"); 4083 "zbc l2 and ds color don't match with existing entries");
4085 ret = -EINVAL; 4084 ret = -EINVAL;
4086 goto err_mutex; 4085 goto err_mutex;
@@ -4140,14 +4139,14 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
4140 if (g->ops.gr.add_zbc_type_s) { 4139 if (g->ops.gr.add_zbc_type_s) {
4141 added = g->ops.gr.add_zbc_type_s(g, gr, zbc_val, &ret); 4140 added = g->ops.gr.add_zbc_type_s(g, gr, zbc_val, &ret);
4142 } else { 4141 } else {
4143 gk20a_err(dev_from_gk20a(g), 4142 nvgpu_err(g,
4144 "invalid zbc table type %d", zbc_val->type); 4143 "invalid zbc table type %d", zbc_val->type);
4145 ret = -EINVAL; 4144 ret = -EINVAL;
4146 goto err_mutex; 4145 goto err_mutex;
4147 } 4146 }
4148 break; 4147 break;
4149 default: 4148 default:
4150 gk20a_err(dev_from_gk20a(g), 4149 nvgpu_err(g,
4151 "invalid zbc table type %d", zbc_val->type); 4150 "invalid zbc table type %d", zbc_val->type);
4152 ret = -EINVAL; 4151 ret = -EINVAL;
4153 goto err_mutex; 4152 goto err_mutex;
@@ -4179,7 +4178,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
4179 break; 4178 break;
4180 case GK20A_ZBC_TYPE_COLOR: 4179 case GK20A_ZBC_TYPE_COLOR:
4181 if (index >= GK20A_ZBC_TABLE_SIZE) { 4180 if (index >= GK20A_ZBC_TABLE_SIZE) {
4182 gk20a_err(dev_from_gk20a(g), 4181 nvgpu_err(g,
4183 "invalid zbc color table index\n"); 4182 "invalid zbc color table index\n");
4184 return -EINVAL; 4183 return -EINVAL;
4185 } 4184 }
@@ -4194,7 +4193,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
4194 break; 4193 break;
4195 case GK20A_ZBC_TYPE_DEPTH: 4194 case GK20A_ZBC_TYPE_DEPTH:
4196 if (index >= GK20A_ZBC_TABLE_SIZE) { 4195 if (index >= GK20A_ZBC_TABLE_SIZE) {
4197 gk20a_err(dev_from_gk20a(g), 4196 nvgpu_err(g,
4198 "invalid zbc depth table index\n"); 4197 "invalid zbc depth table index\n");
4199 return -EINVAL; 4198 return -EINVAL;
4200 } 4199 }
@@ -4207,13 +4206,13 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
4207 return g->ops.gr.zbc_s_query_table(g, gr, 4206 return g->ops.gr.zbc_s_query_table(g, gr,
4208 query_params); 4207 query_params);
4209 } else { 4208 } else {
4210 gk20a_err(dev_from_gk20a(g), 4209 nvgpu_err(g,
4211 "invalid zbc table type\n"); 4210 "invalid zbc table type\n");
4212 return -EINVAL; 4211 return -EINVAL;
4213 } 4212 }
4214 break; 4213 break;
4215 default: 4214 default:
4216 gk20a_err(dev_from_gk20a(g), 4215 nvgpu_err(g,
4217 "invalid zbc table type\n"); 4216 "invalid zbc table type\n");
4218 return -EINVAL; 4217 return -EINVAL;
4219 } 4218 }
@@ -4303,7 +4302,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
4303 if (!err) 4302 if (!err)
4304 gr->max_default_color_index = 3; 4303 gr->max_default_color_index = 3;
4305 else { 4304 else {
4306 gk20a_err(dev_from_gk20a(g), 4305 nvgpu_err(g,
4307 "fail to load default zbc color table\n"); 4306 "fail to load default zbc color table\n");
4308 return err; 4307 return err;
4309 } 4308 }
@@ -4322,7 +4321,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
4322 if (!err) 4321 if (!err)
4323 gr->max_default_depth_index = 2; 4322 gr->max_default_depth_index = 2;
4324 else { 4323 else {
4325 gk20a_err(dev_from_gk20a(g), 4324 nvgpu_err(g,
4326 "fail to load default zbc depth table\n"); 4325 "fail to load default zbc depth table\n");
4327 return err; 4326 return err;
4328 } 4327 }
@@ -4349,7 +4348,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
4349 4348
4350 ret = gk20a_fifo_disable_engine_activity(g, gr_info, true); 4349 ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
4351 if (ret) { 4350 if (ret) {
4352 gk20a_err(dev_from_gk20a(g), 4351 nvgpu_err(g,
4353 "failed to disable gr engine activity"); 4352 "failed to disable gr engine activity");
4354 return ret; 4353 return ret;
4355 } 4354 }
@@ -4357,7 +4356,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
4357 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), 4356 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g),
4358 GR_IDLE_CHECK_DEFAULT); 4357 GR_IDLE_CHECK_DEFAULT);
4359 if (ret) { 4358 if (ret) {
4360 gk20a_err(dev_from_gk20a(g), 4359 nvgpu_err(g,
4361 "failed to idle graphics"); 4360 "failed to idle graphics");
4362 goto clean_up; 4361 goto clean_up;
4363 } 4362 }
@@ -4366,7 +4365,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
4366 4365
4367clean_up: 4366clean_up:
4368 if (gk20a_fifo_enable_engine_activity(g, gr_info)) { 4367 if (gk20a_fifo_enable_engine_activity(g, gr_info)) {
4369 gk20a_err(dev_from_gk20a(g), 4368 nvgpu_err(g,
4370 "failed to enable gr engine activity"); 4369 "failed to enable gr engine activity");
4371 } 4370 }
4372 4371
@@ -4400,7 +4399,7 @@ void gr_gk20a_init_blcg_mode(struct gk20a *g, u32 mode, u32 engine)
4400 therm_gate_ctrl_blk_clk_auto_f()); 4399 therm_gate_ctrl_blk_clk_auto_f());
4401 break; 4400 break;
4402 default: 4401 default:
4403 gk20a_err(dev_from_gk20a(g), 4402 nvgpu_err(g,
4404 "invalid blcg mode %d", mode); 4403 "invalid blcg mode %d", mode);
4405 return; 4404 return;
4406 } 4405 }
@@ -4435,7 +4434,7 @@ void gr_gk20a_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
4435 therm_gate_ctrl_eng_clk_auto_f()); 4434 therm_gate_ctrl_eng_clk_auto_f());
4436 break; 4435 break;
4437 default: 4436 default:
4438 gk20a_err(dev_from_gk20a(g), 4437 nvgpu_err(g,
4439 "invalid elcg mode %d", mode); 4438 "invalid elcg mode %d", mode);
4440 } 4439 }
4441 4440
@@ -4462,7 +4461,7 @@ void gr_gk20a_init_cg_mode(struct gk20a *g, u32 cgmode, u32 mode_config)
4462 g->ops.gr.init_elcg_mode(g, mode_config, 4461 g->ops.gr.init_elcg_mode(g, mode_config,
4463 active_engine_id); 4462 active_engine_id);
4464 else 4463 else
4465 gk20a_err(dev_from_gk20a(g), "invalid cg mode %d %d", cgmode, mode_config); 4464 nvgpu_err(g, "invalid cg mode %d %d", cgmode, mode_config);
4466 } 4465 }
4467} 4466}
4468 4467
@@ -4592,7 +4591,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
4592 zcull_map_tiles = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32)); 4591 zcull_map_tiles = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32));
4593 4592
4594 if (!zcull_map_tiles) { 4593 if (!zcull_map_tiles) {
4595 gk20a_err(dev_from_gk20a(g), 4594 nvgpu_err(g,
4596 "failed to allocate zcull map titles"); 4595 "failed to allocate zcull map titles");
4597 return -ENOMEM; 4596 return -ENOMEM;
4598 } 4597 }
@@ -4600,7 +4599,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
4600 zcull_bank_counters = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32)); 4599 zcull_bank_counters = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32));
4601 4600
4602 if (!zcull_bank_counters) { 4601 if (!zcull_bank_counters) {
4603 gk20a_err(dev_from_gk20a(g), 4602 nvgpu_err(g,
4604 "failed to allocate zcull bank counters"); 4603 "failed to allocate zcull bank counters");
4605 nvgpu_kfree(g, zcull_map_tiles); 4604 nvgpu_kfree(g, zcull_map_tiles);
4606 return -ENOMEM; 4605 return -ENOMEM;
@@ -4626,7 +4625,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
4626 4625
4627 if (gpc_zcull_count != gr->max_zcull_per_gpc_count && 4626 if (gpc_zcull_count != gr->max_zcull_per_gpc_count &&
4628 gpc_zcull_count < gpc_tpc_count) { 4627 gpc_zcull_count < gpc_tpc_count) {
4629 gk20a_err(dev_from_gk20a(g), 4628 nvgpu_err(g,
4630 "zcull_banks (%d) less than tpcs (%d) for gpc (%d)", 4629 "zcull_banks (%d) less than tpcs (%d) for gpc (%d)",
4631 gpc_zcull_count, gpc_tpc_count, gpc_index); 4630 gpc_zcull_count, gpc_tpc_count, gpc_index);
4632 return -EINVAL; 4631 return -EINVAL;
@@ -4991,7 +4990,7 @@ static int gk20a_init_gr_prepare(struct gk20a *g)
4991 if (!g->gr.ctx_vars.valid) { 4990 if (!g->gr.ctx_vars.valid) {
4992 err = gr_gk20a_init_ctx_vars(g, &g->gr); 4991 err = gr_gk20a_init_ctx_vars(g, &g->gr);
4993 if (err) 4992 if (err)
4994 gk20a_err(dev_from_gk20a(g), 4993 nvgpu_err(g,
4995 "fail to load gr init ctx"); 4994 "fail to load gr init ctx");
4996 } 4995 }
4997 return err; 4996 return err;
@@ -5024,7 +5023,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
5024 udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT); 5023 udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT);
5025 } while (!nvgpu_timeout_expired(&timeout)); 5024 } while (!nvgpu_timeout_expired(&timeout));
5026 5025
5027 gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout"); 5026 nvgpu_err(g, "Falcon mem scrubbing timeout");
5028 return -ETIMEDOUT; 5027 return -ETIMEDOUT;
5029} 5028}
5030 5029
@@ -5042,7 +5041,7 @@ static int gr_gk20a_init_ctxsw(struct gk20a *g)
5042 5041
5043out: 5042out:
5044 if (err) 5043 if (err)
5045 gk20a_err(dev_from_gk20a(g), "fail"); 5044 nvgpu_err(g, "fail");
5046 else 5045 else
5047 gk20a_dbg_fn("done"); 5046 gk20a_dbg_fn("done");
5048 5047
@@ -5076,7 +5075,7 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
5076 5075
5077out: 5076out:
5078 if (err) 5077 if (err)
5079 gk20a_err(dev_from_gk20a(g), "fail"); 5078 nvgpu_err(g, "fail");
5080 else 5079 else
5081 gk20a_dbg_fn("done"); 5080 gk20a_dbg_fn("done");
5082 5081
@@ -5094,7 +5093,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
5094 unsigned int num_entries = 0; 5093 unsigned int num_entries = 0;
5095 5094
5096 if (nvgpu_mem_begin(g, mem)) { 5095 if (nvgpu_mem_begin(g, mem)) {
5097 gk20a_err(dev_from_gk20a(g), 5096 nvgpu_err(g,
5098 "failed to map priv access map memory"); 5097 "failed to map priv access map memory");
5099 return -ENOMEM; 5098 return -ENOMEM;
5100 } 5099 }
@@ -5188,7 +5187,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
5188 return 0; 5187 return 0;
5189 5188
5190clean_up: 5189clean_up:
5191 gk20a_err(dev_from_gk20a(g), "fail"); 5190 nvgpu_err(g, "fail");
5192 gk20a_remove_gr_support(gr); 5191 gk20a_remove_gr_support(gr);
5193 return err; 5192 return err;
5194} 5193}
@@ -5198,7 +5197,6 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
5198 struct pmu_gk20a *pmu = &g->pmu; 5197 struct pmu_gk20a *pmu = &g->pmu;
5199 struct mm_gk20a *mm = &g->mm; 5198 struct mm_gk20a *mm = &g->mm;
5200 struct vm_gk20a *vm = &mm->pmu.vm; 5199 struct vm_gk20a *vm = &mm->pmu.vm;
5201 struct device *d = dev_from_gk20a(g);
5202 int err = 0; 5200 int err = 0;
5203 5201
5204 u32 size; 5202 u32 size;
@@ -5209,7 +5207,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
5209 5207
5210 err = gr_gk20a_fecs_get_reglist_img_size(g, &size); 5208 err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
5211 if (err) { 5209 if (err) {
5212 gk20a_err(dev_from_gk20a(g), 5210 nvgpu_err(g,
5213 "fail to query fecs pg buffer size"); 5211 "fail to query fecs pg buffer size");
5214 return err; 5212 return err;
5215 } 5213 }
@@ -5217,7 +5215,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
5217 if (!pmu->pg_buf.cpu_va) { 5215 if (!pmu->pg_buf.cpu_va) {
5218 err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf); 5216 err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
5219 if (err) { 5217 if (err) {
5220 gk20a_err(d, "failed to allocate memory\n"); 5218 nvgpu_err(g, "failed to allocate memory\n");
5221 return -ENOMEM; 5219 return -ENOMEM;
5222 } 5220 }
5223 } 5221 }
@@ -5225,14 +5223,14 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
5225 5223
5226 err = gr_gk20a_fecs_set_reglist_bind_inst(g, &mm->pmu.inst_block); 5224 err = gr_gk20a_fecs_set_reglist_bind_inst(g, &mm->pmu.inst_block);
5227 if (err) { 5225 if (err) {
5228 gk20a_err(dev_from_gk20a(g), 5226 nvgpu_err(g,
5229 "fail to bind pmu inst to gr"); 5227 "fail to bind pmu inst to gr");
5230 return err; 5228 return err;
5231 } 5229 }
5232 5230
5233 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.gpu_va); 5231 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.gpu_va);
5234 if (err) { 5232 if (err) {
5235 gk20a_err(dev_from_gk20a(g), 5233 nvgpu_err(g,
5236 "fail to set pg buffer pmu va"); 5234 "fail to set pg buffer pmu va");
5237 return err; 5235 return err;
5238 } 5236 }
@@ -5496,21 +5494,21 @@ int gk20a_gr_reset(struct gk20a *g)
5496 size = 0; 5494 size = 0;
5497 err = gr_gk20a_fecs_get_reglist_img_size(g, &size); 5495 err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
5498 if (err) { 5496 if (err) {
5499 gk20a_err(dev_from_gk20a(g), 5497 nvgpu_err(g,
5500 "fail to query fecs pg buffer size"); 5498 "fail to query fecs pg buffer size");
5501 return err; 5499 return err;
5502 } 5500 }
5503 5501
5504 err = gr_gk20a_fecs_set_reglist_bind_inst(g, &g->mm.pmu.inst_block); 5502 err = gr_gk20a_fecs_set_reglist_bind_inst(g, &g->mm.pmu.inst_block);
5505 if (err) { 5503 if (err) {
5506 gk20a_err(dev_from_gk20a(g), 5504 nvgpu_err(g,
5507 "fail to bind pmu inst to gr"); 5505 "fail to bind pmu inst to gr");
5508 return err; 5506 return err;
5509 } 5507 }
5510 5508
5511 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, g->pmu.pg_buf.gpu_va); 5509 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, g->pmu.pg_buf.gpu_va);
5512 if (err) { 5510 if (err) {
5513 gk20a_err(dev_from_gk20a(g), 5511 nvgpu_err(g,
5514 "fail to set pg buffer pmu va"); 5512 "fail to set pg buffer pmu va");
5515 return err; 5513 return err;
5516 } 5514 }
@@ -5593,7 +5591,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
5593 gk20a_dbg_fn(""); 5591 gk20a_dbg_fn("");
5594 gk20a_gr_set_error_notifier(g, isr_data, 5592 gk20a_gr_set_error_notifier(g, isr_data,
5595 NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT); 5593 NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT);
5596 gk20a_err(dev_from_gk20a(g), 5594 nvgpu_err(g,
5597 "gr semaphore timeout\n"); 5595 "gr semaphore timeout\n");
5598 return -EINVAL; 5596 return -EINVAL;
5599} 5597}
@@ -5605,7 +5603,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
5605 gk20a_gr_set_error_notifier(g, isr_data, 5603 gk20a_gr_set_error_notifier(g, isr_data,
5606 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY); 5604 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
5607 /* This is an unrecoverable error, reset is needed */ 5605 /* This is an unrecoverable error, reset is needed */
5608 gk20a_err(dev_from_gk20a(g), 5606 nvgpu_err(g,
5609 "gr semaphore timeout\n"); 5607 "gr semaphore timeout\n");
5610 return -EINVAL; 5608 return -EINVAL;
5611} 5609}
@@ -5619,7 +5617,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g,
5619 if (ret) { 5617 if (ret) {
5620 gk20a_gr_set_error_notifier(g, isr_data, 5618 gk20a_gr_set_error_notifier(g, isr_data,
5621 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY); 5619 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
5622 gk20a_err(dev_from_gk20a(g), "invalid method class 0x%08x" 5620 nvgpu_err(g, "invalid method class 0x%08x"
5623 ", offset 0x%08x address 0x%08x\n", 5621 ", offset 0x%08x address 0x%08x\n",
5624 isr_data->class_num, isr_data->offset, isr_data->addr); 5622 isr_data->class_num, isr_data->offset, isr_data->addr);
5625 } 5623 }
@@ -5632,7 +5630,7 @@ static int gk20a_gr_handle_illegal_class(struct gk20a *g,
5632 gk20a_dbg_fn(""); 5630 gk20a_dbg_fn("");
5633 gk20a_gr_set_error_notifier(g, isr_data, 5631 gk20a_gr_set_error_notifier(g, isr_data,
5634 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); 5632 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
5635 gk20a_err(dev_from_gk20a(g), 5633 nvgpu_err(g,
5636 "invalid class 0x%08x, offset 0x%08x", 5634 "invalid class 0x%08x, offset 0x%08x",
5637 isr_data->class_num, isr_data->offset); 5635 isr_data->class_num, isr_data->offset);
5638 return -EINVAL; 5636 return -EINVAL;
@@ -5649,14 +5647,14 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
5649 if (!gr_fecs_intr) 5647 if (!gr_fecs_intr)
5650 return 0; 5648 return 0;
5651 5649
5652 gk20a_err(dev_from_gk20a(g), 5650 nvgpu_err(g,
5653 "unhandled fecs error interrupt 0x%08x for channel %u", 5651 "unhandled fecs error interrupt 0x%08x for channel %u",
5654 gr_fecs_intr, isr_data->chid); 5652 gr_fecs_intr, isr_data->chid);
5655 5653
5656 if (gr_fecs_intr & gr_fecs_host_int_status_umimp_firmware_method_f(1)) { 5654 if (gr_fecs_intr & gr_fecs_host_int_status_umimp_firmware_method_f(1)) {
5657 gk20a_gr_set_error_notifier(g, isr_data, 5655 gk20a_gr_set_error_notifier(g, isr_data,
5658 NVGPU_CHANNEL_FECS_ERR_UNIMP_FIRMWARE_METHOD); 5656 NVGPU_CHANNEL_FECS_ERR_UNIMP_FIRMWARE_METHOD);
5659 gk20a_err(dev_from_gk20a(g), 5657 nvgpu_err(g,
5660 "firmware method error 0x%08x for offset 0x%04x", 5658 "firmware method error 0x%08x for offset 0x%04x",
5661 gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(6)), 5659 gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(6)),
5662 isr_data->data_lo); 5660 isr_data->data_lo);
@@ -5678,7 +5676,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
5678 gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); 5676 gr_class_error_code_v(gk20a_readl(g, gr_class_error_r()));
5679 gk20a_gr_set_error_notifier(g, isr_data, 5677 gk20a_gr_set_error_notifier(g, isr_data,
5680 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); 5678 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
5681 gk20a_err(dev_from_gk20a(g), 5679 nvgpu_err(g,
5682 "class error 0x%08x, offset 0x%08x," 5680 "class error 0x%08x, offset 0x%08x,"
5683 " unhandled intr 0x%08x for channel %u\n", 5681 " unhandled intr 0x%08x for channel %u\n",
5684 isr_data->class_num, isr_data->offset, 5682 isr_data->class_num, isr_data->offset,
@@ -5694,7 +5692,7 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g,
5694 5692
5695 gk20a_gr_set_error_notifier(g, isr_data, 5693 gk20a_gr_set_error_notifier(g, isr_data,
5696 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); 5694 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
5697 gk20a_err(dev_from_gk20a(g), 5695 nvgpu_err(g,
5698 "firmware method 0x%08x, offset 0x%08x for channel %u\n", 5696 "firmware method 0x%08x, offset 0x%08x for channel %u\n",
5699 isr_data->class_num, isr_data->offset, 5697 isr_data->class_num, isr_data->offset,
5700 isr_data->chid); 5698 isr_data->chid);
@@ -5772,7 +5770,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
5772 /* validate offset */ 5770 /* validate offset */
5773 if (offset + sizeof(struct share_buffer_head) > buffer_size || 5771 if (offset + sizeof(struct share_buffer_head) > buffer_size ||
5774 offset + sizeof(struct share_buffer_head) < offset) { 5772 offset + sizeof(struct share_buffer_head) < offset) {
5775 gk20a_err(dev_from_gk20a(g), 5773 nvgpu_err(g,
5776 "cyclestats buffer overrun at offset 0x%x\n", 5774 "cyclestats buffer overrun at offset 0x%x\n",
5777 offset); 5775 offset);
5778 break; 5776 break;
@@ -5790,7 +5788,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
5790 if (sh_hdr->size < min_element_size || 5788 if (sh_hdr->size < min_element_size ||
5791 offset + sh_hdr->size > buffer_size || 5789 offset + sh_hdr->size > buffer_size ||
5792 offset + sh_hdr->size < offset) { 5790 offset + sh_hdr->size < offset) {
5793 gk20a_err(dev_from_gk20a(g), 5791 nvgpu_err(g,
5794 "bad cyclestate buffer header size at offset 0x%x\n", 5792 "bad cyclestate buffer header size at offset 0x%x\n",
5795 offset); 5793 offset);
5796 sh_hdr->failed = true; 5794 sh_hdr->failed = true;
@@ -5814,7 +5812,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
5814 u64 v; 5812 u64 v;
5815 5813
5816 if (!valid) { 5814 if (!valid) {
5817 gk20a_err(dev_from_gk20a(g), 5815 nvgpu_err(g,
5818 "invalid cycletstats op offset: 0x%x\n", 5816 "invalid cycletstats op offset: 0x%x\n",
5819 op_elem->offset_bar0); 5817 op_elem->offset_bar0);
5820 5818
@@ -6070,7 +6068,7 @@ static int gk20a_gr_update_sm_error_state(struct gk20a *g,
6070 6068
6071 err = gr_gk20a_disable_ctxsw(g); 6069 err = gr_gk20a_disable_ctxsw(g);
6072 if (err) { 6070 if (err) {
6073 gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw\n"); 6071 nvgpu_err(g, "unable to stop gr ctxsw\n");
6074 goto fail; 6072 goto fail;
6075 } 6073 }
6076 6074
@@ -6130,7 +6128,7 @@ static int gk20a_gr_clear_sm_error_state(struct gk20a *g,
6130 6128
6131 err = gr_gk20a_disable_ctxsw(g); 6129 err = gr_gk20a_disable_ctxsw(g);
6132 if (err) { 6130 if (err) {
6133 gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw\n"); 6131 nvgpu_err(g, "unable to stop gr ctxsw\n");
6134 goto fail; 6132 goto fail;
6135 } 6133 }
6136 6134
@@ -6183,7 +6181,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
6183 warp_esr = g->ops.gr.mask_hww_warp_esr(warp_esr); 6181 warp_esr = g->ops.gr.mask_hww_warp_esr(warp_esr);
6184 6182
6185 if (!sm_debugger_attached) { 6183 if (!sm_debugger_attached) {
6186 gk20a_err(dev_from_gk20a(g), "sm hww global %08x warp %08x\n", 6184 nvgpu_err(g, "sm hww global %08x warp %08x\n",
6187 global_esr, warp_esr); 6185 global_esr, warp_esr);
6188 return -EFAULT; 6186 return -EFAULT;
6189 } 6187 }
@@ -6203,7 +6201,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
6203 &early_exit, 6201 &early_exit,
6204 &ignore_debugger); 6202 &ignore_debugger);
6205 if (ret) { 6203 if (ret) {
6206 gk20a_err(dev_from_gk20a(g), "could not pre-process sm error!\n"); 6204 nvgpu_err(g, "could not pre-process sm error!\n");
6207 return ret; 6205 return ret;
6208 } 6206 }
6209 } 6207 }
@@ -6237,7 +6235,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
6237 if (do_warp_sync) { 6235 if (do_warp_sync) {
6238 ret = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, true); 6236 ret = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, true);
6239 if (ret) { 6237 if (ret) {
6240 gk20a_err(dev_from_gk20a(g), "sm did not lock down!\n"); 6238 nvgpu_err(g, "sm did not lock down!\n");
6241 return ret; 6239 return ret;
6242 } 6240 }
6243 } 6241 }
@@ -6389,7 +6387,6 @@ static int gk20a_gr_post_bpt_events(struct gk20a *g, struct channel_gk20a *ch,
6389 6387
6390int gk20a_gr_isr(struct gk20a *g) 6388int gk20a_gr_isr(struct gk20a *g)
6391{ 6389{
6392 struct device *dev = dev_from_gk20a(g);
6393 struct gr_gk20a_isr_data isr_data; 6390 struct gr_gk20a_isr_data isr_data;
6394 u32 grfifo_ctl; 6391 u32 grfifo_ctl;
6395 u32 obj_table; 6392 u32 obj_table;
@@ -6520,14 +6517,14 @@ int gk20a_gr_isr(struct gk20a *g)
6520 6517
6521 if (exception & gr_exception_fe_m()) { 6518 if (exception & gr_exception_fe_m()) {
6522 u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); 6519 u32 fe = gk20a_readl(g, gr_fe_hww_esr_r());
6523 gk20a_err(dev, "fe warning %08x", fe); 6520 nvgpu_err(g, "fe warning %08x", fe);
6524 gk20a_writel(g, gr_fe_hww_esr_r(), fe); 6521 gk20a_writel(g, gr_fe_hww_esr_r(), fe);
6525 need_reset |= -EFAULT; 6522 need_reset |= -EFAULT;
6526 } 6523 }
6527 6524
6528 if (exception & gr_exception_memfmt_m()) { 6525 if (exception & gr_exception_memfmt_m()) {
6529 u32 memfmt = gk20a_readl(g, gr_memfmt_hww_esr_r()); 6526 u32 memfmt = gk20a_readl(g, gr_memfmt_hww_esr_r());
6530 gk20a_err(dev, "memfmt exception %08x", memfmt); 6527 nvgpu_err(g, "memfmt exception %08x", memfmt);
6531 gk20a_writel(g, gr_memfmt_hww_esr_r(), memfmt); 6528 gk20a_writel(g, gr_memfmt_hww_esr_r(), memfmt);
6532 need_reset |= -EFAULT; 6529 need_reset |= -EFAULT;
6533 } 6530 }
@@ -6556,7 +6553,7 @@ int gk20a_gr_isr(struct gk20a *g)
 
 	if (exception & gr_exception_ds_m()) {
 		u32 ds = gk20a_readl(g, gr_ds_hww_esr_r());
-		gk20a_err(dev, "ds exception %08x", ds);
+		nvgpu_err(g, "ds exception %08x", ds);
 		gk20a_writel(g, gr_ds_hww_esr_r(), ds);
 		need_reset |= -EFAULT;
 	}
@@ -6565,7 +6562,7 @@ int gk20a_gr_isr(struct gk20a *g)
 		gr_intr &= ~gr_intr_exception_pending_f();
 
 	if (need_reset) {
-		gk20a_err(dev, "set gr exception notifier");
+		nvgpu_err(g, "set gr exception notifier");
 		gk20a_gr_set_error_notifier(g, &isr_data,
 			NVGPU_CHANNEL_GR_EXCEPTION);
 	}
@@ -6586,7 +6583,7 @@ int gk20a_gr_isr(struct gk20a *g)
 	if (gr_intr && !ch) {
 		/* Clear interrupts for unused channel. This is
 		   probably an interrupt during gk20a_free_channel() */
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"unhandled gr interrupt 0x%08x for unreferenceable channel, clearing",
 			gr_intr);
 		gk20a_writel(g, gr_intr_r(), gr_intr);
@@ -6598,7 +6595,7 @@ int gk20a_gr_isr(struct gk20a *g)
 		gr_gpfifo_ctl_semaphore_access_f(1));
 
 	if (gr_intr)
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"unhandled gr interrupt 0x%08x", gr_intr);
 
 	/* Posting of BPT events should be the last thing in this function */
@@ -7330,13 +7327,13 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 	context = (u8 *)context_buffer;
 	/* sanity check main header */
 	if (!check_main_image_header_magic(context)) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"Invalid main header: magic value");
 		return -EINVAL;
 	}
 	num_gpcs = *(u32 *)(context + ctxsw_prog_main_image_num_gpcs_o());
 	if (gpc_num >= num_gpcs) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"GPC 0x%08x is greater than total count 0x%08x!\n",
 			gpc_num, num_gpcs);
 		return -EINVAL;
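
Before doing any offset arithmetic, gr_gk20a_find_priv_offset_in_ext_buffer() validates the context image: a magic check on the main header, then a bounds check of the requested GPC against the GPC count stored in the image. A stripped-down sketch of that validation order (the helper name is hypothetical; the functions it calls appear above):

    /* Hypothetical consolidation of the checks above: returns 0 when the
     * requested GPC index is covered by this context image.
     */
    static int ctx_image_covers_gpc_sketch(struct gk20a *g, u8 *context,
                                           u32 gpc_num)
    {
            u32 num_gpcs;

            if (!check_main_image_header_magic(context)) {
                    nvgpu_err(g, "Invalid main header: magic value");
                    return -EINVAL;
            }

            num_gpcs = *(u32 *)(context + ctxsw_prog_main_image_num_gpcs_o());
            if (gpc_num >= num_gpcs) {
                    nvgpu_err(g, "GPC 0x%08x is greater than total count 0x%08x!",
                              gpc_num, num_gpcs);
                    return -EINVAL;
            }
            return 0;
    }
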
@@ -7357,7 +7354,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 	/* check local header magic */
 	context += ctxsw_prog_ucode_header_size_in_bytes();
 	if (!check_local_header_magic(context)) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"Invalid local header: magic value\n");
 		return -EINVAL;
 	}
@@ -7388,7 +7385,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 			(sm_dsm_perf_regs[sm_dsm_perf_reg_id] & tpc_gpc_mask);
 
 		if (chk_addr != addr) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"Oops addr miss-match! : 0x%08x != 0x%08x\n",
 				addr, chk_addr);
 			return -EINVAL;
@@ -7419,7 +7416,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 				tpc_gpc_mask);
 
 		if (chk_addr != addr) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"Oops addr miss-match! : 0x%08x != 0x%08x\n",
 				addr, chk_addr);
 			return -EINVAL;
@@ -7488,7 +7485,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 	/* last sanity check: did we somehow compute an offset outside the
 	 * extended buffer? */
 	if (offset_to_segment > offset_to_segment_end) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"Overflow ctxsw buffer! 0x%08x > 0x%08x\n",
 			offset_to_segment, offset_to_segment_end);
 		return -EINVAL;
@@ -7680,7 +7677,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 
 	context = (u8 *)context_buffer;
 	if (!check_main_image_header_magic(context)) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"Invalid main header: magic value");
 		return -EINVAL;
 	}
@@ -7689,7 +7686,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 	/* Parse the FECS local header. */
 	context += ctxsw_prog_ucode_header_size_in_bytes();
 	if (!check_local_header_magic(context)) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"Invalid FECS local header: magic value\n");
 		return -EINVAL;
 	}
@@ -7724,7 +7721,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 	}
 
 	if ((gpc_num + 1) > num_gpcs) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"GPC %d not in this context buffer.\n",
 			gpc_num);
 		return -EINVAL;
@@ -7734,7 +7731,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 	for (i = 0; i < num_gpcs; i++) {
 		context += ctxsw_prog_ucode_header_size_in_bytes();
 		if (!check_local_header_magic(context)) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"Invalid GPCCS local header: magic value\n");
 			return -EINVAL;
 
@@ -7751,7 +7748,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 		num_tpcs = *(u32 *)(context + ctxsw_prog_local_image_num_tpcs_o());
 
 		if ((i == gpc_num) && ((tpc_num + 1) > num_tpcs)) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"GPC %d TPC %d not in this context buffer.\n",
 				gpc_num, tpc_num);
 			return -EINVAL;
@@ -8159,7 +8156,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
 			goto cleanup;
 
 		if (offset > hwpm_ctxsw_buffer_size) {
-			gk20a_err(dev_from_gk20a(g), "offset > buffer size");
+			nvgpu_err(g, "offset > buffer size");
 			goto cleanup;
 		}
 
@@ -8175,7 +8172,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
 
 	return 0;
 cleanup:
-	gk20a_err(dev_from_gk20a(g), "Failed to create HWPM buffer offset map");
+	nvgpu_err(g, "Failed to create HWPM buffer offset map");
 	nvgpu_big_free(g, map);
 	return -EINVAL;
 }
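
gr_gk20a_create_hwpm_ctxsw_buffer_offset_map() funnels every failure, including the bounds check above, through a single cleanup label so the partially built map is freed exactly once. A generic, self-contained illustration of that shape (plain C stand-ins, not the nvgpu allocator):

    #include <stdlib.h>

    /* Generic goto-cleanup shape: each failing check jumps to one label that
     * owns the free; the caller owns the returned map only on success.
     */
    unsigned int *build_offset_map_sketch(size_t count, size_t buffer_size)
    {
            unsigned int *map = malloc(count * sizeof(*map));
            size_t i;

            if (!map)
                    return NULL;

            for (i = 0; i < count; i++) {
                    unsigned int offset = (unsigned int)(i * 4); /* stand-in computation */

                    if (offset > buffer_size)  /* mirrors "offset > buffer size" above */
                            goto cleanup;
                    map[i] = offset;
            }
            return map;

    cleanup:
            free(map);
            return NULL;
    }
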
@@ -8213,7 +8210,7 @@ static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g,
 	if (result)
 		*priv_offset = result->offset;
 	else {
-		gk20a_err(dev_from_gk20a(g), "Lookup failed for address 0x%x", addr);
+		nvgpu_err(g, "Lookup failed for address 0x%x", addr);
 		err = -EINVAL;
 	}
 	return err;
@@ -8278,7 +8275,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	 */
 	err = gr_gk20a_disable_ctxsw(g);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
+		nvgpu_err(g, "unable to stop gr ctxsw");
 		/* this should probably be ctx-fatal... */
 		goto cleanup;
 	}
@@ -8418,7 +8415,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 			if (!pm_ctx_ready) {
 				/* Make sure ctx buffer was initialized */
 				if (!ch_ctx->pm_ctx.mem.pages) {
-					gk20a_err(dev_from_gk20a(g),
+					nvgpu_err(g,
 						"Invalid ctx buffer");
 					err = -EINVAL;
 					goto cleanup;
@@ -8515,7 +8512,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	if (restart_gr_ctxsw) {
 		int tmp_err = gr_gk20a_enable_ctxsw(g);
 		if (tmp_err) {
-			gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
+			nvgpu_err(g, "unable to restart ctxsw!\n");
 			err = tmp_err;
 		}
 	}
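
gr_gk20a_exec_ctx_ops() and the suspend/resume helpers further down all bracket their work between gr_gk20a_disable_ctxsw() and gr_gk20a_enable_ctxsw(), and a failed restart is reported and returned. A condensed sketch of that bracket (the wrapper and the do_ctx_work callback are hypothetical):

    /* Hypothetical wrapper for the disable/operate/re-enable bracket above. */
    static int with_ctxsw_stopped_sketch(struct gk20a *g,
                                         int (*do_ctx_work)(struct gk20a *g))
    {
            int err, tmp_err;

            err = gr_gk20a_disable_ctxsw(g);
            if (err) {
                    nvgpu_err(g, "unable to stop gr ctxsw");
                    return err;
            }

            err = do_ctx_work(g);

            tmp_err = gr_gk20a_enable_ctxsw(g);
            if (tmp_err) {
                    nvgpu_err(g, "unable to restart ctxsw!");
                    err = tmp_err;
            }
            return err;
    }
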
@@ -8659,7 +8656,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
 		 * enabled, the sm will never lock down. */
 		if (!mmu_debug_mode_enabled &&
 				(g->ops.mm.mmu_fault_pending(g))) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"GPC%d TPC%d: mmu fault pending,"
 				" sm will never lock down!", gpc, tpc);
 			return -EFAULT;
@@ -8684,9 +8681,9 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
 	warps_trapped = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_1_r() + offset) << 32;
 	warps_trapped |= gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r() + offset);
 
-	gk20a_err(dev_from_gk20a(g),
+	nvgpu_err(g,
 		"GPC%d TPC%d: timed out while trying to lock down SM", gpc, tpc);
-	gk20a_err(dev_from_gk20a(g),
+	nvgpu_err(g,
 		"STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
 		gr_gpc0_tpc0_sm_dbgr_status0_r() + offset, dbgr_status0, dbgr_control0,
 		warps_valid, warps_paused, warps_trapped);
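
The timeout report above assembles 64-bit warp masks from paired 32-bit registers, with the *_1_r() register supplying the upper word. A small sketch of that composition, using hypothetical lo/hi register offsets:

    /* Sketch of the hi/lo composition used for warps_trapped above;
     * reg_lo and reg_hi are hypothetical register offsets.
     */
    static u64 read_warp_mask64_sketch(struct gk20a *g, u32 reg_lo, u32 reg_hi)
    {
            u64 mask;

            mask = (u64)gk20a_readl(g, reg_hi) << 32;  /* upper 32 bits */
            mask |= gk20a_readl(g, reg_lo);            /* lower 32 bits */
            return mask;
    }
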
@@ -8707,7 +8704,7 @@ void gk20a_suspend_single_sm(struct gk20a *g,
 
 	/* if an SM debugger isn't attached, skip suspend */
 	if (!gk20a_gr_sm_debugger_attached(g)) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"SM debugger not attached, skipping suspend!\n");
 		return;
 	}
@@ -8722,7 +8719,7 @@ void gk20a_suspend_single_sm(struct gk20a *g,
 	err = gk20a_gr_wait_for_sm_lock_down(g, gpc, tpc,
 			global_esr_mask, check_errors);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"SuspendSm failed\n");
 		return;
 	}
@@ -8738,7 +8735,7 @@ void gk20a_suspend_all_sms(struct gk20a *g,
 
 	/* if an SM debugger isn't attached, skip suspend */
 	if (!gk20a_gr_sm_debugger_attached(g)) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"SM debugger not attached, skipping suspend!\n");
 		return;
 	}
@@ -8759,7 +8756,7 @@ void gk20a_suspend_all_sms(struct gk20a *g,
 			gk20a_gr_wait_for_sm_lock_down(g, gpc, tpc,
 				global_esr_mask, check_errors);
 			if (err) {
-				gk20a_err(dev_from_gk20a(g),
+				nvgpu_err(g,
 					"SuspendAllSms failed\n");
 				return;
 			}
@@ -9068,7 +9065,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
 
 	err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0);
 	if (err)
-		gk20a_err(dev_from_gk20a(g), "Failed to access register\n");
+		nvgpu_err(g, "Failed to access register\n");
 	nvgpu_kfree(g, ops);
 	return err;
 }
@@ -9188,7 +9185,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
 
 	err = gr_gk20a_disable_ctxsw(g);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
+		nvgpu_err(g, "unable to stop gr ctxsw");
 		goto clean_up;
 	}
 
@@ -9206,7 +9203,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
 
 	err = gr_gk20a_enable_ctxsw(g);
 	if (err)
-		gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
+		nvgpu_err(g, "unable to restart ctxsw!\n");
 
 	*ctx_resident_ch_fd = local_ctx_resident_ch_fd;
 
@@ -9230,7 +9227,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
 
 	err = gr_gk20a_disable_ctxsw(g);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
+		nvgpu_err(g, "unable to stop gr ctxsw");
 		goto clean_up;
 	}
 
@@ -9244,7 +9241,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
 
 	err = gr_gk20a_enable_ctxsw(g);
 	if (err)
-		gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
+		nvgpu_err(g, "unable to restart ctxsw!\n");
 
 	*ctx_resident_ch_fd = local_ctx_resident_ch_fd;
 
@@ -9308,7 +9305,7 @@ int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch)
 
 	err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g), "Failed to read register");
+		nvgpu_err(g, "Failed to read register");
 		return err;
 	}
 
@@ -9318,7 +9315,7 @@ int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch)
 	ops.value_lo = set_field(regval, gr_pri_gpcs_gcc_dbg_invalidate_m(), 1);
 	err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 1, 0);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g), "Failed to write register");
+		nvgpu_err(g, "Failed to write register");
 		return err;
 	}
 
@@ -9326,7 +9323,7 @@ int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch)
 	ops.offset = gr_pri_gpc0_tpc0_sm_cache_control_r();
 	err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g), "Failed to read register");
+		nvgpu_err(g, "Failed to read register");
 		return err;
 	}
 
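
gr_gk20a_inval_icache() drives the invalidation as a read-modify-write through the context-ops path: read the current value, update one field with set_field(), write it back, then move on to the SM cache-control register as shown above. The helper below illustrates what a set_field()-style update does; the exact nvgpu definition may differ.

    /* Illustrative set_field()-style helper: clear the bits selected by the
     * mask, then OR in the new field value.
     */
    static inline u32 set_field_sketch(u32 val, u32 mask, u32 field)
    {
            return (val & ~mask) | field;
    }
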
@@ -9380,7 +9377,7 @@ int gr_gk20a_wait_for_pause(struct gk20a *g, struct warpstate *w_state)
 		err = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, false);
 
 		if (err) {
-			gk20a_err(dev_from_gk20a(g), "sm did not lock down!\n");
+			nvgpu_err(g, "sm did not lock down!");
 			return err;
 		}
 	}